Commit 3f1e8493 authored by ZeinabRm13's avatar ZeinabRm13

Run

parent db5bb48d
# application/ports/analysis_service_port.py
from abc import ABC, abstractmethod
from application.dtos.analysis import AnalysisRequestDTO, AnalysisResponseDTO
from src.application.dtos.analysis import AnalysisRequestDTO, AnalysisResponseDTO
class AnalysisServicePort(ABC):
"""Port/Interface for the analysis service"""
......
# domain/ports/llm_service_port.py
from abc import ABC, abstractmethod
from dataclasses import dataclass
from dtos.LLM import LLMResponseDTO, LLMRequestDTO
from src.application.dtos.LLM import LLMResponseDTO, LLMRequestDTO
class LLMServicePort(ABC):
"""Port/Interface for LLM services"""
......
# application/services/analysis_service.py
from application.ports.analyze_service import AnalysisServicePort
from application.dtos.analysis import AnalysisRequestDTO, AnalysisResponseDTO
from src.application.ports.analyze_service import AnalysisServicePort
from src.application.dtos.analysis import AnalysisRequestDTO, AnalysisResponseDTO
# from domain.ports.image_processor import IImageProcessor # Domain interface
from application.ports.llm_service_port import LLMServicePort # Domain interface
from src.application.ports.llm_service_port import LLMServicePort # Domain interface
import uuid
class AnalyzeService(AnalysisServicePort):
......
# application/services/llm_service.py
from application.dtos.LLM import LLMRequestDTO, LLMResponseDTO
from application.dtos.error import LLMErrorDTO
from src.application.dtos.LLM import LLMRequestDTO, LLMResponseDTO
from src.application.dtos.error import LLMErrorDTO
class LLMService:
async def analyze(self, request: LLMRequestDTO) -> LLMResponseDTO:
......
......@@ -8,7 +8,9 @@ from src.domain.ports.repositories.user_repository import UserRepositoryPort
from src.domain.ports.repositories.charts_repository import ChartsRepositoryPort
from src.infrastructure.adapters.sqlserver.sql_user_repository import SqlUserRepository
from src.infrastructure.adapters.sqlserver.sql_charts_repository import SqlChartsRepository
from application.services.analyze_service import AnalyzeService
from src.application.services.analyze_service import AnalyzeService
from src.application.services.llm_service import LLMService
from src.application.ports.llm_service_port import LLMServicePort
# from infrastructure.services.llm.openai_service import OpenAIService # Concrete LLM impl
# from infrastructure.services.image.pillow_service import PillowImageService # Concrete impl
from src.config import settings
......@@ -44,6 +46,18 @@ def get_upload_use_case(charts_repo: ChartsRepositoryPort = Depends(get_charts_r
def get_analysis_service() -> AnalyzeService:
    """Dependency-injection factory for the chart-analysis service.

    Returns:
        AnalyzeService wired with the default LLMService backend.

    NOTE(review): the previous revision also injected an image service
    (PillowImageService) and a hard-coded OpenAI API key; both were
    removed in this revision, so AnalyzeService now takes only the LLM
    service. Confirm AnalyzeService's constructor matches this.
    """
    llm_service = LLMService()
    # image_service = PillowImageService()  # disabled pending image pipeline
    return AnalyzeService(llm_service)
def get_llm_service() -> LLMServicePort:
    """Dependency-injection factory supplying the LLM service port.

    A concrete adapter could be chosen here from configuration, e.g.::

        # adapter = HuggingFaceAdapter(
        #     api_key=os.getenv("HF_API_KEY"),
        #     model_id="ahmed-masry/chartgemma",
        # )

    For now the plain LLMService implementation is returned.
    """
    return LLMService()
from fastapi import APIRouter, UploadFile, File, Depends
from fastapi import APIRouter, Form, UploadFile, File, Depends
from src.application.services.analyze_service import AnalyzeService
from src.dependencies import get_upload_use_case
from src.application.dtos.LLM import LLMResponseDTO, LLMRequestDTO
from src.application.ports.llm_service_port import LLMServicePort
from src.dependencies import get_llm_service
router = APIRouter()
@router.post("/analyze")
router = APIRouter(tags=["charts"])
@router.post("/ask", response_model=LLMResponseDTO)
async def ask_about_chart(
    image: UploadFile = File(...),
    question: str = Form(...),
    llm_service: LLMServicePort = Depends(get_llm_service)
):
    """Answer a free-form question about an uploaded chart image.

    The uploaded file's raw bytes and the user's question are packed
    into an LLMRequestDTO and forwarded to the injected LLM service.
    """
    payload = await image.read()
    dto = LLMRequestDTO(image_bytes=payload, question=question)
    return await llm_service.analyze(dto)
@router.post("/analyze", response_model=LLMResponseDTO)
async def analyze_chart(
    image: UploadFile = File(...),
    # question: str = Form(...),  # fixed prompt used instead of a user question
    llm_service: LLMServicePort = Depends(get_llm_service)
):
    """Run a canned trend analysis over an uploaded chart image.

    Unlike /ask, the prompt is fixed; only the image is user-supplied.
    The response shape is whatever LLMServicePort.analyze returns
    (declared as LLMResponseDTO via response_model).

    NOTE(review): the prior revision of this endpoint returned
    ``{"image_id": ...}`` via an upload use-case with a hard-coded
    user id ("user123") and a ``-> dict`` annotation that contradicted
    ``response_model=LLMResponseDTO``; this reconstruction keeps the
    newer LLM-backed behavior.
    """
    request = LLMRequestDTO(
        image_bytes=await image.read(),
        question="analyze the trends showing in this chart"
    )
    return await llm_service.analyze(request)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment