Commit da949056 authored by ZeinabRm13's avatar ZeinabRm13

Add ChartGemma service

parent 80763c81
# ChartAnalyzer System - UML Diagrams
## 1. Use Case Diagram
```mermaid
graph TB
subgraph "ChartAnalyzer System"
subgraph "Actors"
User[👤 User]
Admin[👨‍💼 Admin]
LLMService[🤖 LLM Service]
end
subgraph "Authentication Use Cases"
UC1[Register Account]
UC2[Login]
UC3[Logout]
UC4[Validate Token]
end
subgraph "Chart Analysis Use Cases"
UC5[Upload Chart Image]
UC6[Analyze Chart General]
UC7[Ask Specific Question]
UC8[Get Advanced Analysis]
UC9[View Analysis History]
end
subgraph "System Management Use Cases"
UC10[Check Server Status]
UC11[List Available Models]
UC12[Test Model Connection]
end
end
%% Actor relationships
User --> UC1
User --> UC2
User --> UC3
User --> UC5
User --> UC6
User --> UC7
User --> UC8
User --> UC9
User --> UC10
User --> UC11
User --> UC12
Admin --> UC10
Admin --> UC11
Admin --> UC12
%% Include relationships
UC6 -.->|includes| UC5
UC7 -.->|includes| UC5
UC8 -.->|includes| UC5
%% Extend relationships
UC8 -.->|extends| UC7
%% System dependencies
UC6 -.->|uses| LLMService
UC7 -.->|uses| LLMService
UC8 -.->|uses| LLMService
```
## 2. System Sequence Diagrams
### 2.1 User Registration Flow
```mermaid
sequenceDiagram
participant U as User
participant F as Frontend
participant API as FastAPI
participant AS as AuthService
participant UR as UserRepository
participant DB as Database
U->>F: Fill registration form
F->>API: POST /auth/register
API->>AS: register(email, password)
AS->>AS: hash_password(password)
AS->>UR: create_user(user)
UR->>DB: INSERT user
DB-->>UR: user_id
UR-->>AS: User object
AS-->>API: User object
API-->>F: 201 Created
F-->>U: Registration success
```
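From the client's point of view, the whole registration flow above collapses into one HTTP call. A minimal sketch (the JSON field names follow `register(email, password)` here and `RegisterRequestDTO` in the class diagram below; the base URL is an assumption):

```python
import httpx

# Hypothetical client for the registration flow; base URL and exact request
# schema are assumptions, not confirmed by this commit.
def register(base_url: str, email: str, password: str) -> dict:
    response = httpx.post(
        f"{base_url}/auth/register",
        json={"email": email, "password": password},
    )
    response.raise_for_status()  # expect 201 Created on success
    return response.json()
```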
### 2.2 Chart Analysis Flow
```mermaid
sequenceDiagram
participant U as User
participant F as Frontend
participant API as FastAPI
participant CS as ChartService
participant LS as LLMService
participant CG as ChartGemma
participant O as Ollama
U->>F: Upload image + question
F->>API: POST /charts/analyze
API->>CS: analyze_chart(image, question)
CS->>LS: analyze(request)
alt Model Selection
LS->>CG: analyze(image, question)
CG-->>LS: analysis_result
else Default Model
LS->>O: analyze(image, question)
O-->>LS: analysis_result
end
LS-->>CS: LLMResponse
CS-->>API: Analysis result
API-->>F: 200 OK
F-->>U: Display analysis
```
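The upload step in this flow corresponds to the `/charts/analyze` endpoint added in this commit: the question and model selection travel as query parameters, the image as a multipart file. A hedged client sketch (the base URL is an assumption):

```python
import httpx

# Sketch of the "Upload image + question" step against /charts/analyze.
# The model query parameter drives the alt/else branch in the diagram.
def analyze_chart(base_url: str, image_path: str, question: str,
                  model: str = "ollama") -> dict:
    with open(image_path, "rb") as f:
        response = httpx.post(
            f"{base_url}/charts/analyze",
            params={"question": question, "model": model},
            files={"image": ("chart.png", f, "image/png")},
        )
    response.raise_for_status()
    return response.json()  # {"answer": ..., "model": ..., "processing_time": ...}
```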
### 2.3 Advanced Chart Analysis Flow
```mermaid
sequenceDiagram
participant U as User
participant F as Frontend
participant API as FastAPI
participant CS as ChartService
participant CG as ChartGemma
U->>F: Upload image + question + advanced=true
F->>API: POST /charts/ask
API->>CS: analyze_specialized(image, question, advanced)
CS->>CS: Add "PROGRAM OF THOUGHT" prefix
CS->>CG: analyze(enhanced_question)
CG-->>CS: Advanced analysis result
CS-->>API: Analysis result
API-->>F: 200 OK
F-->>U: Display advanced analysis
```
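The advanced variant differs only in the endpoint and one flag: `/charts/ask` accepts `advanced_analysis`, which triggers the "PROGRAM OF THOUGHT" prefix server-side. A short sketch under the same assumptions as above:

```python
import httpx

# Advanced flow: /charts/ask with advanced_analysis=true. The URL and file
# path are placeholders for illustration.
with open("chart.png", "rb") as f:
    response = httpx.post(
        "http://localhost:8000/charts/ask",
        params={"question": "What is the peak value?", "advanced_analysis": True},
        files={"image": ("chart.png", f, "image/png")},
    )
print(response.json()["analysis_type"])  # "advanced" when the flag is set
```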
## 3. Class Diagram
```mermaid
classDiagram
%% Domain Entities
class User {
+String id
+EmailStr email
+String password_hash
+Boolean is_active
+DateTime created_at
+DateTime last_login
}
class ChartImage {
+String id
+String user_id
+Bytes image_data
+DateTime uploaded_at
}
class ChartAnalysis {
+String id
+String chart_image_id
+String question
+String answer
+DateTime created_at
}
%% Application DTOs
class RegisterRequestDTO {
+String email
+String password
}
class LoginRequestDTO {
+String email
+String password
}
class LLMRequestDTO {
+Bytes image_bytes
+String question
}
class LLMResponseDTO {
+String answer
+String model_used
+Float processing_time
}
%% Application Services
class AuthService {
-UserRepositoryPort user_repo
-TokenRepositoryPort token_repo
-String secret_key
+register(email, password) User
+login(email, password) String
+logout(token) void
+validate_token(token) Boolean
}
class ChartGemmaService {
-String gradio_url
+analyze(request) LLMResponseDTO
-_load_image(image_data) Image
-_call_gradio_api(image, question) String
}
class OllamaService {
-String host
-String model
+analyze(request) LLMResponseDTO
}
%% Domain Ports (Interfaces)
class UserRepositoryPort {
<<interface>>
+create_user(user) void
+get_by_email(email) User
+get_by_id(id) User
}
class ChartsRepositoryPort {
<<interface>>
+save(chart_image) void
+get_by_id(id) ChartImage
+get_by_user_id(user_id) List~ChartImage~
}
class LLMServicePort {
<<interface>>
+analyze(request) LLMResponseDTO
}
%% Use Cases
class UploadChartUseCase {
-ChartsRepositoryPort image_repo
+execute(image_bytes, user_id) String
}
class AnalyzeChartUseCase {
-ChartsRepositoryPort charts_repo
-AnalysisRepositoryPort analysis_repo
+execute(image_bytes, question) ChartAnalysis
}
%% Relationships
User "1" --> "0..*" ChartImage : owns
ChartImage "1" --> "0..*" ChartAnalysis : generates
AuthService ..> UserRepositoryPort : uses
ChartGemmaService ..|> LLMServicePort : implements
OllamaService ..|> LLMServicePort : implements
UploadChartUseCase ..> ChartsRepositoryPort : uses
AnalyzeChartUseCase ..> ChartsRepositoryPort : uses
AnalyzeChartUseCase ..> AnalysisRepositoryPort : uses
```
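The `LLMServicePort` interface in the middle of this diagram is the seam that lets `ChartGemmaService` and `OllamaService` be swapped freely. Its definition is not part of this diff; a minimal sketch of what the diagram implies (the real `src/application/ports/llm_service_port.py` may differ):

```python
from abc import ABC, abstractmethod

from src.application.dtos.LLM import LLMRequestDTO, LLMResponseDTO

class LLMServicePort(ABC):
    """Port implemented by ChartGemmaService and OllamaService (see diagram)."""

    @abstractmethod
    async def analyze(self, request: LLMRequestDTO) -> LLMResponseDTO:
        """Answer a question about a chart image."""
        ...
```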
## 4. Component Diagram
```mermaid
graph TB
subgraph "Frontend Layer (React + TypeScript)"
UI[User Interface]
AC[Auth Context]
CS[Chart Service]
AS[API Service]
end
subgraph "API Gateway Layer (FastAPI)"
Router[Router]
Middleware[CORS Middleware]
Auth[Authentication]
end
subgraph "Application Layer"
UC[Use Cases]
AServices[Application Services]
DTOs[Data Transfer Objects]
end
subgraph "Domain Layer"
Entities[Domain Entities]
Ports[Domain Ports]
Services[Domain Services]
end
subgraph "Infrastructure Layer"
Repos[Repositories]
External[External Services]
DB[(PostgreSQL)]
end
subgraph "External Services"
ChartGemma[ChartGemma Model]
Ollama[Ollama LLM]
Gradio[Gradio API]
end
%% Frontend connections
UI --> AC
UI --> CS
CS --> AS
%% API connections
AS --> Router
Router --> Middleware
Router --> Auth
%% Application connections
Router --> UC
UC --> AServices
AServices --> DTOs
%% Domain connections
AServices --> Entities
AServices --> Ports
Ports --> Services
%% Infrastructure connections
Ports --> Repos
Repos --> DB
Services --> External
%% External service connections
External --> ChartGemma
External --> Ollama
External --> Gradio
```
## 5. Activity Diagram - Chart Analysis Process
```mermaid
flowchart TD
Start([User starts analysis]) --> Upload{Upload chart image?}
Upload -->|Yes| Validate{Image valid?}
Validate -->|No| Error[Show error message]
Validate -->|Yes| Question{Enter question?}
Question -->|No| Auto[Use default analysis]
Question -->|Yes| Model{Select model?}
Auto --> Model
Model -->|ChartGemma| CG[Process with ChartGemma]
Model -->|Ollama| OL[Process with Ollama]
Model -->|Default| Default[Use default model]
CG --> Advanced{Advanced analysis?}
Advanced -->|Yes| Enhance[Add Program of Thought]
Advanced -->|No| Process[Process normally]
Enhance --> Process
OL --> Process
Default --> Process
Process --> Result[Generate analysis result]
Result --> Save[Save to database]
Save --> Display[Display result to user]
Display --> End([Analysis complete])
Error --> End
```
## 6. State Diagram - User Authentication
```mermaid
stateDiagram-v2
[*] --> Unauthenticated
Unauthenticated --> Registering : Register
Unauthenticated --> LoggingIn : Login
Registering --> Unauthenticated : Registration failed
Registering --> Authenticated : Registration success
LoggingIn --> Unauthenticated : Login failed
LoggingIn --> Authenticated : Login success
Authenticated --> Unauthenticated : Logout
Authenticated --> TokenExpired : Token expires
TokenExpired --> Unauthenticated : Auto logout
TokenExpired --> Authenticated : Refresh token
```
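In code, the `Authenticated → TokenExpired` transition falls out of JWT validation. A hedged sketch using PyJWT and the `JWT_*` settings added later in this commit (the helper name is hypothetical, and the actual token library used by `AuthService` is not shown in the diff):

```python
import jwt  # PyJWT

from src.config import settings

# Map a token to one of the diagram's states, assuming the app issues HS256
# JWTs as configured by JWT_SECRET / JWT_ALGORITHM in settings.
def token_state(token: str) -> str:
    try:
        jwt.decode(token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGORITHM])
        return "Authenticated"
    except jwt.ExpiredSignatureError:
        return "TokenExpired"
    except jwt.InvalidTokenError:
        return "Unauthenticated"
```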
## 7. Deployment Diagram
```mermaid
graph TB
subgraph "Client Layer"
Browser[Web Browser]
Mobile[Mobile App]
end
subgraph "Frontend Server"
React[React App<br/>Vite Dev Server]
Static[Static Assets]
end
subgraph "Backend Server"
FastAPI[FastAPI Application]
Uvicorn[Uvicorn Server]
end
subgraph "Database Server"
PostgreSQL[(PostgreSQL Database)]
Alembic[Alembic Migrations]
end
subgraph "External Services"
ChartGemmaAPI[ChartGemma API<br/>Gradio]
OllamaAPI[Ollama API<br/>Local]
end
%% Connections
Browser --> React
Mobile --> React
React --> Static
React --> FastAPI
FastAPI --> Uvicorn
Uvicorn --> PostgreSQL
Uvicorn --> Alembic
FastAPI --> ChartGemmaAPI
FastAPI --> OllamaAPI
```
## 8. Data Flow Diagram
```mermaid
graph LR
subgraph "Input Sources"
UserInput[User Input]
ImageUpload[Image Upload]
QuestionInput[Question Input]
end
subgraph "Processing"
Auth[Authentication]
ImageProc[Image Processing]
LLMProc[LLM Processing]
Analysis[Analysis Engine]
end
subgraph "Storage"
UserDB[(User Database)]
ImageDB[(Image Storage)]
AnalysisDB[(Analysis History)]
end
subgraph "Output"
Response[API Response]
UI[User Interface]
Logs[System Logs]
end
%% Data flows
UserInput --> Auth
ImageUpload --> ImageProc
QuestionInput --> LLMProc
Auth --> UserDB
ImageProc --> ImageDB
LLMProc --> Analysis
Analysis --> AnalysisDB
Analysis --> Response
Response --> UI
Auth --> Logs
Analysis --> Logs
```
from fastapi import FastAPI
from src.infrastructure.api.fastapi.routes import auth, charts
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
from src.config import settings
app = FastAPI(
debug=True,
title="Chart Analyzer API",
description="API for analyzing charts and managing users.",
version="1.0.0",
    # ... (remaining FastAPI(...) arguments elided in this diff)
)
@app.on_event("startup")
async def startup_event():
engine = create_engine("postgresql+psycopg://chart_analyzer_user:chartanalyzer13@localhost:5432/chart_analyzer")
engine = create_engine(settings.DATABASE_URL)
try:
with engine.connect() as conn:
print("✅ Database connection successful")
print(f"🔗 Connection string: postgresql+psycopg://chart_analyzer_user:chartanalyzer13@localhost:5432/chart_analyzer")
print(f"🔗 Connection string: {settings.DATABASE_URL}")
except OperationalError as e:
print("❌ Database connection failed")
print(f"Error: {e}")
app.add_middleware(
    CORSMiddleware,
    # NOTE: the diff only shows the tail of this call; the permissive values
    # below are assumptions filled in for completeness.
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Include routers
app.include_router(auth.router, prefix="/auth")
app.include_router(charts.router, prefix="/charts")
@app.get("/")
async def root():
return {"message": "Welcome to Chart Analyzer"}
return {
"message": "Welcome to Chart Analyzer API",
"version": "1.0.0",
"endpoints": {
"authentication": "/auth",
"chart_analysis": "/charts"
}
}
class LLMRequestDTO(BaseModel):
"""Input for LLM analysis requests"""
image_bytes: bytes = Field(..., description="Binary image data (PNG/JPEG)")
question: str = Field(..., max_length=500, description="Question about the chart")
max_tokens: int = Field(300, gt=0, le=2000, description="Max response length")
# temperature: float = Field(0.3, ge=0.1, le=1.0, description="Creativity control")
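if __name__ == "__main__":
    # Hedged demo (not part of the original commit): pydantic enforces the
    # Field constraints above at construction time, e.g. a question over
    # 500 characters is rejected. The PNG bytes are a placeholder.
    from pydantic import ValidationError
    try:
        LLMRequestDTO(image_bytes=b"\x89PNG", question="x" * 501)
    except ValidationError as exc:
        print(exc.errors()[0]["loc"])  # ('question',) - max_length violated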
from pydantic import BaseModel
from typing import List, Optional
class CreateConversationRequestDTO(BaseModel):
chart_image_id: str
title: str
class SendMessageRequestDTO(BaseModel):
conversation_id: str
message: str
class ConversationResponseDTO(BaseModel):
id: str
title: str
created_at: str
updated_at: str
message_count: int
class MessageResponseDTO(BaseModel):
id: str
message_type: str
content: str
timestamp: str
class ConversationDetailResponseDTO(BaseModel):
id: str
title: str
created_at: str
updated_at: str
messages: List[MessageResponseDTO]
import httpx
from src.application.ports.llm_service_port import LLMServicePort
from src.application.dtos.LLM import LLMRequestDTO, LLMResponseDTO
from typing import Optional
import base64
import io
from PIL import Image
import json
import tempfile
import os
class ChartGemmaService(LLMServicePort):
def __init__(self, gradio_url: str):
self.gradio_url = gradio_url
async def analyze(self, request: LLMRequestDTO) -> LLMResponseDTO:
try:
# Check if image data is provided
if not request.image_bytes:
return LLMResponseDTO(
answer="Error: No image data provided",
model_used="ChartGemma",
processing_time=None
)
# Convert image (if provided as bytes) to PIL Image
image = self._load_image(request.image_bytes)
if not image:
return LLMResponseDTO(
answer="Error: Could not load image data",
model_used="ChartGemma",
processing_time=None
)
# Call Gradio API
response = await self._call_gradio_api(
image=image,
question=request.question
)
return LLMResponseDTO(
answer=response,
model_used="ChartGemma",
processing_time=None # Gradio doesn't return this
)
except Exception as e:
# Return a more informative error response
return LLMResponseDTO(
answer=f"Error analyzing chart: {str(e)}",
model_used="ChartGemma",
processing_time=None
)
def _load_image(self, image_data: Optional[bytes]) -> Optional[Image.Image]:
if not image_data:
return None
try:
return Image.open(io.BytesIO(image_data)).convert("RGB")
except Exception as e:
print(f"Error loading image: {e}")
return None
async def _call_gradio_api(self, image: Image.Image, question: str) -> str:
# Save image to a temporary file (since Gradio expects filepath)
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_file:
image.save(tmp_file, format='PNG')
tmp_file_path = tmp_file.name
try:
            # Prepare the payload according to Gradio's API format.
            # NOTE: this sends a *local* filepath, which assumes the Gradio
            # server runs on the same machine; a remote server would need the
            # file uploaded instead (see the gradio_client sketch below).
            payload = {
                "data": [
                    tmp_file_path,  # filepath to the image
                    question        # optional question
                ]
            }
async with httpx.AsyncClient() as client:
                # Gradio 3.x exposes named endpoints under /run/<api_name>/
                api_url = f"{self.gradio_url.rstrip('/')}/run/predict/"
response = await client.post(api_url, json=payload)
response.raise_for_status()
# Gradio returns the output in the "data" field as a list
return response.json()["data"][0]
finally:
# Clean up the temporary file
            try:
                os.unlink(tmp_file_path)
            except OSError:
                pass  # best-effort cleanup of the temp file
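# Alternative sketch (not part of the original commit): the hand-rolled call
# above only works when the Gradio server can read this machine's filesystem.
# Recent versions of the official gradio_client upload the file, so it also
# works against remote servers. The api_name and argument order depend on how
# the ChartGemma demo is defined and should be verified against the live
# endpoint before relying on this.
def analyze_with_gradio_client(gradio_url: str, image_path: str, question: str) -> str:
    from gradio_client import Client, handle_file  # pip install gradio_client

    client = Client(gradio_url)
    return client.predict(handle_file(image_path), question, api_name="/predict")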
from src.domain.entities.chart_analysis import ChartAnalysis
from src.domain.ports.repositories.analysis_repository import AnalysisRepositoryPort
from src.application.ports.llm_service_port import LLMServicePort
from src.application.dtos.LLM import LLMRequestDTO
import uuid
from datetime import datetime, timezone
from typing import List
class ChatConversationUseCase:
def __init__(
self,
analysis_repo: AnalysisRepositoryPort,
llm_service: LLMServicePort
):
self._analysis_repo = analysis_repo
self._llm_service = llm_service
    async def execute(self, chart_image_id: str, conversation_history: List[str], new_question: str) -> ChartAnalysis:
"""Handle interactive chat conversation about a chart"""
# Build context from conversation history
context = self._build_conversation_context(conversation_history)
# Create enhanced question with context
enhanced_question = f"Context from previous conversation: {context}\n\nNew question: {new_question}"
# Create request for LLM
request = LLMRequestDTO(
image_bytes=b"", # Will be loaded by service
question=enhanced_question
)
# Get response from LLM
response = await self._llm_service.analyze(request)
# Save the conversation turn
analysis = ChartAnalysis(
id=str(uuid.uuid4()),
chart_image_id=chart_image_id,
question=new_question,
answer=response.answer,
created_at=datetime.now(timezone.utc)
)
self._analysis_repo.save_analysis(analysis)
return analysis
def _build_conversation_context(self, history: List[str]) -> str:
"""Build context string from conversation history"""
if not history:
return ""
context_parts = []
for i, entry in enumerate(history[-5:], 1): # Last 5 entries for context
context_parts.append(f"Turn {i}: {entry}")
return " | ".join(context_parts)
from src.domain.ports.repositories.analysis_repository import AnalysisRepositoryPort
from src.domain.entities.chart_analysis import ChartAnalysis
from typing import List
class GetAnalysisHistoryUseCase:
def __init__(self, analysis_repo: AnalysisRepositoryPort):
self._analysis_repo = analysis_repo
def execute(self, user_id: str, limit: int = 50) -> List[ChartAnalysis]:
"""Get analysis history for a user"""
return self._analysis_repo.get_analyses_by_user_id(user_id, limit)
from src.domain.entities.chart_analysis import ChartAnalysis
from src.domain.ports.repositories.analysis_repository import AnalysisRepositoryPort
from src.application.dtos.LLM import LLMResponseDTO
import uuid
from datetime import datetime, timezone
class SaveAnalysisUseCase:
def __init__(self, analysis_repo: AnalysisRepositoryPort):
self._analysis_repo = analysis_repo
def execute(self, chart_image_id: str, question: str, llm_response: LLMResponseDTO) -> ChartAnalysis:
"""Save analysis result to database for history tracking"""
analysis = ChartAnalysis(
id=str(uuid.uuid4()),
chart_image_id=chart_image_id,
question=question,
answer=llm_response.answer,
created_at=datetime.now(timezone.utc)
)
self._analysis_repo.save_analysis(analysis)
return analysis
class Settings(BaseSettings):
JWT_SECRET: str = "zKReJQaoTK_F9Y2EIkDKZS1hgnfOZsplgzbjXY7IWyc"
JWT_ALGORITHM: str = "HS256"
JWT_EXPIRE_MINUTES: int = 30
# LLM Service Configuration
OLLAMA_HOST: str = "http://localhost:11434"
OLLAMA_MODEL: str = "llava:34b"
CHARTGEMMA_GRADIO_URL: str = "https://3f4f53fba3f99b8778.gradio.live/"
DEFAULT_LLM_MODEL: str = "ollama"
# Use model_config instead of inner Config class
model_config = ConfigDict(
        # remaining options elided in this diff
    )
from fastapi import Depends, Query
from typing import AsyncGenerator
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from src.domain.ports.repositories.charts_repository import ChartsRepositoryPort
from src.infrastructure.adapters.sqlserver.sql_user_repository import SqlUserRepository
from src.infrastructure.adapters.sqlserver.sql_charts_repository import SqlChartsRepository
from src.application.services.analyze_service import AnalyzeService
from src.application.services.ollama_service import OllamaService
from src.application.services.chartGemma_service import ChartGemmaService
from src.application.ports.llm_service_port import LLMServicePort
from src.infrastructure.adapters.sqlserver.sql_token_repository import SqlTokenRepository
# from infrastructure.services.llm.openai_service import OpenAIService # Concrete LLM impl
# from infrastructure.services.image.pillow_service import PillowImageService # Concrete impl
from src.config import settings
engine = create_async_engine(settings.DATABASE_URL, echo=True)
AsyncSessionLocal = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
async def get_db_session() -> AsyncGenerator[AsyncSession, None]:
async with AsyncSessionLocal() as session:
yield session
def get_user_repository(session: AsyncSession = Depends(get_db_session)) -> UserRepositoryPort:
    # return type and body reconstructed from the truncated hunk header;
    # mirrors get_charts_repository below
    return SqlUserRepository(session)
def get_charts_repository(session: AsyncSession = Depends(get_db_session)) -> ChartsRepositoryPort:
return SqlChartsRepository(session)
def get_token_repo(session: AsyncSession = Depends(get_db_session)) -> TokenRepositoryPort:
return SqlTokenRepository(session)
def get_auth_service(
user_repo: UserRepositoryPort = Depends(get_user_repository),
token_repo: TokenRepositoryPort = Depends(get_token_repo)
):
    ...  # body elided in this diff
def get_upload_use_case(charts_repo: ChartsRepositoryPort = Depends(get_charts_repository)) -> UploadChartUseCase:
return UploadChartUseCase(charts_repo)
def get_analysis_service() -> AnalyzeService:
"""Factory for the analysis service"""
# image_service = PillowImageService()
llm_service = get_llm_service()
return AnalyzeService(llm_service)
def get_ollama_service() -> OllamaService:
"""Factory for Ollama LLM service"""
return OllamaService(host=settings.OLLAMA_HOST)
def get_chartgemma_service() -> ChartGemmaService:
"""Factory for ChartGemma LLM service"""
return ChartGemmaService(gradio_url=settings.CHARTGEMMA_GRADIO_URL)
def get_llm_service(model: str = settings.DEFAULT_LLM_MODEL) -> LLMServicePort:
"""
Factory function to get the appropriate LLM service based on model selection.
This allows easy switching between different LLM providers.
"""
if model.lower() == "ollama":
return get_ollama_service()
elif model.lower() == "chartgemma":
return get_chartgemma_service()
else:
# Default to Ollama if unknown model is specified
return get_ollama_service()
def get_llm_service_by_query(
model: str = Query(default=settings.DEFAULT_LLM_MODEL, description="LLM model to use (ollama or chartgemma)")
) -> LLMServicePort:
"""
Dependency function that allows model selection via query parameter.
This provides a clean way for users to select their preferred LLM model.
"""
return get_llm_service(model)
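# Test-time sketch (not part of the original commit): FastAPI's
# dependency_overrides can swap the Query-driven factory for a stub so route
# tests never hit Ollama or ChartGemma. Shown as a comment to avoid a
# circular import here; the src.main module path is an assumption.
#
#     from fastapi.testclient import TestClient
#     from src.main import app
#     from src.application.dtos.LLM import LLMResponseDTO
#
#     class StubLLMService:
#         async def analyze(self, request):
#             return LLMResponseDTO(answer="stub", model_used="stub",
#                                   processing_time=0.0)
#
#     app.dependency_overrides[get_llm_service_by_query] = lambda: StubLLMService()
#     client = TestClient(app)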
from pydantic import BaseModel, Field
from datetime import datetime, timezone
from typing import List, Optional
class ConversationMessage(BaseModel):
id: str
conversation_id: str
user_id: str
message_type: str # "user" or "assistant"
content: str
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))  # evaluated per instance, not once at import time
class Conversation(BaseModel):
id: str
user_id: str
chart_image_id: str
title: str
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
is_active: bool = True
messages: List[ConversationMessage] = []
from fastapi import APIRouter, UploadFile, File, HTTPException, Depends, Query
from src.application.ports.llm_service_port import LLMServicePort
from src.application.dtos.LLM import LLMRequestDTO
from src.dependencies import get_llm_service_by_query, get_llm_service
from src.config import settings
from typing import Optional
import logging
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
router = APIRouter(tags=["charts"])
@router.post("/ask", response_model=LLMResponseDTO)
async def ask_about_chart(
@router.post("/analyze")
async def analyze_general(
image: UploadFile = File(...),
question: str = Query(default="", description="Question about the chart"),
llm_service: LLMServicePort = Depends(get_llm_service_by_query)
):
"""
Unified endpoint that works with both Ollama and ChartGemma models.
Users can select the model via the 'model' query parameter.
"""
try:
logger.info(f"Starting analysis with question: {question}")
logger.info(f"Image filename: {image.filename}, content_type: {image.content_type}")
image_bytes = await image.read()
logger.info(f"Image size: {len(image_bytes)} bytes")
request = LLMRequestDTO(
image_bytes=image_bytes,
question=question
)
logger.info("Calling LLM service...")
response = await llm_service.analyze(request)
logger.info(f"LLM service response received: {response.answer[:100]}...")
return {
"answer": response.answer,
"model": response.model_used,
"processing_time": response.processing_time
}
except Exception as e:
logger.error(f"Error in analyze_general: {str(e)}", exc_info=True)
raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
@router.post("/analyze", response_model=LLMResponseDTO)
async def analyze_chart(
@router.post("/ask")
async def analyze_chart_specialized(
image: UploadFile = File(...),
question: str = Query(default="", description="Question about the chart"),
advanced_analysis: bool = Query(default=False, description="Enable advanced analysis with program of thought")
):
"""
Specialized endpoint for chart analysis with ChartGemma.
- advanced_analysis: When True, adds specific prompt engineering for charts
"""
try:
logger.info(f"Starting specialized analysis with question: {question}, advanced: {advanced_analysis}")
# Force ChartGemma for specialized chart analysis
service = get_llm_service("chartgemma")
# Enhanced prompt for charts if needed
processed_question = f"PROGRAM OF THOUGHT: {question}" if advanced_analysis else question
image_bytes = await image.read()
request = LLMRequestDTO(
image_bytes=image_bytes,
question=processed_question
)
logger.info("Calling ChartGemma service...")
response = await service.analyze(request)
logger.info(f"ChartGemma service response received: {response.answer[:100]}...")
return {
"answer": response.answer,
"model": "ChartGemma",
"analysis_type": "advanced" if advanced_analysis else "basic",
"processing_time": response.processing_time
}
except Exception as e:
logger.error(f"Error in analyze_chart_specialized: {str(e)}", exc_info=True)
raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
@router.get("/test-chartgemma")
async def test_chartgemma_connection():
"""
Test endpoint to verify ChartGemma service connection and response structure.
"""
try:
logger.info("Testing ChartGemma service connection...")
# Create a simple test request without an image
service = get_llm_service("chartgemma")
# Test the service with a minimal request
test_request = LLMRequestDTO(
image_bytes=b"", # Empty image for testing
question="Test question"
)
response = await service.analyze(test_request)
return {
"status": "success",
"service": "ChartGemma",
"response": response.answer,
"model_used": response.model_used
}
except Exception as e:
logger.error(f"Error testing ChartGemma: {str(e)}", exc_info=True)
return {
"status": "error",
"service": "ChartGemma",
"error": str(e)
}
@router.get("/models")
async def list_available_models():
"""
Endpoint to list available LLM services and their configurations.
"""
return {
"available_models": ["ollama", "chartgemma"],
"default_model": settings.DEFAULT_LLM_MODEL,
"model_configs": {
"ollama": {
"host": settings.OLLAMA_HOST,
"model": settings.OLLAMA_MODEL,
"description": "Local Ollama LLM service"
},
"chartgemma": {
"gradio_url": settings.CHARTGEMMA_GRADIO_URL,
"description": "Specialized chart analysis model"
}
}
}
from fastapi import APIRouter, Depends, HTTPException, status
from src.application.dtos.conversation import (
CreateConversationRequestDTO,
SendMessageRequestDTO,
ConversationResponseDTO,
    ConversationDetailResponseDTO,
    MessageResponseDTO
)
from src.application.use_cases.chat_conversation import ChatConversationUseCase
from src.dependencies import get_current_user
from typing import List
import logging
logger = logging.getLogger(__name__)
router = APIRouter(tags=["conversations"])
@router.post("/", response_model=ConversationResponseDTO, status_code=status.HTTP_201_CREATED)
async def create_conversation(
request: CreateConversationRequestDTO,
current_user: dict = Depends(get_current_user)
):
"""Create a new conversation for a chart"""
try:
# Implementation would use conversation service
# For now, return mock response
return ConversationResponseDTO(
id="conv_123",
title=request.title,
created_at="2024-01-01T00:00:00Z",
updated_at="2024-01-01T00:00:00Z",
message_count=0
)
except Exception as e:
logger.error(f"Error creating conversation: {str(e)}")
raise HTTPException(status_code=500, detail="Failed to create conversation")
@router.get("/", response_model=List[ConversationResponseDTO])
async def list_conversations(
current_user: dict = Depends(get_current_user),
limit: int = 20
):
"""List user's conversations"""
try:
# Implementation would fetch from repository
return []
except Exception as e:
logger.error(f"Error listing conversations: {str(e)}")
raise HTTPException(status_code=500, detail="Failed to list conversations")
@router.get("/{conversation_id}", response_model=ConversationDetailResponseDTO)
async def get_conversation(
conversation_id: str,
current_user: dict = Depends(get_current_user)
):
"""Get conversation details with messages"""
try:
# Implementation would fetch from repository
return ConversationDetailResponseDTO(
id=conversation_id,
title="Sample Conversation",
created_at="2024-01-01T00:00:00Z",
updated_at="2024-01-01T00:00:00Z",
messages=[]
)
except Exception as e:
logger.error(f"Error getting conversation: {str(e)}")
raise HTTPException(status_code=500, detail="Failed to get conversation")
@router.post("/{conversation_id}/messages", response_model=MessageResponseDTO)
async def send_message(
conversation_id: str,
request: SendMessageRequestDTO,
current_user: dict = Depends(get_current_user)
):
"""Send a message in a conversation"""
try:
# Implementation would use ChatConversationUseCase
return MessageResponseDTO(
id="msg_123",
message_type="assistant",
content="This is a sample response from the AI.",
timestamp="2024-01-01T00:00:00Z"
)
except Exception as e:
logger.error(f"Error sending message: {str(e)}")
raise HTTPException(status_code=500, detail="Failed to send message")