Files
decision/backend/app/routers/qualify.py
Yvv 9b6322c546 IA : Claude substitut Qwen + auth rate limit prototype
- qualify_ai_service : ai_frame_async() avec Claude Haiku
  · round 1 → questions contextualisées si ANTHROPIC_API_KEY définie
  · round 2 → explication enrichie par Claude
  · fallback transparent sur ai_frame() si pas de clé (tests inchangés)
- config : ANTHROPIC_API_KEY + ANTHROPIC_MODEL (claude-haiku-4-5-20251001)
- requirements : anthropic>=0.97.0
- main : auth rate limit = RATE_LIMIT_DEFAULT partout (prototype mode)
  → supporte accès démo/test sans lockout en prod comme en dev

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-23 23:55:10 +02:00

187 lines
5.7 KiB
Python

"""Qualify router: decision qualification engine endpoint."""
from __future__ import annotations

import json
from dataclasses import asdict

from fastapi import APIRouter, Depends
from pydantic import BaseModel
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from app.database import get_db
from app.engine.qualifier import QualificationConfig, QualificationInput, qualify
from app.models.qualification import QualificationProtocol
from app.schemas.qualification import (
    QualificationProtocolCreate,
    QualificationProtocolOut,
    QualifyRequest,
    QualifyResponse,
)
from app.services.auth_service import get_current_identity
from app.services.qualify_ai_service import (
    AIFrameRequest,
    AIFrameResponse,
    AIMessage,
    AIQuestion,
    AIQualifyResult,
    ai_frame_async,
)
router = APIRouter()
# ── Pydantic wrappers for AI chat (FastAPI needs Pydantic, not dataclasses) ──
class AIMessagePayload(BaseModel):
role: str
content: str
class AIChatRequest(BaseModel):
within_mandate: bool = False
affected_count: int | None = None
is_structural: bool = False
context: str | None = None
messages: list[AIMessagePayload] = []
class AIQuestionOut(BaseModel):
id: str
text: str
options: list[str]
class AIQualifyResultOut(BaseModel):
decision_type: str
process: str
recommended_modalities: list[str]
recommend_onchain: bool
onchain_reason: str | None
confidence: str
collective_available: bool
record_in_observatory: bool
reasons: list[str]
class AIChatResponse(BaseModel):
done: bool
questions: list[AIQuestionOut] = []
result: AIQualifyResultOut | None = None
explanation: str | None = None
async def _load_config(db: AsyncSession) -> QualificationConfig:
"""Load the active QualificationProtocol from DB, or fall back to defaults."""
result = await db.execute(
select(QualificationProtocol)
.where(QualificationProtocol.is_active == True) # noqa: E712
.order_by(QualificationProtocol.created_at.desc())
.limit(1)
)
proto = result.scalar_one_or_none()
if proto is None:
return QualificationConfig()
return QualificationConfig(
small_group_max=proto.small_group_max,
collective_wot_min=proto.collective_wot_min,
default_modalities=proto.default_modalities,
)
@router.post("/", response_model=QualifyResponse)
async def qualify_decision(
payload: QualifyRequest,
db: AsyncSession = Depends(get_db),
) -> QualifyResponse:
"""Qualify a decision: determine type, process, and modalities.
No authentication required — this is an advisory endpoint that helps
users understand which decision pathway fits their situation.
"""
config = await _load_config(db)
inp = QualificationInput(
within_mandate=payload.within_mandate,
affected_count=payload.affected_count,
is_structural=payload.is_structural,
context_description=payload.context_description,
)
result = qualify(inp, config)
return QualifyResponse(**asdict(result))
@router.post("/ai-chat", response_model=AIChatResponse)
async def ai_chat(payload: AIChatRequest) -> AIChatResponse:
"""Run one round of AI-assisted qualification framing.
Round 1 (messages=[]) → returns 2 clarifying questions.
Round 2 (messages set) → returns final qualification result.
No auth required — advisory endpoint.
"""
req = AIFrameRequest(
within_mandate=payload.within_mandate,
affected_count=payload.affected_count,
is_structural=payload.is_structural,
context=payload.context,
messages=[AIMessage(role=m.role, content=m.content) for m in payload.messages],
)
resp = await ai_frame_async(req)
return AIChatResponse(
done=resp.done,
questions=[AIQuestionOut(id=q.id, text=q.text, options=q.options) for q in resp.questions],
result=AIQualifyResultOut(**asdict(resp.result)) if resp.result else None,
explanation=resp.explanation,
)
@router.get("/protocol", response_model=QualificationProtocolOut | None)
async def get_active_protocol(
db: AsyncSession = Depends(get_db),
) -> QualificationProtocolOut | None:
"""Return the currently active qualification protocol (thresholds)."""
result = await db.execute(
select(QualificationProtocol)
.where(QualificationProtocol.is_active == True) # noqa: E712
.order_by(QualificationProtocol.created_at.desc())
.limit(1)
)
proto = result.scalar_one_or_none()
if proto is None:
return None
return QualificationProtocolOut.model_validate(proto)
@router.post("/protocol", response_model=QualificationProtocolOut, status_code=201)
async def create_protocol(
payload: QualificationProtocolCreate,
db: AsyncSession = Depends(get_db),
_identity=Depends(get_current_identity),
) -> QualificationProtocolOut:
"""Create a new qualification protocol (requires auth).
Deactivates the current active protocol before saving the new one.
"""
# Deactivate current
current = await db.execute(
select(QualificationProtocol).where(QualificationProtocol.is_active == True) # noqa: E712
)
for proto in current.scalars().all():
proto.is_active = False
import json
proto = QualificationProtocol(
name=payload.name,
description=payload.description,
small_group_max=payload.small_group_max,
collective_wot_min=payload.collective_wot_min,
default_modalities_json=json.dumps(payload.default_modalities),
is_active=True,
)
db.add(proto)
await db.commit()
await db.refresh(proto)
return QualificationProtocolOut.model_validate(proto)