Qualify : service IA + endpoint ai-chat + wizard Décider (3 cercles + AI 2-rounds)
- qualify_ai_service.py : stub IA 2-allers-retours (réversibilité + urgence)
- qualify.py router : endpoint POST /ai-chat → AIChatRequest/AIChatResponse
- test_qualifier_ai.py : 11 tests A1-A7 (questions stables, done=True au 2e round)
- decisions/new.vue : wizard 4 étapes — branche mandat (liste + lien demande) / hors-mandat (3 cercles textarea), questions IA, résultat + boîte à outils, formulaire final

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -5,6 +5,7 @@ from __future__ import annotations
|
|||||||
from dataclasses import asdict
|
from dataclasses import asdict
|
||||||
|
|
||||||
from fastapi import APIRouter, Depends
|
from fastapi import APIRouter, Depends
|
||||||
|
from pydantic import BaseModel
|
||||||
from sqlalchemy import select
|
from sqlalchemy import select
|
||||||
from sqlalchemy.ext.asyncio import AsyncSession
|
from sqlalchemy.ext.asyncio import AsyncSession
|
||||||
|
|
||||||
@@ -18,10 +19,59 @@ from app.schemas.qualification import (
|
|||||||
QualifyResponse,
|
QualifyResponse,
|
||||||
)
|
)
|
||||||
from app.services.auth_service import get_current_identity
|
from app.services.auth_service import get_current_identity
|
||||||
|
from app.services.qualify_ai_service import (
|
||||||
|
AIFrameRequest,
|
||||||
|
AIFrameResponse,
|
||||||
|
AIMessage,
|
||||||
|
AIQuestion,
|
||||||
|
AIQualifyResult,
|
||||||
|
ai_frame,
|
||||||
|
)
|
||||||
|
|
||||||
router = APIRouter()
|
router = APIRouter()
|
||||||
|
|
||||||
|
|
||||||
|
# ── Pydantic wrappers for AI chat (FastAPI needs Pydantic, not dataclasses) ──
|
||||||
|
|
||||||
|
|
||||||
|
class AIMessagePayload(BaseModel):
    """One chat message exchanged with the AI framing endpoint.

    Mirrors the AIMessage dataclass from qualify_ai_service (the endpoint
    converts between the two so the engine layer stays Pydantic-free).
    """

    # "user" | "assistant" — same convention as AIMessage.role
    role: str
    content: str
|
||||||
|
|
||||||
|
|
||||||
|
class AIChatRequest(BaseModel):
    """Request body for POST /ai-chat.

    The first four fields describe the decision being qualified; `messages`
    carries the conversation so far (empty list → round 1, the service
    returns clarifying questions).
    """

    within_mandate: bool = False
    affected_count: int | None = None
    is_structural: bool = False
    context: str | None = None
    # Conversation history; Pydantic copies this default per instance,
    # so the shared-mutable-default pitfall does not apply here.
    messages: list[AIMessagePayload] = []
|
||||||
|
|
||||||
|
|
||||||
|
class AIQuestionOut(BaseModel):
    """A clarifying question presented to the user (mirrors AIQuestion)."""

    # Stable identifier (e.g. "reversibility", "urgency") used by the wizard
    id: str
    text: str
    # Pre-defined answer choices displayed as options
    options: list[str]
|
||||||
|
|
||||||
|
|
||||||
|
class AIQualifyResultOut(BaseModel):
    """Final qualification verdict (mirrors the AIQualifyResult dataclass).

    Built from the dataclass via `asdict(...)` in the /ai-chat handler,
    so field names must stay in sync with AIQualifyResult.
    """

    decision_type: str
    process: str
    recommended_modalities: list[str]
    recommend_onchain: bool
    onchain_reason: str | None
    confidence: str
    collective_available: bool
    record_in_observatory: bool
    # Human-readable justifications accumulated by the engine and the AI round
    reasons: list[str]
|
||||||
|
|
||||||
|
|
||||||
|
class AIChatResponse(BaseModel):
    """Response for POST /ai-chat.

    Round 1: done=False, `questions` populated, `result` is None.
    Round 2: done=True, `questions` empty, `result` populated.
    """

    done: bool
    questions: list[AIQuestionOut] = []
    result: AIQualifyResultOut | None = None
    explanation: str | None = None
|
||||||
|
|
||||||
|
|
||||||
async def _load_config(db: AsyncSession) -> QualificationConfig:
|
async def _load_config(db: AsyncSession) -> QualificationConfig:
|
||||||
"""Load the active QualificationProtocol from DB, or fall back to defaults."""
|
"""Load the active QualificationProtocol from DB, or fall back to defaults."""
|
||||||
result = await db.execute(
|
result = await db.execute(
|
||||||
@@ -61,6 +111,32 @@ async def qualify_decision(
|
|||||||
return QualifyResponse(**asdict(result))
|
return QualifyResponse(**asdict(result))
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/ai-chat", response_model=AIChatResponse)
async def ai_chat(payload: AIChatRequest) -> AIChatResponse:
    """Run one round of AI-assisted qualification framing.

    Round 1 (messages=[]) → returns 2 clarifying questions.
    Round 2 (messages set) → returns final qualification result.

    No auth required — advisory endpoint.
    """
    # Convert the Pydantic payload into the engine-layer dataclasses.
    history = [AIMessage(role=msg.role, content=msg.content) for msg in payload.messages]
    frame = ai_frame(
        AIFrameRequest(
            within_mandate=payload.within_mandate,
            affected_count=payload.affected_count,
            is_structural=payload.is_structural,
            context=payload.context,
            messages=history,
        )
    )

    # Convert the engine response back into the API's Pydantic wrappers.
    questions_out = [
        AIQuestionOut(id=q.id, text=q.text, options=q.options) for q in frame.questions
    ]
    result_out = AIQualifyResultOut(**asdict(frame.result)) if frame.result else None
    return AIChatResponse(
        done=frame.done,
        questions=questions_out,
        result=result_out,
        explanation=frame.explanation,
    )
|
||||||
|
|
||||||
|
|
||||||
@router.get("/protocol", response_model=QualificationProtocolOut | None)
|
@router.get("/protocol", response_model=QualificationProtocolOut | None)
|
||||||
async def get_active_protocol(
|
async def get_active_protocol(
|
||||||
db: AsyncSession = Depends(get_db),
|
db: AsyncSession = Depends(get_db),
|
||||||
|
|||||||
205
backend/app/services/qualify_ai_service.py
Normal file
205
backend/app/services/qualify_ai_service.py
Normal file
@@ -0,0 +1,205 @@
|
|||||||
|
"""AI framing service for decision qualification.
|
||||||
|
|
||||||
|
Orchestrates a 2-round conversation that clarifies reversibility and urgency
|
||||||
|
before producing a final QualificationResult.
|
||||||
|
|
||||||
|
Currently a rule-based stub — will be replaced by Qwen3.6 (MacStudio) calls
|
||||||
|
once the local LLM endpoint is available. The interface is stable: callers
|
||||||
|
always receive AIFrameResponse; the underlying engine is swappable.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Schemas (dataclasses — no Pydantic dependency in the engine layer)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AIMessage:
    """One turn in the framing conversation."""

    role: str  # "user" | "assistant"
    content: str
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AIQuestion:
    """A clarifying question with pre-defined answer options."""

    # Stable identifier ("reversibility", "urgency") — part of the API contract
    id: str
    text: str
    options: list[str]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AIQualifyResult:
    """Final qualification verdict produced after round 2.

    Field names are mirrored by the router's AIQualifyResultOut Pydantic
    model (constructed via asdict), so keep them in sync.
    """

    decision_type: str  # "individual" | "collective" (engine enum value)
    process: str
    recommended_modalities: list[str]
    recommend_onchain: bool
    onchain_reason: str | None
    confidence: str
    collective_available: bool
    record_in_observatory: bool
    reasons: list[str]  # human-readable justifications
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AIFrameRequest:
    """Input for one round of AI framing.

    The first four fields describe the decision; `messages` carries the
    conversation so far (empty → round 1 questions, non-empty → round 2
    final qualification).
    """

    within_mandate: bool = False
    affected_count: int | None = None
    is_structural: bool = False
    context: str | None = None
    # Idiomatic mutable default — each instance gets its own fresh list
    # (replaces the previous Optional + manual __post_init__ workaround).
    messages: list[AIMessage] = field(default_factory=list)

    def __post_init__(self) -> None:
        # Tolerate callers that still pass messages=None explicitly.
        if self.messages is None:
            self.messages = []
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AIFrameResponse:
    """Result of one framing round."""

    done: bool  # True once a final result is available (round 2)
    questions: list[AIQuestion]  # non-empty only on round 1
    result: AIQualifyResult | None  # set only when done=True
    explanation: str | None
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
# Standard clarifying questions (stub — same regardless of context)
# Real Qwen integration will generate context-aware questions
# NOTE: the ids ("reversibility", "urgency") are a stable contract —
# _parse_answers and the frontend wizard key off them.
# ---------------------------------------------------------------------------


_CLARIFYING_QUESTIONS: list[AIQuestion] = [
    AIQuestion(
        id="reversibility",
        text="Si cette décision s'avère inappropriée dans 6 mois, peut-on facilement revenir en arrière ?",
        options=[
            "Oui, facilement",
            "Difficilement",
            "Non, c'est irréversible",
        ],
    ),
    AIQuestion(
        id="urgency",
        text="Y a-t-il une contrainte temporelle sur cette décision ?",
        options=[
            "Urgente (< 1 semaine)",
            "Délai raisonnable (quelques semaines)",
            "Pas d'urgence",
        ],
    ),
]
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Core function
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def ai_frame(request: AIFrameRequest) -> AIFrameResponse:
    """Run one round of AI framing.

    Round 1 (messages=[]) → return 2 clarifying questions, done=False
    Round 2 (messages set) → parse answers, qualify, return result, done=True
    """
    conversation = request.messages or []

    if conversation:
        # ── Round 2: answers present → produce the final qualification ──
        answers = _parse_answers(conversation)
        return AIFrameResponse(
            done=True,
            questions=[],
            result=_build_result(request, answers),
            explanation=_build_explanation(answers),
        )

    # ── Round 1: nothing asked yet → hand back the standard questions ──
    return AIFrameResponse(
        done=False,
        questions=list(_CLARIFYING_QUESTIONS),
        result=None,
        explanation=None,
    )
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_answers(messages: list[AIMessage]) -> dict[str, str]:
    """Extract question answers from the last user message.

    Expected format: "reversibility:<answer>|urgency:<answer>"
    Anything not matching is treated as free text for context.
    """
    for msg in reversed(messages):
        structured = msg.role == "user" and "|" in msg.content and ":" in msg.content
        if not structured:
            continue
        # Most recent structured user message wins; stop after parsing it.
        pairs = (
            part.partition(":")
            for part in msg.content.split("|")
            if ":" in part
        )
        return {key.strip(): value.strip() for key, _, value in pairs}
    return {}
|
||||||
|
|
||||||
|
|
||||||
|
def _build_result(request: AIFrameRequest, answers: dict[str, str]) -> AIQualifyResult:
    """Produce a qualification result enriched by the AI answers.

    Runs the deterministic qualification engine on the request facts, then
    appends extra reasons derived from the reversibility/urgency answers.
    The engine's verdict fields are passed through unchanged.
    """
    # Local import keeps the engine dependency out of this module's top level.
    from app.engine.qualifier import (
        QualificationConfig,
        QualificationInput,
        qualify,
    )

    config = QualificationConfig()
    inp = QualificationInput(
        within_mandate=request.within_mandate,
        affected_count=request.affected_count,
        is_structural=request.is_structural,
        context_description=request.context,
    )
    base = qualify(inp, config)

    reasons = list(base.reasons)

    # Reversibility adjustment — irreversible decisions warrant a note that
    # stronger consensus is recommended. (The onchain recommendation itself
    # is already handled by the engine for structural decisions, so the
    # previous dead `if ...: pass` branch has been removed.)
    reversibility = answers.get("reversibility", "")
    if "irréversible" in reversibility.lower():
        reasons.append("Décision irréversible : consensus élevé recommandé.")

    # Urgency note — flag that a short-deadline protocol should be preferred.
    urgency = answers.get("urgency", "")
    if "urgente" in urgency.lower() or "< 1" in urgency:
        reasons.append("Urgence signalée : privilégier un protocole à délai court.")

    return AIQualifyResult(
        decision_type=base.decision_type.value,
        process=base.process,
        recommended_modalities=base.recommended_modalities,
        recommend_onchain=base.recommend_onchain,
        onchain_reason=base.onchain_reason,
        confidence=base.confidence,
        collective_available=base.collective_available,
        record_in_observatory=base.record_in_observatory,
        reasons=reasons,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _build_explanation(answers: dict[str, str]) -> str:
    """Summarise the reversibility/urgency answers in one short sentence."""
    fragments = [
        f"{label} : {value}."
        for label, value in (
            ("Réversibilité", answers.get("reversibility", "")),
            ("Urgence", answers.get("urgency", "")),
        )
        if value
    ]
    if not fragments:
        return "Qualification basée sur les éléments fournis."
    return " ".join(fragments)
|
||||||
172
backend/app/tests/test_qualifier_ai.py
Normal file
172
backend/app/tests/test_qualifier_ai.py
Normal file
@@ -0,0 +1,172 @@
|
|||||||
|
"""TDD — Service AI de cadrage des décisions (qualify/ai-chat).
|
||||||
|
|
||||||
|
Invariants testés :
|
||||||
|
A1 Premier appel (messages=[]) → retourne toujours 2 questions, done=False
|
||||||
|
A2 Les 2 questions couvrent réversibilité et urgence (ids stables)
|
||||||
|
A3 Deuxième appel (messages=[q+réponse]) → done=True, résultat qualifié
|
||||||
|
A4 Réponse "irréversible" → recommend_onchain conservé si is_structural
|
||||||
|
A5 Réponse "urgente" → raison "urgence" présente dans le résultat
|
||||||
|
A6 La qualification finale respecte les règles du moteur (R1/R2/R4/R5/R6)
|
||||||
|
A7 Sans contexte, les questions restent les mêmes (stub ne dépend pas du LLM)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from app.services.qualify_ai_service import (
|
||||||
|
AIFrameRequest,
|
||||||
|
AIMessage,
|
||||||
|
ai_frame,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Baseline round-1 request shared by the tests (messages=[] → question round).
DEFAULT_REQUEST = AIFrameRequest(
    context="Révision du règlement intérieur de l'association",
    within_mandate=False,
    affected_count=20,
    is_structural=False,
    messages=[],
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# A1 — Premier appel → 2 questions, done=False
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_a1_first_call_returns_questions():
    # Round 1: the stub must always hand back exactly two questions.
    response = ai_frame(DEFAULT_REQUEST)
    assert response.done is False
    assert len(response.questions) == 2
|
||||||
|
|
||||||
|
|
||||||
|
def test_a1_first_call_result_is_none():
    # No qualification result before the user has answered anything.
    response = ai_frame(DEFAULT_REQUEST)
    assert response.result is None
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# A2 — Questions couvrent réversibilité et urgence
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_a2_questions_have_stable_ids():
    # The two stub questions must keep their contractual ids.
    question_ids = {question.id for question in ai_frame(DEFAULT_REQUEST).questions}
    assert "reversibility" in question_ids
    assert "urgency" in question_ids
|
||||||
|
|
||||||
|
|
||||||
|
def test_a2_questions_have_options():
    # Every clarifying question must offer at least two selectable answers,
    # otherwise the wizard's option buttons have nothing to render.
    resp = ai_frame(DEFAULT_REQUEST)
    for q in resp.questions:
        assert len(q.options) >= 2, f"Question '{q.id}' doit avoir au moins 2 options"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# A3 — Deuxième appel (avec réponses) → done=True + résultat
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _make_second_request(reversibility_ans: str, urgency_ans: str, **kwargs) -> AIFrameRequest:
    """Build a round-2 request: assistant questions plus one bundled user answer."""
    round_one = ai_frame(DEFAULT_REQUEST)
    conversation = [
        AIMessage(role="assistant", content=question.text)
        for question in round_one.questions
    ]
    # One user message bundling all answers
    conversation.append(
        AIMessage(
            role="user",
            content=f"reversibility:{reversibility_ans}|urgency:{urgency_ans}",
        )
    )
    overrides = {**vars(DEFAULT_REQUEST), "messages": conversation, **kwargs}
    return AIFrameRequest(**overrides)
|
||||||
|
|
||||||
|
|
||||||
|
def test_a3_second_call_is_done():
    # Any complete answer set finishes the conversation on round 2.
    response = ai_frame(_make_second_request("Difficilement", "Pas d'urgence"))
    assert response.done is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_a3_second_call_has_result():
    # Round 2 must carry a qualification result with a valid decision type.
    response = ai_frame(_make_second_request("Difficilement", "Pas d'urgence"))
    assert response.result is not None
    assert response.result.decision_type in ("individual", "collective")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# A4 — Irréversible + structurant → recommend_onchain
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_a4_irreversible_structural_recommends_onchain():
    # Structural + irreversible → the engine's on-chain recommendation holds.
    request = _make_second_request(
        "Non, c'est irréversible",
        "Pas d'urgence",
        is_structural=True,
    )
    response = ai_frame(request)
    assert response.result is not None
    assert response.result.recommend_onchain is True
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# A5 — Urgence → raison présente
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_a5_urgent_adds_urgency_reason():
    # An urgent answer must surface an urgency-related reason in the result.
    response = ai_frame(_make_second_request("Oui, facilement", "Urgente (< 1 semaine)"))
    assert response.result is not None
    joined_reasons = " ".join(response.result.reasons).lower()
    assert "urgence" in joined_reasons or "urgent" in joined_reasons
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# A6 — Résultat respecte les règles du moteur
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_a6_within_mandate_gives_individual():
    # Within mandate → the engine classifies the decision as individual.
    conversation = [
        AIMessage(role="assistant", content="q"),
        AIMessage(role="user", content="reversibility:Facilement|urgency:Pas d'urgence"),
    ]
    request = AIFrameRequest(
        within_mandate=True,
        affected_count=None,
        messages=conversation,
    )
    response = ai_frame(request)
    assert response.done is True
    assert response.result is not None
    assert response.result.decision_type == "individual"
    assert response.result.process == "consultation_avis"
|
||||||
|
|
||||||
|
|
||||||
|
def test_a6_large_group_gives_collective():
    # A large affected group pushes the decision into the collective branch.
    response = ai_frame(_make_second_request("Difficilement", "Pas d'urgence", affected_count=100))
    assert response.result is not None
    assert response.result.decision_type == "collective"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# A7 — Sans contexte, mêmes questions (stub ne dépend pas du LLM)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_a7_no_context_same_question_ids():
    # The stub must ask the same questions whether or not context is given.
    no_context_request = AIFrameRequest(
        context=None,
        within_mandate=False,
        affected_count=20,
        messages=[],
    )
    ids_with_context = {q.id for q in ai_frame(DEFAULT_REQUEST).questions}
    ids_without_context = {q.id for q in ai_frame(no_context_request).questions}
    assert ids_with_context == ids_without_context
|
||||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user