Qualify : service IA + endpoint ai-chat + wizard Décider (3 cercles + AI 2-rounds)
- qualify_ai_service.py : stub IA 2-allers-retours (réversibilité + urgence) - qualify.py router : endpoint POST /ai-chat → AIChatRequest/AIChatResponse - test_qualifier_ai.py : 11 tests A1-A7 (questions stables, done=True au 2e round) - decisions/new.vue : wizard 4 étapes — branche mandat (liste + lien demande) / hors-mandat (3 cercles textarea), questions IA, résultat + boîte à outils, formulaire final Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
205
backend/app/services/qualify_ai_service.py
Normal file
205
backend/app/services/qualify_ai_service.py
Normal file
@@ -0,0 +1,205 @@
|
||||
"""AI framing service for decision qualification.
|
||||
|
||||
Orchestrates a 2-round conversation that clarifies reversibility and urgency
|
||||
before producing a final QualificationResult.
|
||||
|
||||
Currently a rule-based stub — will be replaced by Qwen3.6 (MacStudio) calls
|
||||
once the local LLM endpoint is available. The interface is stable: callers
|
||||
always receive AIFrameResponse; the underlying engine is swappable.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Schemas (dataclasses — no Pydantic dependency in the engine layer)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
class AIMessage:
    """One turn of the framing conversation between caller and engine."""

    role: str  # "user" | "assistant"
    content: str  # raw message text; answers use "key:value|key:value" format
|
||||
|
||||
|
||||
@dataclass
class AIQuestion:
    """A clarifying multiple-choice question returned in round 1."""

    id: str  # stable key echoed back in the answer format "id:<answer>"
    text: str  # user-facing question wording (French)
    options: list[str]  # suggested answers, in display order
|
||||
|
||||
|
||||
@dataclass
class AIQualifyResult:
    """Final qualification produced in round 2.

    Mirrors the rule-engine's qualification output field-for-field, except
    that `reasons` may be extended with notes derived from the AI answers.
    """

    decision_type: str  # engine enum's .value (plain string for transport)
    process: str  # recommended decision process — semantics defined by the engine
    recommended_modalities: list[str]
    recommend_onchain: bool  # whether on-chain recording is recommended
    onchain_reason: str | None  # rationale when recommend_onchain is set
    confidence: str  # engine-reported confidence label
    collective_available: bool
    record_in_observatory: bool
    reasons: list[str]  # engine reasons + reversibility/urgency notes
|
||||
|
||||
|
||||
@dataclass
class AIFrameRequest:
    """Input for one round of AI framing."""

    within_mandate: bool = False  # forwarded to the engine's QualificationInput
    affected_count: int | None = None  # people affected; None = unknown
    is_structural: bool = False  # structural-decision flag for the engine
    context: str | None = None  # free-text description of the decision
    messages: list[AIMessage] | None = None  # conversation so far; None → []

    def __post_init__(self):
        # Normalize None to an empty list so callers can always iterate.
        if self.messages is None:
            self.messages = []
|
||||
|
||||
|
||||
@dataclass
class AIFrameResponse:
    """Output of one framing round.

    Round 1: done=False, `questions` populated, `result`/`explanation` None.
    Round 2: done=True, `questions` empty, `result` and `explanation` set.
    """

    done: bool
    questions: list[AIQuestion]
    result: AIQualifyResult | None
    explanation: str | None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Standard clarifying questions (stub — same regardless of context)
|
||||
# Real Qwen integration will generate context-aware questions
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
_CLARIFYING_QUESTIONS: list[AIQuestion] = [
    # Q1 — reversibility: an "irréversible" answer adds a high-consensus
    # reason in _build_result.
    AIQuestion(
        id="reversibility",
        text="Si cette décision s'avère inappropriée dans 6 mois, peut-on facilement revenir en arrière ?",
        options=[
            "Oui, facilement",
            "Difficilement",
            "Non, c'est irréversible",
        ],
    ),
    # Q2 — urgency: an "Urgente" / "< 1" answer adds a short-delay-protocol
    # reason in _build_result.
    AIQuestion(
        id="urgency",
        text="Y a-t-il une contrainte temporelle sur cette décision ?",
        options=[
            "Urgente (< 1 semaine)",
            "Délai raisonnable (quelques semaines)",
            "Pas d'urgence",
        ],
    ),
]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Core function
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def ai_frame(request: AIFrameRequest) -> AIFrameResponse:
    """Run one round of AI framing.

    Round 1 (empty conversation): return the two clarifying questions,
    done=False.  Round 2 (messages present): parse the answers, qualify,
    and return the final result, done=True.
    """
    conversation = request.messages or []

    if conversation:
        # Second round: the caller has answered — produce the qualification.
        parsed = _parse_answers(conversation)
        return AIFrameResponse(
            done=True,
            questions=[],
            result=_build_result(request, parsed),
            explanation=_build_explanation(parsed),
        )

    # First round: no conversation yet — ask the standard questions.
    return AIFrameResponse(
        done=False,
        questions=list(_CLARIFYING_QUESTIONS),
        result=None,
        explanation=None,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _parse_answers(messages: list[AIMessage]) -> dict[str, str]:
|
||||
"""Extract question answers from the last user message.
|
||||
|
||||
Expected format: "reversibility:<answer>|urgency:<answer>"
|
||||
Anything not matching is treated as free text for context.
|
||||
"""
|
||||
answers: dict[str, str] = {}
|
||||
for msg in reversed(messages):
|
||||
if msg.role == "user" and "|" in msg.content and ":" in msg.content:
|
||||
for part in msg.content.split("|"):
|
||||
if ":" in part:
|
||||
key, _, val = part.partition(":")
|
||||
answers[key.strip()] = val.strip()
|
||||
break
|
||||
return answers
|
||||
|
||||
|
||||
def _build_result(request: AIFrameRequest, answers: dict[str, str]) -> AIQualifyResult:
    """Produce a qualification result enriched by the AI answers.

    Runs the rule-based qualifier on the request, then appends extra
    reasons derived from the reversibility/urgency answers.
    """
    # Imported lazily so the schema layer of this module has no hard
    # dependency on the engine package at import time.
    from app.engine.qualifier import (
        QualificationConfig,
        QualificationInput,
        qualify,
    )

    config = QualificationConfig()
    inp = QualificationInput(
        within_mandate=request.within_mandate,
        affected_count=request.affected_count,
        is_structural=request.is_structural,
        context_description=request.context,
    )
    base = qualify(inp, config)

    # Copy so we never mutate the engine's own reasons list.
    reasons = list(base.reasons)

    # Reversibility adjustment: an irreversible decision warrants a note
    # recommending stronger consensus.  (Structural/on-chain escalation is
    # already handled by the engine itself, so no extra adjustment here.)
    reversibility = answers.get("reversibility", "")
    if "irréversible" in reversibility.lower():
        reasons.append("Décision irréversible : consensus élevé recommandé.")

    # Urgency note: matches both the "Urgente" label and the "< 1 semaine"
    # option text.
    urgency = answers.get("urgency", "")
    if "urgente" in urgency.lower() or "< 1" in urgency:
        reasons.append("Urgence signalée : privilégier un protocole à délai court.")

    return AIQualifyResult(
        decision_type=base.decision_type.value,
        process=base.process,
        recommended_modalities=base.recommended_modalities,
        recommend_onchain=base.recommend_onchain,
        onchain_reason=base.onchain_reason,
        confidence=base.confidence,
        collective_available=base.collective_available,
        record_in_observatory=base.record_in_observatory,
        reasons=reasons,
    )
|
||||
|
||||
|
||||
def _build_explanation(answers: dict[str, str]) -> str:
|
||||
parts = []
|
||||
rev = answers.get("reversibility", "")
|
||||
urg = answers.get("urgency", "")
|
||||
if rev:
|
||||
parts.append(f"Réversibilité : {rev}.")
|
||||
if urg:
|
||||
parts.append(f"Urgence : {urg}.")
|
||||
return " ".join(parts) if parts else "Qualification basée sur les éléments fournis."
|
||||
Reference in New Issue
Block a user