TDD Qualifier : moteur de qualification des décisions (R1–R12)

- engine/qualifier.py : QualificationConfig (seuils configurables),
  QualificationInput, QualificationResult, DecisionType enum, qualify()
- Règles : within_mandate→consultation_avis, affected_count→routing,
  is_structural→recommend_onchain, seuils lus depuis config
- Stub suggest_modalities_from_context() — interface LLM définie,
  intégration Qwen3.6 (MacStudio) à venir
- 22 tests, 212/212 verts, zéro régression

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Yvv
2026-04-23 16:16:35 +02:00
parent fc84600f97
commit 428299c9c8
2 changed files with 535 additions and 0 deletions

View File

@@ -0,0 +1,228 @@
"""Decision qualification engine.
Pure functions — no database, no I/O.
Takes a QualificationInput + QualificationConfig and returns a QualificationResult.
LLM integration (suggest_modalities_from_context) is stubbed pending local Qwen deployment.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
class DecisionType(str, Enum):
    """Kind of decision the qualifier can recommend."""

    # str mixin: members compare equal to, and serialize as, plain strings.
    INDIVIDUAL = "individual"
    COLLECTIVE = "collective"
# ---------------------------------------------------------------------------
# Configuration (thresholds — stored as a QualificationProtocol in DB)
# ---------------------------------------------------------------------------
@dataclass
class QualificationConfig:
    """Configurable thresholds for the qualification engine.

    These defaults will be seeded as a QualificationProtocol record so they
    can be adjusted through the admin interface without code changes.
    """

    # affected_count <= this  -> always individual
    individual_max: int = 1
    # affected_count <= this  -> individual recommended, collective available
    small_group_max: int = 5
    # affected_count > this   -> collective required (WoT formula applies)
    collective_wot_min: int = 50
    # Default modalities shown when collective is chosen (ordered by relevance).
    default_factory_list = None  # placeholder removed below
    default_modalities: list[str] = field(
        default_factory=lambda: [
            "vote_wot",
            "vote_smith",
            "consultation_avis",
            "election",
        ]
    )
# ---------------------------------------------------------------------------
# Input / Output
# ---------------------------------------------------------------------------
@dataclass
class QualificationInput:
    """Facts about a decision, as gathered before qualification.

    Every field is optional; defaults are the most conservative reading.
    """

    # True when the decision falls inside an existing mandate's scope.
    within_mandate: bool = False
    # Number of people affected; qualify() treats None as 1.
    affected_count: int | None = None
    # True when the decision has force of law or triggers a machine action.
    is_structural: bool = False
    # Free-text context — reserved for the (stubbed) LLM suggestion.
    context_description: str | None = None
@dataclass
class QualificationResult:
    """Outcome of qualify(): recommended type, process, and explanations."""

    decision_type: DecisionType
    process: str
    recommended_modalities: list[str]
    recommend_onchain: bool
    # One of "required" | "recommended" | "optional".
    confidence: str
    collective_available: bool
    # Human-readable justifications accumulated by the rule engine.
    reasons: list[str]
    # Long-form on-chain justification; None when not structural.
    onchain_reason: str | None = None
# ---------------------------------------------------------------------------
# LLM stub
# ---------------------------------------------------------------------------
def suggest_modalities_from_context(
    context: str,
    config: QualificationConfig,
) -> list[str]:
    """Suggest voting modalities from a natural-language context description.

    Stub — always returns an empty list until local Qwen (qwen3.6) is
    integrated.  Once implemented, this will call the LLM API and return an
    ordered list of modality slugs drawn from config.default_modalities.
    """
    # Deliberate no-op: callers treat [] as "no suggestion".
    return []
# ---------------------------------------------------------------------------
# Core engine
# ---------------------------------------------------------------------------
def qualify(inp: QualificationInput, config: QualificationConfig) -> QualificationResult:
    """Qualify a decision and recommend a type, process, and modalities.

    Rules (in priority order):
      R1/R2 within_mandate → individual + consultation_avis, no modalities
      R3    affected_count == 1 → individual + personal
      R4    affected_count ≤ small_group_max → individual recommended,
            collective available
      R5    small_group_max < affected_count ≤ collective_wot_min →
            collective recommended
      R6    affected_count > collective_wot_min → collective required (WoT)
      R7/R8 is_structural → recommend_onchain with reason
    """
    reasons: list[str] = []

    # ── R1/R2: mandate scope overrides everything ───────────────────────────
    if inp.within_mandate:
        reasons.append("Décision dans le périmètre d'un mandat existant.")
        return QualificationResult(
            decision_type=DecisionType.INDIVIDUAL,
            process="consultation_avis",
            recommended_modalities=[],
            recommend_onchain=_onchain(inp, reasons),
            confidence="required",
            collective_available=False,
            reasons=reasons,
            onchain_reason=_onchain_reason(inp),
        )

    # Unknown head-count is treated conservatively as a single person.
    count = inp.affected_count if inp.affected_count is not None else 1

    # ── R3: single person ───────────────────────────────────────────────────
    if count <= config.individual_max:
        reasons.append("Une seule personne concernée.")
        return QualificationResult(
            decision_type=DecisionType.INDIVIDUAL,
            process="personal",
            recommended_modalities=[],
            recommend_onchain=_onchain(inp, reasons),
            confidence="required",
            collective_available=False,
            reasons=reasons,
            onchain_reason=_onchain_reason(inp),
        )

    # ── R4: small group → individual recommended, collective available ───────
    if count <= config.small_group_max:
        reasons.append(
            f"{count} personnes concernées : décision individuelle recommandée, "
            "vote collectif possible."
        )
        # NOTE(review): recommended_modalities is deliberately left empty even
        # though collective_available is True — presumably the UI only fetches
        # modalities once the user opts into a collective vote.  A dead
        # `_collect_modalities(inp, config)` call whose result was discarded
        # was removed from this branch; confirm against the UI flow.
        return QualificationResult(
            decision_type=DecisionType.INDIVIDUAL,
            process="personal",
            recommended_modalities=[],
            recommend_onchain=_onchain(inp, reasons),
            confidence="recommended",
            collective_available=True,
            reasons=reasons,
            onchain_reason=_onchain_reason(inp),
        )

    # ── R5/R6: medium or large group → collective ────────────────────────────
    modalities = _collect_modalities(inp, config)
    if count <= config.collective_wot_min:
        reasons.append(
            f"{count} personnes concernées : vote collectif recommandé."
        )
        confidence = "recommended"
    else:
        reasons.append(
            f"{count} personnes concernées : vote collectif obligatoire "
            "(formule WoT applicable)."
        )
        confidence = "required"
        # R6 mandates the WoT formula: make sure vote_wot leads the list.
        if "vote_wot" not in modalities:
            modalities = ["vote_wot"] + modalities
    return QualificationResult(
        decision_type=DecisionType.COLLECTIVE,
        process="vote_collective",
        recommended_modalities=modalities,
        recommend_onchain=_onchain(inp, reasons),
        confidence=confidence,
        collective_available=True,
        reasons=reasons,
        onchain_reason=_onchain_reason(inp),
    )
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _onchain(inp: QualificationInput, reasons: list[str]) -> bool:
if inp.is_structural:
reasons.append(
"Décision structurante : gravure on-chain recommandée "
"(a force de loi ou déclenche une action machine)."
)
return inp.is_structural
def _onchain_reason(inp: QualificationInput) -> str | None:
if not inp.is_structural:
return None
return (
"Cette décision est structurante : elle a valeur de loi au sein de la "
"communauté ou déclenche une action machine (ex : runtime upgrade). "
"La gravure on-chain (IPFS + system.remark) garantit son immuabilité "
"et sa vérifiabilité publique."
)
def _collect_modalities(
inp: QualificationInput,
config: QualificationConfig,
) -> list[str]:
"""Combine default modalities with any LLM suggestions (stub for now)."""
llm_suggestions = []
if inp.context_description:
llm_suggestions = suggest_modalities_from_context(inp.context_description, config)
seen: set[str] = set()
result: list[str] = []
for m in llm_suggestions + config.default_modalities:
if m not in seen:
seen.add(m)
result.append(m)
return result