Corrections moteur (TDD) : - R2 : within_mandate → record_in_observatory=True (Observatoire des décisions) - R6 : >50 personnes → collective recommandé, pas obligatoire (confidence=recommended) - R3 supprimée : affected_count=1 hors périmètre de l'outil - R9-R12 renommés G1-G4 (garde-fous internes) - 23 tests, 213/213 verts Étape 1 — Router /api/v1/qualify : - POST / → qualify() avec config depuis DB ou defaults - GET /protocol → protocole actif - POST /protocol → créer/remplacer (auth requise) Étape 2 — Modèle QualificationProtocol : - Table qualification_protocols (seuils configurables via admin) - Migration Alembic + seed du protocole par défaut Étape 3 — Wizard frontend decisions/new.vue : - Étape 1 : formulaire de qualification (mandat, affected_count, structurant, contexte) - Étape 2 : résultat (type, raisons, modalités, observatoire, on-chain) - Étape 3 : formulaire de décision (titre, description, protocole si collectif) Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
212 lines
7.5 KiB
Python
212 lines
7.5 KiB
Python
"""Decision qualification engine.
|
|
|
|
Pure functions — no database, no I/O.
|
|
Takes a QualificationInput + QualificationConfig and returns a QualificationResult.
|
|
|
|
LLM integration (suggest_modalities_from_context) is stubbed pending local Qwen deployment.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
from dataclasses import dataclass, field
|
|
from enum import Enum
|
|
|
|
|
|
class DecisionType(str, Enum):
    """Kind of decision the engine can recommend."""

    INDIVIDUAL = "individual"
    COLLECTIVE = "collective"
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Configuration (thresholds — stored as QualificationProtocol in DB)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
@dataclass
class QualificationConfig:
    """Configurable thresholds for the qualification engine.

    Persisted as a QualificationProtocol record so the thresholds can be
    tuned through the admin interface without a code change.

    Attributes:
        small_group_max: affected_count <= this -> individual recommended,
            collective still available.
        collective_wot_min: affected_count > this -> the WoT formula becomes
            applicable (collective stays recommended, not required).
        default_modalities: ordered list of voting modalities offered by default.

    Note: affected_count must be >= 2 — decisions affecting only the author
    have no place in this tool.
    """

    small_group_max: int = 5
    collective_wot_min: int = 50

    # Mutable default — must go through default_factory so each instance
    # gets its own list.
    default_modalities: list[str] = field(
        default_factory=lambda: [
            "vote_wot",
            "vote_smith",
            "consultation_avis",
            "election",
        ]
    )
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Input / Output
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
@dataclass
class QualificationInput:
    """Answers gathered from the qualification wizard for one decision."""

    # Decision falls within the scope of an existing mandate (rules R1/R2).
    within_mandate: bool = False
    affected_count: int | None = None  # must be >= 2 when within_mandate=False
    # Structural decision: has force of law or triggers a machine action (R7/R8).
    is_structural: bool = False
    context_description: str | None = None  # reserved for LLM suggestion
|
|
|
|
|
|
@dataclass
class QualificationResult:
    """Outcome of the qualification engine for one decision."""

    decision_type: DecisionType
    # Process identifier, e.g. "consultation_avis", "personal", "vote_collective".
    process: str
    recommended_modalities: list[str]
    # Whether on-chain recording (IPFS + system.remark) is recommended.
    recommend_onchain: bool
    onchain_reason: str | None
    confidence: str  # "required" | "recommended" | "optional"
    # Whether a collective vote remains an option for this decision.
    collective_available: bool
    record_in_observatory: bool  # True → decision must be logged in Observatoire
    # User-facing explanations (French) for every recommendation made.
    reasons: list[str]
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# LLM stub
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
def suggest_modalities_from_context(
    context: str,
    config: QualificationConfig,
) -> list[str]:
    """Suggest voting modalities from a natural-language context description.

    Stub — always returns an empty list until the local Qwen model
    (qwen3.6, MacStudio) is integrated. The real implementation will call
    the LLM API and return an ordered subset of ``config.default_modalities``
    ranked by contextual relevance.
    """
    # No LLM available yet — callers fall back to the configured defaults.
    return []
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Core engine
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
def qualify(inp: QualificationInput, config: QualificationConfig) -> QualificationResult:
    """Qualify a decision and recommend a type, process, and modalities.

    Rules (in priority order):
      R1/R2  within_mandate -> individual + consultation_avis, no vote
             modalities, decision must be recorded in the Observatoire des
             décisions.
      R4     2 <= affected_count <= small_group_max -> individual
             recommended, collective available.
      R5     small_group_max < affected_count <= collective_wot_min ->
             collective recommended.
      R6     affected_count > collective_wot_min -> collective recommended
             (WoT formula applicable).
      R7/R8  is_structural -> recommend_onchain with reason.

    Args:
        inp: wizard answers (mandate flag, headcount, structural flag, context).
        config: thresholds and default modalities (QualificationProtocol).

    Returns:
        A fully-populated QualificationResult; ``reasons`` carries a
        user-facing (French) explanation for each recommendation.
    """
    reasons: list[str] = []

    # ── R1/R2: mandate scope overrides everything ───────────────────────────
    if inp.within_mandate:
        return _mandate_result(inp, reasons)

    # A missing headcount is treated as the minimum meaningful size (2);
    # affected_count == 1 is out of scope for this tool.
    count = inp.affected_count if inp.affected_count is not None else 2

    # ── R4: small group → individual recommended, collective available ───────
    if count <= config.small_group_max:
        return _small_group_result(inp, count, reasons)

    # ── R5/R6: medium or large group → collective ────────────────────────────
    return _collective_result(inp, config, count, reasons)


def _mandate_result(inp: QualificationInput, reasons: list[str]) -> QualificationResult:
    """R1/R2: decision covered by an existing mandate — individual, no vote,
    must be logged in the Observatoire des décisions."""
    reasons.append("Décision dans le périmètre d'un mandat existant.")
    return QualificationResult(
        decision_type=DecisionType.INDIVIDUAL,
        process="consultation_avis",
        recommended_modalities=[],
        recommend_onchain=_onchain(inp, reasons),
        onchain_reason=_onchain_reason(inp),
        confidence="required",
        collective_available=False,
        record_in_observatory=True,
        reasons=reasons,
    )


def _small_group_result(
    inp: QualificationInput, count: int, reasons: list[str]
) -> QualificationResult:
    """R4: small group — individual recommended, collective vote still available."""
    reasons.append(
        f"{count} personnes concernées : décision individuelle recommandée, "
        "vote collectif possible."
    )
    return QualificationResult(
        decision_type=DecisionType.INDIVIDUAL,
        process="personal",
        recommended_modalities=[],
        recommend_onchain=_onchain(inp, reasons),
        onchain_reason=_onchain_reason(inp),
        confidence="recommended",
        collective_available=True,
        record_in_observatory=False,
        reasons=reasons,
    )


def _collective_result(
    inp: QualificationInput,
    config: QualificationConfig,
    count: int,
    reasons: list[str],
) -> QualificationResult:
    """R5/R6: collective vote recommended; above collective_wot_min the
    WoT formula applies and vote_wot is guaranteed to lead the modalities."""
    modalities = _collect_modalities(inp, config)

    if count <= config.collective_wot_min:
        reasons.append(f"{count} personnes concernées : vote collectif recommandé.")
    else:
        reasons.append(
            f"{count} personnes concernées : vote collectif recommandé "
            "(formule WoT applicable à cette échelle)."
        )
        # At WoT scale, make sure the WoT modality is offered first.
        if "vote_wot" not in modalities:
            modalities = ["vote_wot"] + modalities

    return QualificationResult(
        decision_type=DecisionType.COLLECTIVE,
        process="vote_collective",
        recommended_modalities=modalities,
        recommend_onchain=_onchain(inp, reasons),
        onchain_reason=_onchain_reason(inp),
        # Both R5 and R6 are "recommended" (R6 correction: >50 people is
        # recommended, not required) — so a single assignment suffices.
        confidence="recommended",
        collective_available=True,
        record_in_observatory=False,
        reasons=reasons,
    )
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Helpers
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
def _onchain(inp: QualificationInput, reasons: list[str]) -> bool:
|
|
if inp.is_structural:
|
|
reasons.append(
|
|
"Décision structurante : gravure on-chain recommandée "
|
|
"(a force de loi ou déclenche une action machine)."
|
|
)
|
|
return inp.is_structural
|
|
|
|
|
|
def _onchain_reason(inp: QualificationInput) -> str | None:
|
|
if not inp.is_structural:
|
|
return None
|
|
return (
|
|
"Cette décision est structurante : elle a valeur de loi au sein de la "
|
|
"communauté ou déclenche une action machine (ex : runtime upgrade). "
|
|
"La gravure on-chain (IPFS + system.remark) garantit son immuabilité "
|
|
"et sa vérifiabilité publique."
|
|
)
|
|
|
|
|
|
def _collect_modalities(inp: QualificationInput, config: QualificationConfig) -> list[str]:
|
|
"""Combine default modalities with any LLM suggestions (stub for now)."""
|
|
llm_suggestions = []
|
|
if inp.context_description:
|
|
llm_suggestions = suggest_modalities_from_context(inp.context_description, config)
|
|
|
|
seen: set[str] = set()
|
|
result: list[str] = []
|
|
for m in llm_suggestions + config.default_modalities:
|
|
if m not in seen:
|
|
seen.add(m)
|
|
result.append(m)
|
|
return result
|