Sprint 5 : intégration et production — sécurité, performance, API publique, documentation

Backend: rate limiter, security headers, blockchain cache service avec RPC,
public API (7 endpoints read-only), WebSocket auth + heartbeat, DB connection
pooling, structured logging, health check DB. Frontend: API retry/timeout,
WebSocket auth + heartbeat + typed events, notifications toast, mobile hamburger
+ drawer, error boundary, offline banner, loading skeletons, dashboard enrichi.
Documentation: guides utilisateur complets (démarrage, vote, sanctuaire, FAQ 30+),
guide déploiement, politique sécurité. 123 tests, 155 fichiers.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Yvv
2026-02-28 15:12:50 +01:00
parent 3cb1754592
commit 403b94fa2c
31 changed files with 4472 additions and 356 deletions

View File

@@ -3,85 +3,302 @@
Provides functions to query WoT size, Smith sub-WoT size, and
Technical Committee size from the Duniter V2 blockchain.
Falls back to hardcoded values matching GDev test data only when both the
cache and the RPC are unavailable.
Architecture:
1. Check database cache (via cache_service)
2. Try JSON-RPC call to Duniter node
3. Fall back to hardcoded GDev test values (with warning log)
All public functions accept a db session for cache access.
"""
from __future__ import annotations
import logging
async def get_wot_size() -> int:
import httpx
from sqlalchemy.ext.asyncio import AsyncSession
from app.config import settings
from app.services import cache_service
logger = logging.getLogger(__name__)
# Hardcoded fallback values from GDev snapshot
_FALLBACK_WOT_SIZE = 7224
_FALLBACK_SMITH_SIZE = 20
_FALLBACK_TECHCOMM_SIZE = 5
# Cache key prefixes
_CACHE_KEY_WOT = "blockchain:wot_size"
_CACHE_KEY_SMITH = "blockchain:smith_size"
_CACHE_KEY_TECHCOMM = "blockchain:techcomm_size"
async def _fetch_from_rpc(method: str, params: list | None = None) -> dict | None:
    """POST a JSON-RPC request to the Duniter node and return its result.

    The configured ``DUNITER_RPC_URL`` may point at a WebSocket endpoint;
    it is rewritten to its HTTP equivalent (``wss://`` -> ``https://``,
    ``ws://`` -> ``http://``) and a trailing ``/ws`` segment is stripped,
    because this helper speaks JSON-RPC over plain HTTP.

    Parameters
    ----------
    method:
        JSON-RPC method name (e.g. ``"state_getStorage"``).
    params:
        Optional positional parameters for the call.

    Returns
    -------
    dict | None
        The ``"result"`` member of the JSON-RPC response, or None when the
        node is unreachable, times out, replies with an HTTP or JSON-RPC
        error, or any unexpected failure occurs.
    """
    # Rewrite a WebSocket URL into its HTTP counterpart for JSON-RPC.
    endpoint = settings.DUNITER_RPC_URL
    for ws_scheme, http_scheme in (("wss://", "https://"), ("ws://", "http://")):
        if endpoint.startswith(ws_scheme):
            endpoint = http_scheme + endpoint[len(ws_scheme):]
            break
    if endpoint.endswith("/ws"):
        endpoint = endpoint[: -len("/ws")]

    request_body = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": method,
        "params": params or [],
    }
    try:
        async with httpx.AsyncClient(
            timeout=settings.DUNITER_RPC_TIMEOUT_SECONDS
        ) as client:
            response = await client.post(endpoint, json=request_body)
            response.raise_for_status()
            data = response.json()
            if "error" not in data:
                return data.get("result")
            logger.warning(
                "Erreur RPC Duniter pour %s: %s",
                method, data["error"],
            )
    except httpx.ConnectError:
        logger.warning(
            "Impossible de se connecter au noeud Duniter (%s)", endpoint
        )
    except httpx.TimeoutException:
        logger.warning(
            "Timeout lors de l'appel RPC Duniter pour %s (%s)",
            method, endpoint,
        )
    except httpx.HTTPStatusError as exc:
        logger.warning(
            "Erreur HTTP Duniter RPC pour %s: %s",
            method, exc.response.status_code,
        )
    except Exception:
        logger.warning(
            "Erreur inattendue lors de l'appel RPC Duniter pour %s",
            method,
            exc_info=True,
        )
    return None
async def _fetch_membership_count(db: AsyncSession) -> int | None:
    """Fetch the WoT membership count from the Duniter RPC.

    Calls ``membership_membershipsCount`` and, on success, stores the
    value in the database cache under the WoT cache key.

    Parameters
    ----------
    db:
        Async database session used for cache writes.

    Returns
    -------
    int | None
        The membership count, or None when the RPC failed or returned an
        unparseable value.
    """
    raw = await _fetch_from_rpc("membership_membershipsCount", [])
    if raw is None:
        return None
    try:
        count = int(raw)
        # Persist the fresh value so subsequent reads hit the cache.
        await cache_service.set_cached(
            _CACHE_KEY_WOT,
            {"value": count},
            db,
            ttl_seconds=settings.BLOCKCHAIN_CACHE_TTL_SECONDS,
        )
    except (ValueError, TypeError):
        logger.warning("Reponse RPC invalide pour membership count: %s", raw)
        return None
    return count
async def _fetch_smith_count(db: AsyncSession) -> int | None:
    """Fetch the Smith membership count from the Duniter RPC.

    Calls ``smithMembers_smithMembersCount`` and, on success, stores the
    value in the database cache under the Smith cache key.

    Parameters
    ----------
    db:
        Async database session used for cache writes.

    Returns
    -------
    int | None
        The Smith member count, or None when the RPC failed or returned an
        unparseable value.
    """
    raw = await _fetch_from_rpc("smithMembers_smithMembersCount", [])
    if raw is None:
        return None
    try:
        count = int(raw)
        # Persist the fresh value so subsequent reads hit the cache.
        await cache_service.set_cached(
            _CACHE_KEY_SMITH,
            {"value": count},
            db,
            ttl_seconds=settings.BLOCKCHAIN_CACHE_TTL_SECONDS,
        )
    except (ValueError, TypeError):
        logger.warning("Reponse RPC invalide pour smith count: %s", raw)
        return None
    return count
async def _fetch_techcomm_count(db: AsyncSession) -> int | None:
    """Fetch the Technical Committee member count from the Duniter RPC.

    Calls ``technicalCommittee_members``; a list response is counted with
    ``len``, any other response is coerced with ``int``. On success the
    value is stored in the database cache under the TechComm cache key.

    Parameters
    ----------
    db:
        Async database session used for cache writes.

    Returns
    -------
    int | None
        The TechComm member count, or None when the RPC failed or returned
        an unparseable value.
    """
    raw = await _fetch_from_rpc("technicalCommittee_members", [])
    if raw is None:
        return None
    try:
        count = len(raw) if isinstance(raw, list) else int(raw)
        # Persist the fresh value so subsequent reads hit the cache.
        await cache_service.set_cached(
            _CACHE_KEY_TECHCOMM,
            {"value": count},
            db,
            ttl_seconds=settings.BLOCKCHAIN_CACHE_TTL_SECONDS,
        )
    except (ValueError, TypeError):
        logger.warning("Reponse RPC invalide pour techcomm count: %s", raw)
        return None
    return count
async def get_wot_size(db: AsyncSession) -> int:
    """Return the current number of WoT members.

    Resolution order:
    1. Database cache (if not expired)
    2. Duniter RPC call (which also refreshes the cache)
    3. Hardcoded fallback (7224, GDev snapshot)

    Parameters
    ----------
    db:
        Async database session (for cache access).

    Returns
    -------
    int
        Number of WoT members.
    """
    # 1. Try cache
    cached = await cache_service.get_cached(_CACHE_KEY_WOT, db)
    if cached is not None:
        return cached["value"]
    # 2. Try RPC
    rpc_value = await _fetch_membership_count(db)
    if rpc_value is not None:
        return rpc_value
    # 3. Fallback — only reached when both cache and RPC are unavailable.
    logger.warning(
        "Utilisation de la valeur WoT par defaut (%d) - "
        "cache et RPC indisponibles",
        _FALLBACK_WOT_SIZE,
    )
    return _FALLBACK_WOT_SIZE
async def get_smith_size(db: AsyncSession) -> int:
    """Return the current number of Smith members (forgerons).

    Resolution order:
    1. Database cache (if not expired)
    2. Duniter RPC call (which also refreshes the cache)
    3. Hardcoded fallback (20, GDev snapshot)

    Parameters
    ----------
    db:
        Async database session (for cache access).

    Returns
    -------
    int
        Number of Smith members.
    """
    # 1. Try cache
    cached = await cache_service.get_cached(_CACHE_KEY_SMITH, db)
    if cached is not None:
        return cached["value"]
    # 2. Try RPC
    rpc_value = await _fetch_smith_count(db)
    if rpc_value is not None:
        return rpc_value
    # 3. Fallback — only reached when both cache and RPC are unavailable.
    logger.warning(
        "Utilisation de la valeur Smith par defaut (%d) - "
        "cache et RPC indisponibles",
        _FALLBACK_SMITH_SIZE,
    )
    return _FALLBACK_SMITH_SIZE
async def get_techcomm_size(db: AsyncSession) -> int:
    """Return the current number of Technical Committee members.

    Resolution order:
    1. Database cache (if not expired)
    2. Duniter RPC call (which also refreshes the cache)
    3. Hardcoded fallback (5, GDev snapshot)

    Parameters
    ----------
    db:
        Async database session (for cache access).

    Returns
    -------
    int
        Number of TechComm members.
    """
    # 1. Try cache
    cached = await cache_service.get_cached(_CACHE_KEY_TECHCOMM, db)
    if cached is not None:
        return cached["value"]
    # 2. Try RPC
    rpc_value = await _fetch_techcomm_count(db)
    if rpc_value is not None:
        return rpc_value
    # 3. Fallback — only reached when both cache and RPC are unavailable.
    logger.warning(
        "Utilisation de la valeur TechComm par defaut (%d) - "
        "cache et RPC indisponibles",
        _FALLBACK_TECHCOMM_SIZE,
    )
    return _FALLBACK_TECHCOMM_SIZE

View File

@@ -0,0 +1,140 @@
"""Cache service: blockchain data caching with TTL expiry.
Uses the BlockchainCache model (PostgreSQL/JSONB) to cache
on-chain data like WoT size, Smith size, and TechComm size.
Avoids repeated RPC calls to the Duniter node.
"""
from __future__ import annotations
import logging
from datetime import datetime, timedelta, timezone
from sqlalchemy import delete, select
from sqlalchemy.ext.asyncio import AsyncSession
from app.models.cache import BlockchainCache
logger = logging.getLogger(__name__)
async def get_cached(key: str, db: AsyncSession) -> dict | None:
    """Look up a cache entry, honouring its expiry timestamp.

    Parameters
    ----------
    key:
        The cache key to look up.
    db:
        Async database session.

    Returns
    -------
    dict | None
        The stored JSON value, or None on a miss or an expired entry.
    """
    stmt = select(BlockchainCache).where(
        BlockchainCache.cache_key == key,
        # Expired rows are filtered out here rather than deleted;
        # cleanup_expired() handles the actual purge.
        BlockchainCache.expires_at > datetime.now(timezone.utc),
    )
    entry = (await db.execute(stmt)).scalar_one_or_none()
    if entry is not None:
        logger.debug("Cache hit pour la cle '%s'", key)
        return entry.cache_value
    logger.debug("Cache miss pour la cle '%s'", key)
    return None
async def set_cached(
    key: str,
    value: dict,
    db: AsyncSession,
    ttl_seconds: int = 3600,
) -> None:
    """Upsert a cache entry with a time-to-live.

    An existing row for ``key`` is overwritten in place; otherwise a new
    row is inserted. The session is committed before returning.

    Parameters
    ----------
    key:
        The cache key.
    value:
        The value to store (must be JSON-serializable).
    db:
        Async database session.
    ttl_seconds:
        Time-to-live in seconds (default: 1 hour).
    """
    now = datetime.now(timezone.utc)
    expiry = now + timedelta(seconds=ttl_seconds)
    # NOTE(review): select-then-write upsert can race under concurrent
    # writers of the same key — confirm a unique constraint or retry
    # covers this at the database level.
    lookup = await db.execute(
        select(BlockchainCache).where(BlockchainCache.cache_key == key)
    )
    row = lookup.scalar_one_or_none()
    if row is None:
        db.add(
            BlockchainCache(
                cache_key=key,
                cache_value=value,
                fetched_at=now,
                expires_at=expiry,
            )
        )
        logger.debug("Cache cree pour la cle '%s' (TTL=%ds)", key, ttl_seconds)
    else:
        row.cache_value = value
        row.fetched_at = now
        row.expires_at = expiry
        logger.debug("Cache mis a jour pour la cle '%s' (TTL=%ds)", key, ttl_seconds)
    await db.commit()
async def invalidate(key: str, db: AsyncSession) -> None:
    """Delete a single cache entry, committing the change.

    A missing key is a no-op.

    Parameters
    ----------
    key:
        The cache key to invalidate.
    db:
        Async database session.
    """
    stmt = delete(BlockchainCache).where(BlockchainCache.cache_key == key)
    await db.execute(stmt)
    await db.commit()
    logger.debug("Cache invalide pour la cle '%s'", key)
async def cleanup_expired(db: AsyncSession) -> int:
    """Purge every cache row whose expiry timestamp has passed.

    Parameters
    ----------
    db:
        Async database session.

    Returns
    -------
    int
        Number of rows deleted.
    """
    cutoff = datetime.now(timezone.utc)
    result = await db.execute(
        delete(BlockchainCache).where(BlockchainCache.expires_at <= cutoff)
    )
    await db.commit()
    # NOTE(review): rowcount may be -1 on drivers that do not report it —
    # confirm the configured backend supports DELETE rowcounts.
    removed = result.rowcount
    if removed > 0:
        logger.info("Nettoyage cache: %d entrees expirees supprimees", removed)
    return removed