P3: routes (selected, fallback, queried)

Backend
- api/routes.rs models the Babel-style route shape; metric uses an
  untagged enum to round-trip both numeric hop counts and the
  literal "infinite" string the daemon emits for poisoned routes
- routes_snapshot() runs the three GETs concurrently with try_join
  so the snapshot is internally consistent
- poller spawns a second 5s loop emitting routes://updated; both
  loops are owned by the Poller and aborted together on stop_daemon

Frontend
- routes store mirrors the snapshot shape; tabbed view (radix-vue)
  with selected, fallback and queried lists
- RouteTable component shared by selected/fallback; metric column
  is colour-coded (0 green, low neutral, high yellow, infinite red)
- Queried subnets show a live `expires in 12s` countdown driven by
  a 1Hz tick ref instead of mutating the store
This commit is contained in:
syoul
2026-04-25 23:02:32 +02:00
parent c1a81a9065
commit 95e7cb4bd3
9 changed files with 382 additions and 34 deletions

View File

@@ -1,5 +1,6 @@
pub mod admin;
pub mod peers;
pub mod routes;
use crate::error::{AppError, AppResult};
use reqwest::{Client, Response};

View File

@@ -0,0 +1,68 @@
use crate::api::MyceliumClient;
use crate::error::AppResult;
use serde::{Deserialize, Serialize};
/// The daemon serializes the metric as either an unsigned integer for
/// reachable routes, or the literal string "infinite" for poisoned ones.
///
/// `#[serde(untagged)]` makes serde try the variants in order: a JSON
/// number deserializes into `Value`, anything else falls through to the
/// string variant — so both shapes round-trip unchanged.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Metric {
/// Numeric hop-count-style metric for a reachable route.
Value(u64),
/// Carries the raw string (the daemon emits "infinite"); kept as a
/// String rather than a unit variant so serialization reproduces the
/// exact text received.
Infinite(String),
}
/// One entry of the daemon's routing table, shared by the
/// selected and fallback route lists.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Route {
/// Destination subnet, in the string form the daemon emits.
pub subnet: String,
/// Next-hop identifier for this route (string form from the daemon).
pub next_hop: String,
/// Route cost; numeric, or the literal "infinite" for poisoned routes.
pub metric: Metric,
/// Sequence number of the route announcement.
pub seqno: u64,
}
/// A subnet the daemon currently has an outstanding route query for.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct QueriedSubnet {
/// Subnet being queried, in the daemon's string form.
pub subnet: String,
/// Expiry of the query as a string straight from the daemon; the
/// frontend derives its live countdown from this value.
pub expiration: String,
}
/// Combined result of the three route endpoints, fetched together so the
/// lists are internally consistent. `Default` gives the empty snapshot.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct RoutesSnapshot {
/// Routes from /admin/routes/selected.
pub selected: Vec<Route>,
/// Routes from /admin/routes/fallback.
pub fallback: Vec<Route>,
/// Pending queries from /admin/routes/queried.
pub queried: Vec<QueriedSubnet>,
}
impl MyceliumClient {
    /// GET /admin/routes/selected, parsed into the shared `Route` shape.
    pub async fn routes_selected(&self) -> AppResult<Vec<Route>> {
        let resp = self.http().get(self.url("/admin/routes/selected")).send().await?;
        Self::parse(resp).await
    }

    /// GET /admin/routes/fallback, parsed into the shared `Route` shape.
    pub async fn routes_fallback(&self) -> AppResult<Vec<Route>> {
        let resp = self.http().get(self.url("/admin/routes/fallback")).send().await?;
        Self::parse(resp).await
    }

    /// GET /admin/routes/queried, parsed into `QueriedSubnet` entries.
    pub async fn routes_queried(&self) -> AppResult<Vec<QueriedSubnet>> {
        let resp = self.http().get(self.url("/admin/routes/queried")).send().await?;
        Self::parse(resp).await
    }

    /// Fetch all three route lists at once. The requests run concurrently
    /// (`try_join!` fails fast on the first error) so the snapshot reflects
    /// a near-coincident view of the routing table.
    pub async fn routes_snapshot(&self) -> AppResult<RoutesSnapshot> {
        let (selected, fallback, queried) = tokio::try_join!(
            self.routes_selected(),
            self.routes_fallback(),
            self.routes_queried(),
        )?;
        Ok(RoutesSnapshot {
            selected,
            fallback,
            queried,
        })
    }
}

View File

@@ -1,5 +1,6 @@
use crate::api::admin::NodeInfo;
use crate::api::peers::{AggregatedStats, PeerInfo};
use crate::api::routes::RoutesSnapshot;
use crate::api::MyceliumClient;
use crate::error::{AppError, AppResult};
use crate::sidecar::SidecarConfig;
@@ -91,3 +92,10 @@ pub async fn peers_stats(state: State<'_, AppState>) -> AppResult<AggregatedStat
let peers = require_client(&state)?.list_peers().await?;
Ok(crate::api::peers::aggregate(&peers))
}
// ─── Routes ──────────────────────────────────────────────────────────────────
/// Tauri command: return the current routes snapshot from the daemon.
/// Fails with the usual "no client" error when the daemon is not running.
#[tauri::command]
pub async fn routes_snapshot(state: State<'_, AppState>) -> AppResult<RoutesSnapshot> {
    let client = require_client(&state)?;
    client.routes_snapshot().await
}

View File

@@ -39,6 +39,7 @@ pub fn run() {
commands::peer_add,
commands::peer_remove,
commands::peers_stats,
commands::routes_snapshot,
])
.run(tauri::generate_context!())
.expect("error while running tauri application");

View File

@@ -8,54 +8,82 @@ use tokio::task::JoinHandle;
use tracing::warn;
const PEERS_INTERVAL: Duration = Duration::from_secs(3);
const ROUTES_INTERVAL: Duration = Duration::from_secs(5);
pub struct Poller {
handle: Mutex<Option<JoinHandle<()>>>,
peers_handle: Mutex<Option<JoinHandle<()>>>,
routes_handle: Mutex<Option<JoinHandle<()>>>,
}
impl Poller {
pub fn new() -> Arc<Self> {
Arc::new(Self {
handle: Mutex::new(None),
peers_handle: Mutex::new(None),
routes_handle: Mutex::new(None),
})
}
/// Spawn a background task that pulls /admin/peers every few seconds and
/// fans the result out as `peers://updated` and an aggregated
/// `stats://updated` event. Cancels any previously-running task.
/// Spawn the two background loops. Cancels any previously-running tasks
/// so consecutive `start_daemon` calls don't leak handles.
pub fn start(self: &Arc<Self>, app: AppHandle, sidecar: Arc<SidecarHandle>) {
self.stop();
let h = tokio::spawn(async move {
// First tick is immediate so the UI doesn't wait a full interval
// for the first peer list right after the daemon comes up.
let mut first = true;
loop {
if !first {
tokio::time::sleep(PEERS_INTERVAL).await;
}
first = false;
let Some(client) = sidecar.client() else {
break;
};
match client.list_peers().await {
Ok(list) => {
let stats = peers::aggregate(&list);
let _ = app.emit("peers://updated", &list);
let _ = app.emit("stats://updated", &stats);
}
Err(e) => {
warn!(error = %e, "poller: list_peers failed");
}
}
}
});
*self.handle.lock() = Some(h);
*self.peers_handle.lock() = Some(spawn_peers_loop(app.clone(), Arc::clone(&sidecar)));
*self.routes_handle.lock() = Some(spawn_routes_loop(app, sidecar));
}
pub fn stop(&self) {
if let Some(h) = self.handle.lock().take() {
if let Some(h) = self.peers_handle.lock().take() {
h.abort();
}
if let Some(h) = self.routes_handle.lock().take() {
h.abort();
}
}
}
fn spawn_peers_loop(app: AppHandle, sidecar: Arc<SidecarHandle>) -> JoinHandle<()> {
tokio::spawn(async move {
// Tick once immediately so the UI doesn't wait the full interval.
let mut first = true;
loop {
if !first {
tokio::time::sleep(PEERS_INTERVAL).await;
}
first = false;
let Some(client) = sidecar.client() else {
break;
};
match client.list_peers().await {
Ok(list) => {
let stats = peers::aggregate(&list);
let _ = app.emit("peers://updated", &list);
let _ = app.emit("stats://updated", &stats);
}
Err(e) => warn!(error = %e, "poller: list_peers failed"),
}
}
})
}
fn spawn_routes_loop(app: AppHandle, sidecar: Arc<SidecarHandle>) -> JoinHandle<()> {
tokio::spawn(async move {
let mut first = true;
loop {
if !first {
tokio::time::sleep(ROUTES_INTERVAL).await;
}
first = false;
let Some(client) = sidecar.client() else {
break;
};
match client.routes_snapshot().await {
Ok(snap) => {
let _ = app.emit("routes://updated", &snap);
}
Err(e) => warn!(error = %e, "poller: routes_snapshot failed"),
}
}
})
}