Backend - api/routes.rs models the Babel-style route shape; metric uses an untagged enum to round-trip both numeric hop counts and the literal "infinite" string the daemon emits for poisoned routes - routes_snapshot() runs the three GETs concurrently with try_join so the snapshot is internally consistent - poller spawns a second 5s loop emitting routes://updated; both loops are owned by the Poller and aborted together on stop_daemon Frontend - routes store mirrors the snapshot shape; tabbed view (radix-vue) with selected, fallback and queried lists - RouteTable component shared by selected/fallback; metric column is colour-coded (0 green, low neutral, high yellow, infinite red) - Queried subnets show a live `expires in 12s` countdown driven by a 1Hz tick ref instead of mutating the store
90 lines
2.7 KiB
Rust
use crate::api::peers;
|
|
use crate::sidecar::SidecarHandle;
|
|
use parking_lot::Mutex;
|
|
use std::sync::Arc;
|
|
use std::time::Duration;
|
|
use tauri::{AppHandle, Emitter};
|
|
use tokio::task::JoinHandle;
|
|
use tracing::warn;
|
|
|
|
const PEERS_INTERVAL: Duration = Duration::from_secs(3);
|
|
const ROUTES_INTERVAL: Duration = Duration::from_secs(5);
|
|
|
|
/// Owns the two background polling tasks (peers + routes) spawned while the
/// daemon is running. Both handles live behind mutexes so `start`/`stop`
/// can be called from any thread; `stop` aborts whichever tasks are live.
pub struct Poller {
    // JoinHandle of the peers/stats polling loop; None when not running.
    peers_handle: Mutex<Option<JoinHandle<()>>>,
    // JoinHandle of the routes polling loop; None when not running.
    routes_handle: Mutex<Option<JoinHandle<()>>>,
}
|
|
|
|
impl Poller {
|
|
pub fn new() -> Arc<Self> {
|
|
Arc::new(Self {
|
|
peers_handle: Mutex::new(None),
|
|
routes_handle: Mutex::new(None),
|
|
})
|
|
}
|
|
|
|
/// Spawn the two background loops. Cancels any previously-running tasks
|
|
/// so consecutive `start_daemon` calls don't leak handles.
|
|
pub fn start(self: &Arc<Self>, app: AppHandle, sidecar: Arc<SidecarHandle>) {
|
|
self.stop();
|
|
*self.peers_handle.lock() = Some(spawn_peers_loop(app.clone(), Arc::clone(&sidecar)));
|
|
*self.routes_handle.lock() = Some(spawn_routes_loop(app, sidecar));
|
|
}
|
|
|
|
pub fn stop(&self) {
|
|
if let Some(h) = self.peers_handle.lock().take() {
|
|
h.abort();
|
|
}
|
|
if let Some(h) = self.routes_handle.lock().take() {
|
|
h.abort();
|
|
}
|
|
}
|
|
}
|
|
|
|
fn spawn_peers_loop(app: AppHandle, sidecar: Arc<SidecarHandle>) -> JoinHandle<()> {
|
|
tokio::spawn(async move {
|
|
// Tick once immediately so the UI doesn't wait the full interval.
|
|
let mut first = true;
|
|
loop {
|
|
if !first {
|
|
tokio::time::sleep(PEERS_INTERVAL).await;
|
|
}
|
|
first = false;
|
|
|
|
let Some(client) = sidecar.client() else {
|
|
break;
|
|
};
|
|
match client.list_peers().await {
|
|
Ok(list) => {
|
|
let stats = peers::aggregate(&list);
|
|
let _ = app.emit("peers://updated", &list);
|
|
let _ = app.emit("stats://updated", &stats);
|
|
}
|
|
Err(e) => warn!(error = %e, "poller: list_peers failed"),
|
|
}
|
|
}
|
|
})
|
|
}
|
|
|
|
fn spawn_routes_loop(app: AppHandle, sidecar: Arc<SidecarHandle>) -> JoinHandle<()> {
|
|
tokio::spawn(async move {
|
|
let mut first = true;
|
|
loop {
|
|
if !first {
|
|
tokio::time::sleep(ROUTES_INTERVAL).await;
|
|
}
|
|
first = false;
|
|
|
|
let Some(client) = sidecar.client() else {
|
|
break;
|
|
};
|
|
match client.routes_snapshot().await {
|
|
Ok(snap) => {
|
|
let _ = app.emit("routes://updated", &snap);
|
|
}
|
|
Err(e) => warn!(error = %e, "poller: routes_snapshot failed"),
|
|
}
|
|
}
|
|
})
|
|
}
|