P1: sidecar lifecycle and HTTP bridge

Backend
- sidecar.rs supervises the bundled `mycelium` binary launched via
  pkexec; locates it in resource_dir or CARGO_MANIFEST_DIR/binaries
  matching $TAURI_ENV_TARGET_TRIPLE
- ephemeral port via portpicker, key + config persisted in
  app_data_dir, kill_on_drop with explicit start_kill on stop
- health-check loop calls /api/v1/admin until 2xx (timeout 20s);
  emits sidecar://ready and sidecar://exited
- 500-line ring buffer of stdout/stderr surfaced via sidecar_logs
  command for the upcoming Settings page
- elevation::is_auth_failure(126|127) maps pkexec cancel to a
  dedicated AppError variant
- AppError uses thiserror, Serialize impl renders messages as
  plain strings for the JS side

Frontend
- typed `api` wrapper around invoke() in src/lib/api.ts
- node store (Pinia) bootstraps on mount, listens on
  sidecar://ready and sidecar://exited
- StartupOverlay covers the whole window for idle/starting/error
  phases; sidebar status dot + start/stop button
- Status view renders subnet, pubkey, api endpoint and key path
  with one-click clipboard copy
This commit is contained in:
syoul
2026-04-25 22:45:52 +02:00
parent d79300caf8
commit d737231123
16 changed files with 950 additions and 14 deletions

0
src-tauri/binaries/.keep Normal file
View File

View File

@@ -0,0 +1,26 @@
use crate::api::MyceliumClient;
use crate::error::AppResult;
use serde::{Deserialize, Serialize};
/// Subset of the daemon's `/admin` response surfaced to the UI.
///
/// Field names are serialized in camelCase so the JS side can consume
/// the payload without renaming.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NodeInfo {
    // Overlay subnet assigned to this node, as reported by the daemon.
    pub node_subnet: String,
    // Node public key, as reported by the daemon.
    pub node_pubkey: String,
}
impl MyceliumClient {
    /// Fetch the node's identity (subnet + public key) from `/admin`.
    pub async fn node_info(&self) -> AppResult<NodeInfo> {
        let response = self.http().get(self.url("/admin")).send().await?;
        Self::parse(response).await
    }

    /// Cheap liveness probe used by the supervisor's health-check loop.
    /// Returns `true` if `/admin` answered 2xx.
    pub async fn is_alive(&self) -> bool {
        self.http()
            .get(self.url("/admin"))
            .send()
            .await
            .map(|resp| resp.status().is_success())
            .unwrap_or(false)
    }
}

68
src-tauri/src/api/mod.rs Normal file
View File

@@ -0,0 +1,68 @@
pub mod admin;
use crate::error::{AppError, AppResult};
use reqwest::{Client, Response};
use serde::de::DeserializeOwned;
use std::time::Duration;
/// Thin REST client for the mycelium daemon's HTTP API.
///
/// The base URL is set after the sidecar reports ready; clients are cheap
/// to clone (the inner `reqwest::Client` keeps a shared connection pool).
#[derive(Debug, Clone)]
pub struct MyceliumClient {
    // Base URL, e.g. `http://127.0.0.1:<port>`; `/api/v1` is appended per request.
    base: String,
    // Shared HTTP client/connection pool; cloned cheaply with the struct.
    http: Client,
}
impl MyceliumClient {
pub fn new(base: impl Into<String>) -> Self {
let http = Client::builder()
.timeout(Duration::from_secs(10))
.build()
.expect("reqwest client build");
Self {
base: base.into(),
http,
}
}
pub fn base_url(&self) -> &str {
&self.base
}
pub(crate) fn url(&self, path: &str) -> String {
format!("{}/api/v1{}", self.base, path)
}
pub(crate) async fn parse<T: DeserializeOwned>(resp: Response) -> AppResult<T> {
let status = resp.status();
if status.is_success() {
resp.json::<T>().await.map_err(AppError::from)
} else {
let body = resp.text().await.unwrap_or_default();
Err(AppError::DaemonStatus {
status: status.as_u16(),
body,
})
}
}
#[allow(dead_code)] // wired in P2 (peers add/remove)
pub(crate) async fn check_status(resp: Response) -> AppResult<()> {
let status = resp.status();
if status.is_success() {
Ok(())
} else {
let body = resp.text().await.unwrap_or_default();
Err(AppError::DaemonStatus {
status: status.as_u16(),
body,
})
}
}
pub(crate) fn http(&self) -> &Client {
&self.http
}
}

53
src-tauri/src/commands.rs Normal file
View File

@@ -0,0 +1,53 @@
use crate::api::admin::NodeInfo;
use crate::error::{AppError, AppResult};
use crate::sidecar::SidecarConfig;
use crate::state::AppState;
use serde::Serialize;
use tauri::{AppHandle, State};
/// Snapshot of the sidecar's state, returned to the frontend by the
/// daemon lifecycle commands.
#[derive(Debug, Serialize)]
pub struct DaemonStatus {
    // Whether the supervisor currently tracks a running daemon.
    pub running: bool,
    // Base URL of the daemon's HTTP API, when running.
    pub api_url: Option<String>,
    // Display path of the persisted private key, once known.
    pub key_path: Option<String>,
    // Display path of the persisted config file, once known.
    pub config_path: Option<String>,
}
/// Report the current daemon state (running flag, API URL, file paths).
#[tauri::command]
pub fn daemon_status(state: State<'_, AppState>) -> DaemonStatus {
    let sidecar = &state.sidecar;
    let api_url = sidecar.client().map(|c| c.base_url().to_string());
    let key_path = sidecar.key_path().map(|p| p.display().to_string());
    let config_path = sidecar.config_path().map(|p| p.display().to_string());
    DaemonStatus {
        running: sidecar.is_running(),
        api_url,
        key_path,
        config_path,
    }
}
/// Launch the sidecar (pkexec prompt + health check) and return the
/// resulting daemon state. Uses the default config when the frontend
/// passes none.
#[tauri::command]
pub async fn start_daemon(
    app: AppHandle,
    state: State<'_, AppState>,
    config: Option<SidecarConfig>,
) -> AppResult<DaemonStatus> {
    let effective = config.unwrap_or_default();
    state.sidecar.start(&app, &effective).await?;
    Ok(daemon_status(state))
}
/// Stop the sidecar (no-op if not running) and return the refreshed state.
#[tauri::command]
pub async fn stop_daemon(state: State<'_, AppState>) -> AppResult<DaemonStatus> {
    let sidecar = &state.sidecar;
    sidecar.stop().await;
    Ok(daemon_status(state))
}
/// Query the running daemon for its subnet and public key.
///
/// Errors with `DaemonNotRunning` when no sidecar is up.
#[tauri::command]
pub async fn node_info(state: State<'_, AppState>) -> AppResult<NodeInfo> {
    match state.sidecar.client() {
        Some(client) => client.node_info().await,
        None => Err(AppError::DaemonNotRunning),
    }
}
/// Return a copy of the in-memory stdout/stderr ring buffer (most recent
/// lines of sidecar output) for the Settings page.
#[tauri::command]
pub fn sidecar_logs(state: State<'_, AppState>) -> Vec<String> {
    state.sidecar.logs_snapshot()
}

View File

@@ -0,0 +1,26 @@
use std::path::Path;
use tokio::process::Command;
/// Build a `tokio::process::Command` that runs `target` with elevated
/// privileges via `pkexec`. The caller is responsible for setting stdio,
/// kill_on_drop, etc.
pub fn elevated(target: &Path, args: &[String]) -> Command {
    let mut cmd = Command::new("pkexec");
    // pkexec runs `<target> <args...>` as root after the polkit dialog;
    // `args()` appends the whole slice in order (same as arg-by-arg).
    cmd.arg(target).args(args);
    cmd
}
/// pkexec exit codes worth surfacing distinctly.
///
/// 126 — authorization could not be obtained (user cancelled the dialog,
/// no agent available, or polkit policy denied).
/// 127 — command was not found / not authorized.
///
/// Anything else is forwarded as-is and the supervisor will translate it
/// into `SidecarExited`.
pub fn is_auth_failure(code: i32) -> bool {
    code == 126 || code == 127
}

54
src-tauri/src/error.rs Normal file
View File

@@ -0,0 +1,54 @@
use serde::Serialize;
use thiserror::Error;
/// Unified error type for the Tauri backend.
///
/// The `#[error(...)]` messages double as the strings the frontend
/// receives, since `invoke()` rejections carry the Display output.
#[derive(Debug, Error)]
pub enum AppError {
    /// A command needed a running daemon but none is up.
    #[error("daemon not running")]
    DaemonNotRunning,
    /// `start` was called while a sidecar is already supervised.
    #[error("daemon already running")]
    DaemonAlreadyRunning,
    /// The daemon never answered 2xx within the payload's seconds.
    #[error("daemon health-check timed out after {0}s")]
    HealthCheckTimeout(u64),
    /// The bundled binary was not found; carries every path probed.
    #[error("could not locate mycelium sidecar binary (looked for {0:?})")]
    SidecarNotFound(Vec<std::path::PathBuf>),
    /// The child died during startup; payload describes the exit code.
    #[error("sidecar exited unexpectedly: {0}")]
    SidecarExited(String),
    /// pkexec reported 126/127 — dialog cancelled, denied, or no agent.
    #[error("pkexec authentication was cancelled or failed")]
    ElevationCancelled,
    #[error("io error: {0}")]
    Io(#[from] std::io::Error),
    #[error("http error: {0}")]
    Http(#[from] reqwest::Error),
    /// The daemon answered with a non-2xx HTTP status; `body` is raw text.
    #[error("daemon returned status {status}: {body}")]
    DaemonStatus { status: u16, body: String },
    #[error("tauri error: {0}")]
    Tauri(#[from] tauri::Error),
    /// Path-resolution failures from `tauri::path`, stringified.
    #[error("tauri path error: {0}")]
    TauriPath(String),
    #[error("invalid argument: {0}")]
    BadInput(String),
    /// Catch-all for ad-hoc messages (e.g. "no free port available").
    #[error("{0}")]
    Other(String),
}
/// Convenience alias used throughout the backend.
pub type AppResult<T> = Result<T, AppError>;
// Serialize errors as plain strings for the JS side. invoke() rejects with
// the message; the front-end matches on substring or surfaces it raw.
impl Serialize for AppError {
    fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        // collect_str streams the Display impl straight into the serializer,
        // producing the same string without the intermediate `to_string()`
        // allocation.
        s.collect_str(self)
    }
}

View File

@@ -1,3 +1,12 @@
pub mod api;
pub mod commands;
pub mod elevation;
pub mod error;
pub mod sidecar;
pub mod state;
use state::AppState;
use tauri::Manager;
use tracing_subscriber::EnvFilter;
pub fn run() {
@@ -15,7 +24,17 @@ pub fn run() {
.plugin(tauri_plugin_store::Builder::new().build())
.plugin(tauri_plugin_shell::init())
.plugin(tauri_plugin_dialog::init())
.invoke_handler(tauri::generate_handler![])
.setup(|app| {
app.manage(AppState::new());
Ok(())
})
.invoke_handler(tauri::generate_handler![
commands::daemon_status,
commands::start_daemon,
commands::stop_daemon,
commands::node_info,
commands::sidecar_logs,
])
.run(tauri::generate_context!())
.expect("error while running tauri application");
}

286
src-tauri/src/sidecar.rs Normal file
View File

@@ -0,0 +1,286 @@
use crate::api::MyceliumClient;
use crate::elevation;
use crate::error::{AppError, AppResult};
use parking_lot::Mutex;
use std::collections::VecDeque;
use std::path::PathBuf;
use std::process::Stdio;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tauri::{AppHandle, Emitter, Manager};
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Child;
use tracing::{info, warn};
/// How long the start sequence waits for a 2xx from `/admin` before
/// giving up (the pkexec dialog alone can eat several seconds).
const HEALTH_CHECK_TIMEOUT_SECS: u64 = 20;
/// Delay between consecutive health-check probes.
const HEALTH_CHECK_INTERVAL_MS: u64 = 400;
/// Maximum number of stdout/stderr lines kept in the in-memory ring.
const LOG_RING_CAPACITY: usize = 500;
/// User-tunable launch options for the mycelium sidecar, deserialized
/// from the frontend with camelCase keys.
#[derive(Debug, Clone, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SidecarConfig {
    // Bootstrap peers passed via `--peers`; when empty the flag is omitted.
    pub peers: Vec<String>,
    // Optional custom TUN interface name (`--tun-name`).
    pub tun_name: Option<String>,
    // Run without a TUN device (`--no-tun`).
    pub no_tun: bool,
}
impl Default for SidecarConfig {
    fn default() -> Self {
        // A small set of well-known public peers from the mycelium README,
        // used as bootstrap when the user hasn't configured their own.
        let bootstrap_peers = vec![
            "tcp://188.40.132.242:9651".to_string(),
            "quic://[2a01:4f8:212:fa6::2]:9651".to_string(),
        ];
        Self {
            peers: bootstrap_peers,
            tun_name: None,
            no_tun: false,
        }
    }
}
/// Holds the running mycelium child process plus a small in-memory log
/// buffer so the Settings page can show recent stderr/stdout without
/// reading from disk.
pub struct SidecarHandle {
    // The supervised child; None when no daemon is running.
    child: Mutex<Option<Child>>,
    // Base URL of the daemon API; Some(..) doubles as the "running" flag.
    api_url: Mutex<Option<String>>,
    // Ring buffer of recent output lines, capped at LOG_RING_CAPACITY.
    logs: Mutex<VecDeque<String>>,
    // Paths under app_data_dir, recorded during start().
    config_path: Mutex<Option<PathBuf>>,
    key_path: Mutex<Option<PathBuf>>,
}
impl SidecarHandle {
    /// Create an empty handle; nothing is spawned until [`Self::start`].
    pub fn new() -> Arc<Self> {
        Arc::new(Self {
            child: Mutex::new(None),
            api_url: Mutex::new(None),
            logs: Mutex::new(VecDeque::with_capacity(LOG_RING_CAPACITY)),
            config_path: Mutex::new(None),
            key_path: Mutex::new(None),
        })
    }

    /// Whether a daemon is currently tracked, backed by `api_url` presence.
    ///
    /// NOTE(review): `start()` stashes `api_url` before the health check
    /// completes, so this also reads true while startup is in flight.
    pub fn is_running(&self) -> bool {
        self.api_url.lock().is_some()
    }

    /// Fresh API client for the current daemon, or `None` when stopped.
    pub fn client(&self) -> Option<MyceliumClient> {
        self.api_url
            .lock()
            .as_ref()
            .map(|u| MyceliumClient::new(u.clone()))
    }

    /// Copy of the log ring for the `sidecar_logs` command.
    pub fn logs_snapshot(&self) -> Vec<String> {
        self.logs.lock().iter().cloned().collect()
    }

    /// Path of the persisted private key, once `start()` has chosen it.
    pub fn key_path(&self) -> Option<PathBuf> {
        self.key_path.lock().clone()
    }

    /// Path of the persisted config file, once `start()` has chosen it.
    pub fn config_path(&self) -> Option<PathBuf> {
        self.config_path.lock().clone()
    }

    /// Append a line to the ring, evicting the oldest once full.
    fn push_log(&self, line: String) {
        let mut buf = self.logs.lock();
        if buf.len() >= LOG_RING_CAPACITY {
            buf.pop_front();
        }
        buf.push_back(line);
    }

    /// Spawn the daemon via pkexec and wait until it answers the health
    /// check. Returns the API base URL on success.
    ///
    /// Sequence: locate binary → pick free port → build CLI args →
    /// spawn → wire log pumps + exit watcher → poll `/admin` until 2xx,
    /// child death, or timeout.
    pub async fn start(
        self: &Arc<Self>,
        app: &AppHandle,
        config: &SidecarConfig,
    ) -> AppResult<String> {
        if self.is_running() {
            return Err(AppError::DaemonAlreadyRunning);
        }
        let bin = locate_sidecar(app)?;
        // Ephemeral localhost port for the daemon's HTTP API.
        let port = portpicker::pick_unused_port()
            .ok_or_else(|| AppError::Other("no free port available".into()))?;
        let data_dir = app
            .path()
            .app_data_dir()
            .map_err(|e| AppError::TauriPath(e.to_string()))?;
        std::fs::create_dir_all(&data_dir)?;
        // Key file is always passed; the config file only when it already
        // exists on disk (presumably created by the daemon or a later P —
        // TODO confirm who writes mycelium.toml).
        let key_path = data_dir.join("priv_key.bin");
        let config_path = data_dir.join("mycelium.toml");
        let mut args = vec![
            "--api-addr".to_string(),
            format!("127.0.0.1:{port}"),
            "--key-file".to_string(),
            key_path.display().to_string(),
        ];
        if config_path.exists() {
            args.push("--config-file".to_string());
            args.push(config_path.display().to_string());
        }
        if config.no_tun {
            args.push("--no-tun".to_string());
        }
        if let Some(name) = &config.tun_name {
            args.push("--tun-name".to_string());
            args.push(name.clone());
        }
        if !config.peers.is_empty() {
            args.push("--peers".to_string());
            for p in &config.peers {
                args.push(p.clone());
            }
        }
        info!(?bin, port, "spawning mycelium sidecar via pkexec");
        let mut cmd = elevation::elevated(&bin, &args);
        cmd.stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .kill_on_drop(true);
        let mut child = cmd.spawn()?;
        let stdout = child.stdout.take();
        let stderr = child.stderr.take();
        // Stash before we await the health check, so a slow daemon
        // doesn't leave us with a zombie process if anything panics.
        let api_url = format!("http://127.0.0.1:{port}");
        *self.child.lock() = Some(child);
        *self.api_url.lock() = Some(api_url.clone());
        *self.config_path.lock() = Some(config_path);
        *self.key_path.lock() = Some(key_path);
        // Detached log pumps: forward child stdout/stderr into the ring.
        // They terminate on their own when the pipes close at child exit.
        if let Some(out) = stdout {
            let me = Arc::clone(self);
            tokio::spawn(async move {
                let mut lines = BufReader::new(out).lines();
                while let Ok(Some(line)) = lines.next_line().await {
                    me.push_log(format!("[stdout] {line}"));
                }
            });
        }
        if let Some(err) = stderr {
            let me = Arc::clone(self);
            tokio::spawn(async move {
                let mut lines = BufReader::new(err).lines();
                while let Ok(Some(line)) = lines.next_line().await {
                    me.push_log(format!("[stderr] {line}"));
                }
            });
        }
        // Background watcher: polls every 2s and emits `sidecar://exited`
        // when the child dies after the start sequence has succeeded.
        {
            let me = Arc::clone(self);
            let app = app.clone();
            tokio::spawn(async move {
                loop {
                    tokio::time::sleep(Duration::from_secs(2)).await;
                    if !me.is_running() {
                        // stop() already cleaned up; nothing to report.
                        break;
                    }
                    if let Some(code) = me.child_exit_status() {
                        me.handle_exit(&app, code);
                        break;
                    }
                }
            });
        }
        // Health-check loop. The pkexec dialog can take several seconds, so
        // give the daemon a generous window to come up before failing.
        let client = MyceliumClient::new(&api_url);
        let started = Instant::now();
        loop {
            // Bail early if the child died (auth cancel, missing TUN cap, etc.).
            if let Some(code) = self.child_exit_status() {
                self.cleanup();
                if elevation::is_auth_failure(code) {
                    return Err(AppError::ElevationCancelled);
                }
                return Err(AppError::SidecarExited(format!("exit code {code}")));
            }
            if client.is_alive().await {
                info!(api_url = %api_url, "mycelium sidecar healthy");
                let _ = app.emit("sidecar://ready", &api_url);
                return Ok(api_url);
            }
            if started.elapsed() > Duration::from_secs(HEALTH_CHECK_TIMEOUT_SECS) {
                self.stop().await;
                return Err(AppError::HealthCheckTimeout(HEALTH_CHECK_TIMEOUT_SECS));
            }
            tokio::time::sleep(Duration::from_millis(HEALTH_CHECK_INTERVAL_MS)).await;
        }
    }

    /// Non-blocking reap: `Some(code)` once the child has terminated,
    /// `None` while it is still running (or when `try_wait` errors).
    fn child_exit_status(&self) -> Option<i32> {
        let mut lock = self.child.lock();
        let child = lock.as_mut()?;
        match child.try_wait() {
            // `code()` is None when killed by a signal; report that as -1.
            Ok(Some(s)) => Some(s.code().unwrap_or(-1)),
            // Still running, or try_wait failed — treat both as "alive".
            _ => None,
        }
    }

    /// Watcher path for an unexpected death: clear state and tell the
    /// frontend which exit code we observed.
    fn handle_exit(&self, app: &AppHandle, code: i32) {
        warn!(code, "mycelium sidecar exited");
        self.cleanup();
        let _ = app.emit("sidecar://exited", code);
    }

    /// Drop `api_url` (flips `is_running()` to false) and the child handle.
    fn cleanup(&self) {
        *self.api_url.lock() = None;
        let _ = self.child.lock().take();
    }

    /// Terminate the child (if any) and clear all state. Safe to call
    /// when nothing is running.
    pub async fn stop(&self) {
        // Take the child by value so the parking_lot guard isn't held across
        // the await on `wait()`. `kill_on_drop(true)` is set, but pkexec runs
        // mycelium as root, so we still ask politely first; the polkit agent
        // reaps the elevated child.
        let child_opt = self.child.lock().take();
        if let Some(mut child) = child_opt {
            let _ = child.start_kill();
            let _ = child.wait().await;
        }
        self.cleanup();
    }
}
/// Resolve the bundled `mycelium-<triple>` binary in both `tauri dev`
/// (cargo manifest) and bundled (resource_dir) modes.
fn locate_sidecar(app: &AppHandle) -> AppResult<PathBuf> {
    // Target triple: runtime env var first, compile-time TARGET second,
    // x86_64 Linux as the final fallback.
    let triple = std::env::var("TAURI_ENV_TARGET_TRIPLE")
        .ok()
        .or_else(|| option_env!("TARGET").map(|s| s.to_string()))
        .unwrap_or_else(|| "x86_64-unknown-linux-gnu".to_string());
    let name = format!("mycelium-{triple}");
    let mut tried: Vec<PathBuf> = Vec::new();

    // Bundled mode: the binary sits next to the app's other resources.
    if let Ok(resource) = app.path().resource_dir() {
        let candidate = resource.join(&name);
        if candidate.exists() {
            return Ok(candidate);
        }
        tried.push(candidate);
    }

    // Dev mode: checked into src-tauri/binaries/.
    let candidate = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("binaries")
        .join(&name);
    if candidate.exists() {
        return Ok(candidate);
    }
    tried.push(candidate);

    Err(AppError::SidecarNotFound(tried))
}

20
src-tauri/src/state.rs Normal file
View File

@@ -0,0 +1,20 @@
use crate::sidecar::SidecarHandle;
use std::sync::Arc;
/// Tauri-managed shared state for the backend commands.
pub struct AppState {
    // Arc so background tasks spawned by the supervisor can hold a clone.
    pub sidecar: Arc<SidecarHandle>,
}
impl AppState {
    /// Build the state with a fresh, idle sidecar supervisor.
    pub fn new() -> Self {
        Self {
            sidecar: SidecarHandle::new(),
        }
    }
}
// Default delegates to `new()` so clippy's new_without_default is satisfied.
impl Default for AppState {
    fn default() -> Self {
        Self::new()
    }
}