Compare commits


14 Commits

Author  SHA1  Message  Date
Lee Smet  688c42493e  Add info on how to run demo script  2025-09-09 11:10:46 +02:00
Lee Smet  a75fb9c55d  Format code  2025-09-08 14:25:35 +02:00
Lee Smet  5ed9739d68  Properly update DAG view with started/finished jobs  2025-09-08 14:12:40 +02:00
Lee Smet  3cd1a55768  Fix job status transitions  2025-09-08 13:37:42 +02:00
Lee Smet  c860553acd  Stop polling when a job reached terminal status  2025-09-08 12:07:26 +02:00
Lee Smet  78a776877a  Fetch the result of a job more than once if needed  2025-09-08 11:54:15 +02:00
Lee Smet  8cea17f4ec  Increase supervisor hub popmessage timeout  2025-09-08 11:43:44 +02:00
Lee Smet  66c89d2485  Format codebase  2025-09-08 11:37:51 +02:00
Lee Smet  512c99db54  Improve jsonrpc client to properly route replies  2025-09-08 11:37:22 +02:00
Lee Smet  25f35ea8fc  Check job status in redis db as well before sending rpc call  2025-09-05 19:58:52 +02:00
Lee Smet  fb34b4e2f3  Use single cached supervisorclient  2025-09-05 17:09:57 +02:00
Lee Smet  2c88114d45  Remove notion of sync calls  2025-09-05 13:23:48 +02:00
Lee Smet  8de2597f19  Fix loading message status from mycelium  2025-09-05 12:22:54 +02:00
Lee Smet  3220f52956  Add display impl for TransportStatus  2025-09-05 12:22:26 +02:00

Every commit carries the trailer Signed-off-by: Lee Smet <lee.smet@hotmail.com>.
11 changed files with 1148 additions and 581 deletions

View File

@@ -1,2 +1,28 @@
# herocoordinator
## Demo setup
A Python script is provided in the [scripts directory](./scripts/supervisor_flow_demo.py). This script
generates some demo jobs to be run by [a supervisor](https://git.ourworld.tf/herocode/supervisor).
Communication happens over [mycelium](https://github.com/threefoldtech/mycelium). To run the demo, a
supervisor must be running; it uses a mycelium instance to read and write messages. A __different__
mycelium instance needs to run for the coordinator (the supervisor can run on a different host than
the coordinator, as long as the two mycelium instances can reach each other).
An example of a local setup:
```bash
# Run a redis docker
docker run -it -d -p 6379:6379 --name redis redis
# Spawn mycelium node 1 with default settings. This also creates a TUN interface, though that is
# not needed for message exchange
mycelium
# Spawn mycelium node 2, connect to the first node
mycelium --key-file key.bin --peers tcp://127.0.0.1:9651 --disable-quic --disable-peer-discovery --api-addr 127.0.0.1:9989 --jsonrpc-addr 127.0.0.1:9990 --no-tun -t 8651
# Start the supervisor
supervisor --admin-secret admin123 --user-secret user123 --register-secret register123 --mycelium-url http://127.0.0.1:9990 --topic supervisor.rpc
# Start the coordinator
cargo run # (alternatively, run a precompiled binary if one is present)
# Finally, invoke the demo script
python3 scripts/supervisor_flow_demo.py
```

View File

@@ -1,7 +1,9 @@
pub mod mycelium_client;
pub mod supervisor_client;
pub mod supervisor_hub;
pub mod types;
pub use mycelium_client::{MyceliumClient, MyceliumClientError};
pub use supervisor_client::{SupervisorClient, SupervisorClientError};
pub use supervisor_hub::SupervisorHub;
pub use types::Destination;
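
For orientation, a minimal sketch of how these re-exports are meant to compose after this change. Paths and signatures are taken from the diffs that follow; the Mycelium URL, public key, secret, and job id are placeholders, and error handling is reduced to `?`.

```rust
use std::sync::Arc;

use herocoordinator::clients::{Destination, MyceliumClient, SupervisorClient, SupervisorHub};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // One shared Mycelium client, wrapped by a single global hub that owns the
    // background popMessage loop for the supervisor topic.
    let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990")?);
    let hub = SupervisorHub::new_with_client(mycelium, "supervisor.rpc");

    // Clients are built per destination; the topic now comes from the hub.
    let client = SupervisorClient::new_with_hub(
        hub,
        Destination::Pk(
            "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32".to_string(),
        ),
        Some("secret".to_string()),
    );

    // Typed wrappers await the supervisor reply and return (outbound_id, reply envelope).
    let (out_id, reply) = client.job_status_wait("example-job-id").await?;
    println!("outbound {out_id}: {reply}");
    Ok(())
}
```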

View File

@@ -86,13 +86,13 @@ impl MyceliumClient {
&self,
id_hex: &str,
) -> Result<TransportStatus, MyceliumClientError> {
let params = json!({ "id": id_hex });
let body = self.jsonrpc("messageStatus", params).await?;
let params = json!(id_hex);
let body = self.jsonrpc("getMessageInfo", params).await?;
let result = body.get("result").ok_or_else(|| {
MyceliumClientError::InvalidResponse(format!("missing result in response: {body}"))
})?;
// Accept both { status: "..."} and bare "..."
let status_str = if let Some(s) = result.get("status").and_then(|v| v.as_str()) {
// Accept both { state: "..."} and bare "..."
let status_str = if let Some(s) = result.get("state").and_then(|v| v.as_str()) {
s.to_string()
} else if let Some(s) = result.as_str() {
s.to_string()
@@ -101,18 +101,19 @@ impl MyceliumClient {
"unexpected result shape: {result}"
)));
};
Self::map_status(&status_str).ok_or_else(|| {
let status = Self::map_status(&status_str).ok_or_else(|| {
MyceliumClientError::InvalidResponse(format!("unknown status: {status_str}"))
})
});
tracing::info!(%id_hex, status = %status.as_ref().unwrap(), "queried messages status");
status
}
fn map_status(s: &str) -> Option<TransportStatus> {
match s {
"queued" => Some(TransportStatus::Queued),
"sent" => Some(TransportStatus::Sent),
"delivered" => Some(TransportStatus::Delivered),
"pending" => Some(TransportStatus::Queued),
"received" => Some(TransportStatus::Delivered),
"read" => Some(TransportStatus::Read),
"failed" => Some(TransportStatus::Failed),
"aborted" => Some(TransportStatus::Failed),
_ => None,
}
}
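
For reference, a small sketch of the two `result` shapes the updated parser accepts (assumed examples; a real getMessageInfo reply may carry additional fields):

```rust
use serde_json::json;

fn main() {
    // Object form: { "result": { "state": "received", ... } }
    // Bare-string form: { "result": "read" }
    for body in [
        json!({ "result": { "state": "received" } }),
        json!({ "result": "read" }),
    ] {
        let result = body.get("result").unwrap();
        let state = result
            .get("state")
            .and_then(|v| v.as_str())
            .or_else(|| result.as_str())
            .unwrap();
        println!("transport state: {state}"); // "received", then "read"
    }
}
```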

View File

@@ -1,20 +1,20 @@
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::{Value, json};
use thiserror::Error;
use tokio::time::timeout;
use crate::clients::{Destination, MyceliumClient, MyceliumClientError};
use crate::clients::{Destination, MyceliumClient, MyceliumClientError, SupervisorHub};
#[derive(Clone)]
pub struct SupervisorClient {
mycelium: Arc<MyceliumClient>, // Delegated Mycelium transport
hub: Arc<SupervisorHub>, // Global hub with background pop loop and shared id generator
destination: Destination, // ip or pk
topic: String, // e.g. "supervisor.rpc"
secret: Option<String>, // optional, required by several supervisor methods
id_counter: Arc<AtomicU64>, // JSON-RPC id generator (for inner supervisor requests)
}
#[derive(Debug, Error)]
@@ -46,24 +46,22 @@ impl From<MyceliumClientError> for SupervisorClientError {
}
impl SupervisorClient {
/// Preferred constructor: provide a shared Mycelium client.
pub fn new_with_client(
mycelium: Arc<MyceliumClient>,
/// Preferred constructor using a shared SupervisorHub (single global listener).
pub fn new_with_hub(
hub: Arc<SupervisorHub>,
destination: Destination,
topic: impl Into<String>,
secret: Option<String>,
) -> Self {
Self {
mycelium,
hub,
destination,
topic: topic.into(),
secret,
id_counter: Arc::new(AtomicU64::new(1)),
}
}
/// Backward-compatible constructor that builds a Mycelium client from base_url.
/// base_url defaults to Mycelium spec "http://127.0.0.1:8990" if empty.
/// Backward-compatible constructor that builds a new Hub from base_url/topic.
/// NOTE: This spawns a background popMessage listener for the given topic.
/// Prefer `new_with_hub` so the process has a single global hub.
pub fn new(
base_url: impl Into<String>,
destination: Destination,
@@ -78,8 +76,16 @@ impl SupervisorClient {
Ok(Self::new_with_client(mycelium, destination, topic, secret))
}
fn next_id(&self) -> u64 {
self.id_counter.fetch_add(1, Ordering::Relaxed)
/// Backward-compatible constructor that reuses an existing Mycelium client.
/// NOTE: This creates a new hub and its own background listener. Prefer `new_with_hub`.
pub fn new_with_client(
mycelium: Arc<MyceliumClient>,
destination: Destination,
topic: impl Into<String>,
secret: Option<String>,
) -> Self {
let hub = SupervisorHub::new_with_client(mycelium, topic);
Self::new_with_hub(hub, destination, secret)
}
/// Internal helper used by tests to inspect dst JSON shape.
@@ -93,7 +99,7 @@ impl SupervisorClient {
fn build_supervisor_payload(&self, method: &str, params: Value) -> Value {
json!({
"jsonrpc": "2.0",
"id": self.next_id(),
"id": self.hub.next_id(),
"method": method,
"params": params,
})
@@ -128,50 +134,37 @@ impl SupervisorClient {
.map(|s| s.to_string())
}
/// Generic call: build supervisor JSON-RPC message, send via Mycelium pushMessage, return outbound message id (hex).
pub async fn call(&self, method: &str, params: Value) -> Result<String, SupervisorClientError> {
let inner = self.build_supervisor_payload(method, params);
let payload_b64 = Self::encode_payload(&inner)?;
let result = self
.mycelium
.push_message(
&self.destination,
&Self::encode_topic(self.topic.as_bytes()),
&payload_b64,
None,
)
.await?;
if let Some(id) = MyceliumClient::extract_message_id_from_result(&result) {
return Ok(id);
}
// Some servers might return the oneOf wrapped, handle len==1 array defensively (not in spec but resilient)
if let Some(arr) = result.as_array()
&& arr.len() == 1
&& let Some(id) = MyceliumClient::extract_message_id_from_result(&arr[0])
{
return Ok(id);
}
Err(SupervisorClientError::InvalidResponse(format!(
"result did not contain message id: {result}"
)))
fn need_secret(&self) -> Result<&str, SupervisorClientError> {
self.secret
.as_deref()
.ok_or(SupervisorClientError::MissingSecret)
}
/// Variant of call that also returns the inner supervisor JSON-RPC id used in the payload.
/// This id is required to correlate asynchronous popMessage replies coming from Mycelium.
pub async fn call_with_ids(
// -----------------------------
// Core: request-reply call via Hub with default 10s timeout
// -----------------------------
/// Send a supervisor JSON-RPC request and await its reply via the Hub.
/// Returns (outbound_message_id, reply_envelope_json).
pub async fn call_with_reply_timeout(
&self,
method: &str,
params: Value,
) -> Result<(String, u64), SupervisorClientError> {
let inner_id = self.next_id();
timeout_secs: u64,
) -> Result<(String, Value), SupervisorClientError> {
let inner_id = self.hub.next_id();
// Register waiter before sending to avoid race
let rx = self.hub.register_waiter(inner_id).await;
let inner = self.build_supervisor_payload_with_id(method, params, inner_id);
let payload_b64 = Self::encode_payload(&inner)?;
let result = self
.mycelium
.hub
.mycelium()
.push_message(
&self.destination,
&Self::encode_topic(self.topic.as_bytes()),
&Self::encode_topic(self.hub.topic().as_bytes()),
&payload_b64,
None,
)
@@ -185,89 +178,210 @@ impl SupervisorClient {
{
id
} else {
// Clean pending entry to avoid leak
let _ = self.hub.remove_waiter(inner_id).await;
return Err(SupervisorClientError::InvalidResponse(format!(
"result did not contain message id: {result}"
)));
};
Ok((out_id, inner_id))
let d = Duration::from_secs(timeout_secs);
match timeout(d, rx).await {
Ok(Ok(reply)) => Ok((out_id, reply)),
Ok(Err(_canceled)) => Err(SupervisorClientError::InvalidResponse(
"oneshot canceled before receiving reply".into(),
)),
Err(_elapsed) => {
// Cleanup on timeout
let _ = self.hub.remove_waiter(inner_id).await;
Err(SupervisorClientError::TransportTimeout)
}
}
}
/// Synchronous variant: wait for a JSON-RPC reply via Mycelium reply_timeout, and return the inner JSON-RPC "result".
/// If the supervisor returns an error object, map to RpcError.
pub async fn call_sync(
/// Send and await with default 10s timeout.
pub async fn call_with_reply(
&self,
method: &str,
params: Value,
reply_timeout_secs: u64,
) -> Result<Value, SupervisorClientError> {
let inner = self.build_supervisor_payload(method, params);
let payload_b64 = Self::encode_payload(&inner)?;
let result = self
.mycelium
.push_message(
&self.destination,
&Self::encode_topic(self.topic.as_bytes()),
&payload_b64,
Some(reply_timeout_secs),
)
.await?;
// Expect an InboundMessage-like with a base64 payload containing the supervisor JSON-RPC response
let payload_field = if let Some(p) = result.get("payload").and_then(|v| v.as_str()) {
p.to_string()
} else if let Some(arr) = result.as_array() {
// Defensive: handle single-element array shape
if let Some(one) = arr.get(0) {
one.get("payload")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.ok_or_else(|| {
SupervisorClientError::InvalidResponse(format!(
"missing payload in result: {result}"
))
})?
} else {
return Err(SupervisorClientError::TransportTimeout);
}
} else {
// No payload => no reply received within timeout (Mycelium would have returned just an id)
return Err(SupervisorClientError::TransportTimeout);
};
let raw = BASE64_STANDARD
.decode(payload_field.as_bytes())
.map_err(|e| {
SupervisorClientError::InvalidResponse(format!("invalid base64 payload: {e}"))
})?;
let rpc_resp: Value = serde_json::from_slice(&raw)?;
if let Some(err) = rpc_resp.get("error") {
return Err(SupervisorClientError::RpcError(err.to_string()));
}
let res = rpc_resp.get("result").ok_or_else(|| {
SupervisorClientError::InvalidResponse(format!(
"missing result in supervisor reply: {rpc_resp}"
))
})?;
Ok(res.clone())
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply_timeout(method, params, 60).await
}
fn need_secret(&self) -> Result<&str, SupervisorClientError> {
self.secret
.as_deref()
.ok_or(SupervisorClientError::MissingSecret)
/// Back-compat: Send and await a reply but return only the outbound id (discard reply).
/// This keeps existing call sites working while the system migrates to reply-aware paths.
pub async fn call(&self, method: &str, params: Value) -> Result<String, SupervisorClientError> {
let (out_id, _reply) = self.call_with_reply(method, params).await?;
Ok(out_id)
}
// -----------------------------
// Typed wrappers for Supervisor API
// Asynchronous-only: returns outbound message id
// Typed wrappers for Supervisor API (await replies)
// -----------------------------
// Runners
pub async fn list_runners_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("list_runners", json!([])).await
}
pub async fn register_runner_wait(
&self,
name: impl Into<String>,
queue: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"name": name.into(),
"queue": queue.into()
}]);
self.call_with_reply("register_runner", params).await
}
pub async fn remove_runner_wait(
&self,
actor_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("remove_runner", json!([actor_id.into()]))
.await
}
pub async fn start_runner_wait(
&self,
actor_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("start_runner", json!([actor_id.into()]))
.await
}
pub async fn stop_runner_wait(
&self,
actor_id: impl Into<String>,
force: bool,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("stop_runner", json!([actor_id.into(), force]))
.await
}
pub async fn get_runner_status_wait(
&self,
actor_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("get_runner_status", json!([actor_id.into()]))
.await
}
pub async fn get_all_runner_status_wait(
&self,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("get_all_runner_status", json!([]))
.await
}
pub async fn start_all_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("start_all", json!([])).await
}
pub async fn stop_all_wait(
&self,
force: bool,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("stop_all", json!([force])).await
}
pub async fn get_all_status_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("get_all_status", json!([])).await
}
// Jobs (await)
pub async fn jobs_create_wait(
&self,
job: Value,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job": job
}]);
self.call_with_reply("jobs.create", params).await
}
pub async fn jobs_list_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("jobs.list", json!([])).await
}
pub async fn job_run_wait(&self, job: Value) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job": job
}]);
self.call_with_reply("job.run", params).await
}
pub async fn job_start_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job_id": job_id.into()
}]);
self.call_with_reply("job.start", params).await
}
pub async fn job_status_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("job.status", json!([job_id.into()]))
.await
}
pub async fn job_result_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("job.result", json!([job_id.into()]))
.await
}
pub async fn job_stop_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job_id": job_id.into()
}]);
self.call_with_reply("job.stop", params).await
}
pub async fn job_delete_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job_id": job_id.into()
}]);
self.call_with_reply("job.delete", params).await
}
pub async fn rpc_discover_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("rpc.discover", json!([])).await
}
// -----------------------------
// Backward-compatible variants returning only outbound id (discarding reply)
// -----------------------------
pub async fn list_runners(&self) -> Result<String, SupervisorClientError> {
self.call("list_runners", json!([])).await
let (id, _) = self.list_runners_wait().await?;
Ok(id)
}
pub async fn register_runner(
@@ -275,27 +389,24 @@ impl SupervisorClient {
name: impl Into<String>,
queue: impl Into<String>,
) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"name": name.into(),
"queue": queue.into()
}]);
self.call("register_runner", params).await
let (id, _) = self.register_runner_wait(name, queue).await?;
Ok(id)
}
pub async fn remove_runner(
&self,
actor_id: impl Into<String>,
) -> Result<String, SupervisorClientError> {
self.call("remove_runner", json!([actor_id.into()])).await
let (id, _) = self.remove_runner_wait(actor_id).await?;
Ok(id)
}
pub async fn start_runner(
&self,
actor_id: impl Into<String>,
) -> Result<String, SupervisorClientError> {
self.call("start_runner", json!([actor_id.into()])).await
let (id, _) = self.start_runner_wait(actor_id).await?;
Ok(id)
}
pub async fn stop_runner(
@@ -303,184 +414,96 @@ impl SupervisorClient {
actor_id: impl Into<String>,
force: bool,
) -> Result<String, SupervisorClientError> {
self.call("stop_runner", json!([actor_id.into(), force]))
.await
let (id, _) = self.stop_runner_wait(actor_id, force).await?;
Ok(id)
}
pub async fn get_runner_status(
&self,
actor_id: impl Into<String>,
) -> Result<String, SupervisorClientError> {
self.call("get_runner_status", json!([actor_id.into()]))
.await
let (id, _) = self.get_runner_status_wait(actor_id).await?;
Ok(id)
}
pub async fn get_all_runner_status(&self) -> Result<String, SupervisorClientError> {
self.call("get_all_runner_status", json!([])).await
let (id, _) = self.get_all_runner_status_wait().await?;
Ok(id)
}
pub async fn start_all(&self) -> Result<String, SupervisorClientError> {
self.call("start_all", json!([])).await
let (id, _) = self.start_all_wait().await?;
Ok(id)
}
pub async fn stop_all(&self, force: bool) -> Result<String, SupervisorClientError> {
self.call("stop_all", json!([force])).await
let (id, _) = self.stop_all_wait(force).await?;
Ok(id)
}
pub async fn get_all_status(&self) -> Result<String, SupervisorClientError> {
self.call("get_all_status", json!([])).await
let (id, _) = self.get_all_status_wait().await?;
Ok(id)
}
// Jobs
pub async fn jobs_create(&self, job: Value) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job": job
}]);
self.call("jobs.create", params).await
let (id, _) = self.jobs_create_wait(job).await?;
Ok(id)
}
pub async fn jobs_list(&self) -> Result<String, SupervisorClientError> {
self.call("jobs.list", json!([])).await
let (id, _) = self.jobs_list_wait().await?;
Ok(id)
}
pub async fn job_run(&self, job: Value) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job": job
}]);
self.call("job.run", params).await
}
/// Typed wrapper returning both outbound Mycelium id and inner supervisor JSON-RPC id.
pub async fn job_run_with_ids(
&self,
job: Value,
) -> Result<(String, u64), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job": job
}]);
self.call_with_ids("job.run", params).await
let (id, _) = self.job_run_wait(job).await?;
Ok(id)
}
pub async fn job_start(
&self,
job_id: impl Into<String>,
) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job_id": job_id.into()
}]);
self.call("job.start", params).await
let (id, _) = self.job_start_wait(job_id).await?;
Ok(id)
}
pub async fn job_status(
&self,
job_id: impl Into<String>,
) -> Result<String, SupervisorClientError> {
self.call("job.status", json!([job_id.into()])).await
}
/// Synchronous job.status: waits for the supervisor to reply and returns the status string.
/// The supervisor result may be an object with { status: "..." } or a bare string.
pub async fn job_status_sync(
&self,
job_id: impl Into<String>,
reply_timeout_secs: u64,
) -> Result<String, SupervisorClientError> {
let res = self
.call_sync("job.status", json!([job_id.into()]), reply_timeout_secs)
.await?;
let status = if let Some(s) = res.get("status").and_then(|v| v.as_str()) {
s.to_string()
} else if let Some(s) = res.as_str() {
s.to_string()
} else {
return Err(SupervisorClientError::InvalidResponse(format!(
"unexpected job.status result shape: {res}"
)));
};
Ok(status)
let (id, _) = self.job_status_wait(job_id).await?;
Ok(id)
}
pub async fn job_result(
&self,
job_id: impl Into<String>,
) -> Result<String, SupervisorClientError> {
self.call("job.result", json!([job_id.into()])).await
}
/// Synchronous job.result: waits for the supervisor to reply and returns a map
/// containing exactly one of:
/// - {"success": "..."} on success
/// - {"error": "..."} on error reported by the runner
/// Some servers may return a bare string; we treat that as {"success": "<string>"}.
pub async fn job_result_sync(
&self,
job_id: impl Into<String>,
reply_timeout_secs: u64,
) -> Result<std::collections::HashMap<String, String>, SupervisorClientError> {
let res = self
.call_sync("job.result", json!([job_id.into()]), reply_timeout_secs)
.await?;
use std::collections::HashMap;
let mut out: HashMap<String, String> = HashMap::new();
if let Some(obj) = res.as_object() {
if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
out.insert("success".to_string(), s.to_string());
return Ok(out);
}
if let Some(s) = obj.get("error").and_then(|v| v.as_str()) {
out.insert("error".to_string(), s.to_string());
return Ok(out);
}
return Err(SupervisorClientError::InvalidResponse(format!(
"unexpected job.result result shape: {res}"
)));
} else if let Some(s) = res.as_str() {
out.insert("success".to_string(), s.to_string());
return Ok(out);
}
Err(SupervisorClientError::InvalidResponse(format!(
"unexpected job.result result shape: {res}"
)))
let (id, _) = self.job_result_wait(job_id).await?;
Ok(id)
}
pub async fn job_stop(
&self,
job_id: impl Into<String>,
) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job_id": job_id.into()
}]);
self.call("job.stop", params).await
let (id, _) = self.job_stop_wait(job_id).await?;
Ok(id)
}
pub async fn job_delete(
&self,
job_id: impl Into<String>,
) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job_id": job_id.into()
}]);
self.call("job.delete", params).await
let (id, _) = self.job_delete_wait(job_id).await?;
Ok(id)
}
// Discovery
pub async fn rpc_discover(&self) -> Result<String, SupervisorClientError> {
self.call("rpc.discover", json!([])).await
let (id, _) = self.rpc_discover_wait().await?;
Ok(id)
}
}
@@ -493,27 +516,27 @@ mod tests {
use std::net::IpAddr;
fn mk_client() -> SupervisorClient {
// Uses the legacy constructor but will not issue real network calls in these tests.
SupervisorClient::new(
"http://127.0.0.1:8990",
// Build a hub but it won't issue real network calls in these serializer-only tests.
let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap());
let hub = SupervisorHub::new_with_client(mycelium, "supervisor.rpc");
SupervisorClient::new_with_hub(
hub,
Destination::Pk(
"bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32".to_string(),
),
"supervisor.rpc",
Some("secret".to_string()),
)
.unwrap()
}
#[test]
fn builds_dst_ip_and_pk() {
let c_ip = SupervisorClient::new(
"http://127.0.0.1:8990",
let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap());
let hub_ip = SupervisorHub::new_with_client(mycelium.clone(), "supervisor.rpc");
let c_ip = SupervisorClient::new_with_hub(
hub_ip,
Destination::Ip("2001:db8::1".parse().unwrap()),
"supervisor.rpc",
None,
)
.unwrap();
);
let v_ip = c_ip.build_dst();
assert_eq!(v_ip.get("ip").unwrap().as_str().unwrap(), "2001:db8::1");

View File

@@ -0,0 +1,143 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::Value;
use tokio::sync::{Mutex, oneshot};
use crate::clients::mycelium_client::MyceliumClient;
/// Global hub that:
/// - Owns a single MyceliumClient
/// - Spawns a background popMessage loop filtered by topic
/// - Correlates supervisor JSON-RPC replies by inner id to waiting callers via oneshot channels
#[derive(Clone)]
pub struct SupervisorHub {
mycelium: Arc<MyceliumClient>,
topic: String,
pending: Arc<Mutex<HashMap<u64, oneshot::Sender<Value>>>>,
id_counter: Arc<AtomicU64>,
}
impl SupervisorHub {
/// Create a new hub and start the background popMessage task.
/// - base_url: Mycelium JSON-RPC endpoint, e.g. "http://127.0.0.1:8990"
/// - topic: plain-text topic (e.g., "supervisor.rpc")
pub fn new(
base_url: impl Into<String>,
topic: impl Into<String>,
) -> Result<Arc<Self>, crate::clients::MyceliumClientError> {
let myc = Arc::new(MyceliumClient::new(base_url)?);
Ok(Self::new_with_client(myc, topic))
}
/// Variant that reuses an existing Mycelium client.
pub fn new_with_client(mycelium: Arc<MyceliumClient>, topic: impl Into<String>) -> Arc<Self> {
let hub = Arc::new(Self {
mycelium,
topic: topic.into(),
pending: Arc::new(Mutex::new(HashMap::new())),
id_counter: Arc::new(AtomicU64::new(1)),
});
Self::spawn_pop_loop(hub.clone());
hub
}
fn spawn_pop_loop(hub: Arc<Self>) {
tokio::spawn(async move {
loop {
match hub.mycelium.pop_message(Some(false), Some(20), None).await {
Ok(Some(inb)) => {
// Extract and decode payload
let Some(payload_b64) = inb.get("payload").and_then(|v| v.as_str()) else {
// Not a payload-bearing message; ignore
continue;
};
let Ok(raw) = BASE64_STANDARD.decode(payload_b64.as_bytes()) else {
tracing::warn!(target: "supervisor_hub", "Failed to decode inbound payload base64");
continue;
};
let Ok(rpc): Result<Value, _> = serde_json::from_slice(&raw) else {
tracing::warn!(target: "supervisor_hub", "Failed to parse inbound payload JSON");
continue;
};
// Extract inner JSON-RPC id
let inner_id_u64 = match rpc.get("id") {
Some(Value::Number(n)) => n.as_u64(),
Some(Value::String(s)) => s.parse::<u64>().ok(),
_ => None,
};
if let Some(inner_id) = inner_id_u64 {
// Try to deliver to a pending waiter
let sender_opt = {
let mut guard = hub.pending.lock().await;
guard.remove(&inner_id)
};
if let Some(tx) = sender_opt {
let _ = tx.send(rpc);
} else {
tracing::warn!(
target: "supervisor_hub",
inner_id,
payload = %String::from_utf8_lossy(&raw),
"Unmatched supervisor reply; no waiter registered"
);
}
} else {
tracing::warn!(target: "supervisor_hub", "Inbound supervisor reply missing id; dropping");
}
}
Ok(None) => {
// No message; continue polling
continue;
}
Err(e) => {
tracing::warn!(target: "supervisor_hub", error = %e, "popMessage error; backing off");
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
}
}
}
});
}
/// Allocate a new inner supervisor JSON-RPC id.
pub fn next_id(&self) -> u64 {
self.id_counter.fetch_add(1, Ordering::Relaxed)
}
/// Register a oneshot sender for the given inner id and return the receiver side.
pub async fn register_waiter(&self, inner_id: u64) -> oneshot::Receiver<Value> {
let (tx, rx) = oneshot::channel();
let mut guard = self.pending.lock().await;
guard.insert(inner_id, tx);
rx
}
/// Remove a pending waiter for a given id (used to cleanup on timeout).
pub async fn remove_waiter(&self, inner_id: u64) -> Option<oneshot::Sender<Value>> {
let mut guard = self.pending.lock().await;
guard.remove(&inner_id)
}
/// Access to underlying Mycelium client (for pushMessage).
pub fn mycelium(&self) -> Arc<MyceliumClient> {
self.mycelium.clone()
}
/// Access configured topic.
pub fn topic(&self) -> &str {
&self.topic
}
}
impl std::fmt::Debug for SupervisorHub {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SupervisorHub")
.field("topic", &self.topic)
.finish()
}
}
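
A minimal sketch of the correlation pattern the hub enables, mirroring `call_with_reply_timeout` above: register the waiter before sending so the background pop loop cannot race the reply, then await with a timeout and clean up the pending entry if it elapses. The push step itself is elided; the URL and topic are assumptions.

```rust
use std::time::Duration;

use herocoordinator::clients::SupervisorHub;
use tokio::time::timeout;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let hub = SupervisorHub::new("http://127.0.0.1:8990", "supervisor.rpc")?;

    let inner_id = hub.next_id();
    // Register the waiter first to avoid racing the background popMessage loop.
    let rx = hub.register_waiter(inner_id).await;

    // ... build a supervisor JSON-RPC payload carrying `inner_id` and push it via
    // hub.mycelium().push_message(...) here ...

    match timeout(Duration::from_secs(10), rx).await {
        Ok(Ok(reply)) => println!("supervisor reply: {reply}"),
        Ok(Err(_canceled)) => eprintln!("waiter canceled before a reply arrived"),
        Err(_elapsed) => {
            // On timeout, remove the pending entry so the map does not leak.
            let _ = hub.remove_waiter(inner_id).await;
        }
    }
    Ok(())
}
```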

View File

@@ -3,7 +3,7 @@ use std::collections::{HashMap, HashSet, VecDeque};
use std::fmt;
use crate::{
models::{Flow, Job, ScriptType},
models::{Flow, Job, JobStatus, ScriptType},
storage::RedisDriver,
};
@@ -212,6 +212,41 @@ pub async fn build_flow_dag(
edges.sort_unstable();
reverse_edges.sort_unstable();
// Populate runtime execution state from persisted Job.status()
let mut started_set: HashSet<u32> = HashSet::new();
let mut completed_set: HashSet<u32> = HashSet::new();
let mut error_ids: Vec<u32> = Vec::new();
for (&jid, job) in &jobs {
match job.status() {
JobStatus::Finished => {
completed_set.insert(jid);
}
JobStatus::Started => {
started_set.insert(jid);
}
JobStatus::Dispatched => {
// Consider Dispatched as "in-flight" for DAG runtime started set,
// so queued/running work is visible in periodic snapshots.
started_set.insert(jid);
}
JobStatus::Error => {
error_ids.push(jid);
}
JobStatus::WaitingForPrerequisites => {
// Neither started nor completed
}
}
}
// Choose a deterministic failed job if any errors exist (smallest job id)
let failed_job = if error_ids.is_empty() {
None
} else {
error_ids.sort_unstable();
Some(error_ids[0])
};
let dag = FlowDag {
flow_id,
caller_id,
@@ -222,9 +257,9 @@ pub async fn build_flow_dag(
roots,
leaves,
levels,
started: HashSet::new(),
completed: HashSet::new(),
failed_job: None,
started: started_set,
completed: completed_set,
failed_job,
};
Ok(dag)

View File

@@ -99,21 +99,24 @@ async fn main() {
// Shared application state
let state = Arc::new(herocoordinator::rpc::AppState::new(service));
// Start router workers (auto-discovered contexts) and a single global inbound listener
// Start router workers (auto-discovered contexts) using a single global SupervisorHub (no separate inbound listener)
{
let base_url = format!("http://{}:{}", cli.mycelium_ip, cli.mycelium_port);
let hub = herocoordinator::clients::SupervisorHub::new(
base_url.clone(),
"supervisor.rpc".to_string(),
)
.expect("Failed to initialize SupervisorHub");
let cfg = herocoordinator::router::RouterConfig {
context_ids: Vec::new(), // ignored by start_router_auto
concurrency: 32,
base_url,
topic: "supervisor.rpc".to_string(),
sup_hub: hub.clone(),
transport_poll_interval_secs: 2,
transport_poll_timeout_secs: 300,
};
// Global inbound listener for supervisor replies via Mycelium popMessage
let _inbound_handle =
herocoordinator::router::start_inbound_listener(service_for_router.clone(), cfg.clone());
// Per-context outbound delivery loops
// Per-context outbound delivery loops (replies handled by SupervisorHub)
let _auto_handle = herocoordinator::router::start_router_auto(service_for_router, cfg);
}

View File

@@ -59,6 +59,18 @@ pub enum TransportStatus {
Failed,
}
impl std::fmt::Display for TransportStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TransportStatus::Queued => f.write_str("queued"),
TransportStatus::Sent => f.write_str("sent"),
TransportStatus::Delivered => f.write_str("delivered"),
TransportStatus::Read => f.write_str("read"),
TransportStatus::Failed => f.write_str("failed"),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MessageFormatType {
Html,
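
A tiny illustration of the new `Display` impl, which is what lets the tracing calls elsewhere in this diff log a status with `%status`. It assumes the crate's `models` module is public, as it is used elsewhere in the codebase.

```rust
use herocoordinator::models::TransportStatus;

fn main() {
    // Display yields the lowercase wire-style strings.
    assert_eq!(TransportStatus::Delivered.to_string(), "delivered");
    println!("{}", TransportStatus::Read); // prints "read"
}
```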

View File

@@ -1,12 +1,17 @@
use std::{collections::HashSet, sync::Arc};
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::{Value, json};
use tokio::sync::Semaphore;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use tokio::sync::{Mutex, Semaphore};
use crate::{
clients::{Destination, MyceliumClient, SupervisorClient},
clients::{Destination, MyceliumClient, SupervisorClient, SupervisorHub},
models::{Job, JobStatus, Message, MessageStatus, ScriptType, TransportStatus},
service::AppService,
};
@@ -18,11 +23,89 @@ pub struct RouterConfig {
pub concurrency: usize,
pub base_url: String, // e.g. http://127.0.0.1:8990
pub topic: String, // e.g. "supervisor.rpc"
pub sup_hub: Arc<SupervisorHub>, // global supervisor hub for replies
// Transport status polling configuration
pub transport_poll_interval_secs: u64, // e.g. 2
pub transport_poll_timeout_secs: u64, // e.g. 300 (5 minutes)
}
/*
SupervisorClient reuse cache (Router-local):
Rationale:
- SupervisorClient maintains an internal JSON-RPC id_counter per instance.
- Rebuilding a client for each message resets this counter, causing inner JSON-RPC ids to restart at 1.
- We reuse one SupervisorClient per (destination, topic, secret) to preserve monotonically increasing ids.
Scope:
- Cache is per Router loop (and a separate one for the inbound listener).
- If cross-loop/process reuse becomes necessary later, promote to a process-global cache.
Keying:
- Key: destination + topic + secret-presence (secret content hashed; not stored in plaintext).
Concurrency:
- tokio::Mutex protects a HashMap<String, Arc<SupervisorClient>>.
- Values are Arc so call sites clone cheaply and share the same id_counter.
*/
#[derive(Clone)]
struct SupervisorClientCache {
map: Arc<Mutex<HashMap<String, Arc<SupervisorClient>>>>,
}
impl SupervisorClientCache {
fn new() -> Self {
Self {
map: Arc::new(Mutex::new(HashMap::new())),
}
}
fn make_key(dest: &Destination, topic: &str, secret: &Option<String>) -> String {
let dst = match dest {
Destination::Ip(ip) => format!("ip:{ip}"),
Destination::Pk(pk) => format!("pk:{pk}"),
};
// Hash the secret to avoid storing plaintext in keys while still differentiating values
let sec_hash = match secret {
Some(s) if !s.is_empty() => {
let mut hasher = DefaultHasher::new();
s.hash(&mut hasher);
format!("s:{}", hasher.finish())
}
_ => "s:none".to_string(),
};
format!("{dst}|t:{topic}|{sec_hash}")
}
async fn get_or_create(
&self,
hub: Arc<SupervisorHub>,
dest: Destination,
topic: String,
secret: Option<String>,
) -> Arc<SupervisorClient> {
let key = Self::make_key(&dest, &topic, &secret);
{
let guard = self.map.lock().await;
if let Some(existing) = guard.get(&key) {
tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup");
return existing.clone();
}
}
let mut guard = self.map.lock().await;
if let Some(existing) = guard.get(&key) {
tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup (double-checked)");
return existing.clone();
}
let client = Arc::new(SupervisorClient::new_with_hub(hub, dest, secret.clone()));
guard.insert(key, client.clone());
tracing::debug!(target: "router", cache="supervisor", hit=false, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache insert");
client
}
}
/// Start background router loops, one per context.
/// Each loop:
/// - BRPOP msg_out with 1s timeout
@@ -38,16 +121,11 @@ pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::
let handle = tokio::spawn(async move {
let sem = Arc::new(Semaphore::new(cfg_cloned.concurrency));
// Create a shared Mycelium client for this context loop (retry until available)
let mycelium = loop {
match MyceliumClient::new(cfg_cloned.base_url.clone()) {
Ok(c) => break Arc::new(c),
Err(e) => {
error!(context_id=ctx_id, error=%e, "MyceliumClient init error");
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
}
};
// Use the global SupervisorHub and its Mycelium client
let sup_hub = cfg_cloned.sup_hub.clone();
let mycelium = sup_hub.mycelium();
let cache = Arc::new(SupervisorClientCache::new());
loop {
// Pop next message key (blocking with timeout)
@@ -69,11 +147,20 @@ pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::
let cfg_task = cfg_cloned.clone();
tokio::spawn({
let mycelium = mycelium.clone();
let cache = cache.clone();
let sup_hub = sup_hub.clone();
async move {
// Ensure permit is dropped at end of task
let _permit = permit;
if let Err(e) =
deliver_one(&service_task, &cfg_task, ctx_id, &key, mycelium)
if let Err(e) = deliver_one(
&service_task,
&cfg_task,
ctx_id,
&key,
mycelium,
sup_hub,
cache.clone(),
)
.await
{
error!(context_id=ctx_id, key=%key, error=%e, "Delivery error");
@@ -104,6 +191,8 @@ async fn deliver_one(
context_id: u32,
msg_key: &str,
mycelium: Arc<MyceliumClient>,
sup_hub: Arc<SupervisorHub>,
cache: Arc<SupervisorClientCache>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Parse "message:{caller_id}:{id}"
let (caller_id, id) = parse_message_key(msg_key)
@@ -143,12 +232,14 @@ async fn deliver_one(
let dest_for_poller = dest.clone();
let topic_for_poller = cfg.topic.clone();
let secret_for_poller = runner.secret.clone();
let client = SupervisorClient::new_with_client(
mycelium.clone(),
let client = cache
.get_or_create(
sup_hub.clone(),
dest.clone(),
cfg.topic.clone(),
runner.secret.clone(),
);
)
.await;
// Build supervisor method and params from Message
let method = msg.message.clone();
@@ -157,15 +248,15 @@ async fn deliver_one(
// Send
// If this is a job.run and we have a secret configured on the client,
// prefer the typed wrapper that injects the secret into inner supervisor params,
// and also capture the inner supervisor JSON-RPC id for correlation.
let (out_id, inner_id_opt) = if method == "job.run" {
// and await the reply to capture job_queued immediately.
let (out_id, reply_opt) = if method == "job.run" {
if let Some(j) = msg.job.first() {
let jv = job_to_json(j)?;
// Returns (outbound message id, inner supervisor JSON-RPC id)
let (out, inner) = client.job_run_with_ids(jv).await?;
(out, Some(inner))
// Returns (outbound message id, reply envelope)
let (out, reply) = client.job_run_wait(jv).await?;
(out, Some(reply))
} else {
// Fallback: no embedded job, use the generic call
// Fallback: no embedded job, use the generic call (await reply, discard)
let out = client.call(&method, params).await?;
(out, None)
}
@@ -190,12 +281,58 @@ async fn deliver_one(
.update_message_status(context_id, caller_id, id, MessageStatus::Acknowledged)
.await?;
// Record correlation (inner supervisor JSON-RPC id -> job/message) for inbound popMessage handling
if let (Some(inner_id), Some(job_id)) = (inner_id_opt, job_id_opt) {
// If we got a job.run reply, interpret job_queued immediately
if let (Some(reply), Some(job_id)) = (reply_opt, msg.job.first().map(|j| j.id)) {
let result_opt = reply.get("result");
let error_opt = reply.get("error");
// Handle job.run success (job_queued)
let is_job_queued = result_opt
.and_then(|res| {
if res.get("job_queued").is_some() {
Some(true)
} else if let Some(s) = res.as_str() {
Some(s == "job_queued")
} else {
None
}
})
.unwrap_or(false);
if is_job_queued {
let _ = service
.supcorr_set(inner_id, context_id, caller_id, job_id, id)
.update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Dispatched)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Supervisor reply for job {}: job_queued (processed synchronously)",
job_id
)],
)
.await;
} else if let Some(err_obj) = error_opt {
let _ = service
.update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Error)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Supervisor error for job {}: {} (processed synchronously)",
job_id, err_obj
)],
)
.await;
}
}
// No correlation map needed; replies are handled synchronously via SupervisorHub
// Spawn transport-status poller
{
@@ -204,12 +341,6 @@ async fn deliver_one(
let poll_timeout = std::time::Duration::from_secs(cfg.transport_poll_timeout_secs);
let out_id_cloned = out_id.clone();
let mycelium = mycelium.clone();
// Determine reply timeout for supervisor job.result: prefer message.timeout_result, fallback to router config timeout
let job_result_reply_timeout: u64 = if msg.timeout_result > 0 {
msg.timeout_result as u64
} else {
cfg.transport_poll_timeout_secs
};
tokio::spawn(async move {
let start = std::time::Instant::now();
@@ -221,6 +352,8 @@ async fn deliver_one(
let job_id_opt = job_id_opt;
let mut last_status: Option<TransportStatus> = Some(TransportStatus::Sent);
// Ensure we only request supervisor job.status or job.result once per outbound message
let mut requested_job_check: bool = false;
loop {
if start.elapsed() >= poll_timeout {
@@ -253,74 +386,229 @@ async fn deliver_one(
// Stop on terminal states
if matches!(s, TransportStatus::Delivered | TransportStatus::Read) {
// On Read, fetch supervisor job.status and update local job/message if terminal
if matches!(s, TransportStatus::Read)
&& let Some(job_id) = job_id_opt
{
let sup = SupervisorClient::new_with_client(
client.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
);
match sup.job_status_sync(job_id.to_string(), 10).await {
Ok(remote_status) => {
if let Some((mapped, terminal)) =
map_supervisor_job_status(&remote_status)
{
if terminal {
if let Some(job_id) = job_id_opt {
// First consult Redis for the latest job state in case we already have a terminal update
match service_poll.load_job(context_id, caller_id, job_id).await {
Ok(job) => {
// Promote to Started as soon as transport is delivered/read,
// if currently Dispatched or WaitingForPrerequisites.
// This makes DAG.started reflect "in-flight" work even when jobs
// complete too quickly to observe an intermediate supervisor "running" status.
if matches!(
job.status(),
JobStatus::Dispatched
| JobStatus::WaitingForPrerequisites
) {
let _ = service_poll
.update_job_status_unchecked(
context_id,
caller_id,
job_id,
mapped.clone(),
JobStatus::Started,
)
.await;
// After terminal status, fetch supervisor job.result and store into Job.result
let sup = SupervisorClient::new_with_client(
client.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
);
match sup
.job_result_sync(
job_id.to_string(),
job_result_reply_timeout,
)
.await
{
Ok(result_map) => {
// Persist the result into the Job.result map (merge)
let _ = service_poll
.update_job_result_merge_unchecked(
context_id,
caller_id,
job_id,
result_map.clone(),
)
.await;
// Log which key was stored (success or error)
let key = result_map
.keys()
.next()
.cloned()
.unwrap_or_else(|| {
"unknown".to_string()
});
}
match job.status() {
JobStatus::Finished | JobStatus::Error => {
// Local job is already terminal; skip supervisor job.status
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} ({})",
job_id, key
"Local job {} status is terminal ({:?}); skipping supervisor job.status",
job_id,
job.status()
)],
)
.await;
// If result is still empty, immediately request supervisor job.result
if job.result.is_empty() {
let sup = cache
.get_or_create(
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
match sup
.job_result_wait(job_id.to_string())
.await
{
Ok((_out2, reply2)) => {
// Interpret reply synchronously: success/error/bare string
let res = reply2.get("result");
if let Some(obj) =
res.and_then(|v| v.as_object())
{
if let Some(s) = obj
.get("success")
.and_then(|v| v.as_str())
{
let mut patch = std::collections::HashMap::new();
patch.insert(
"success".to_string(),
s.to_string(),
);
let _ = service_poll
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
// Also mark job as Finished so the flow can progress (ignore invalid transitions)
let _ = service_poll
.update_job_status_unchecked(
context_id, caller_id, job_id, JobStatus::Finished,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Updated job {} status to Finished (sync)", job_id
)],
)
.await;
// Existing log about storing result
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} (success, sync)",
job_id
)],
)
.await;
} else if let Some(s) = obj
.get("error")
.and_then(|v| v.as_str())
{
let mut patch = std::collections::HashMap::new();
patch.insert(
"error".to_string(),
s.to_string(),
);
let _ = service_poll
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
// Also mark job as Error so the flow can handle failure (ignore invalid transitions)
let _ = service_poll
.update_job_status_unchecked(
context_id, caller_id, job_id, JobStatus::Error,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Updated job {} status to Error (sync)", job_id
)],
)
.await;
// Existing log about storing result
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} (error, sync)",
job_id
)],
)
.await;
}
} else if let Some(s) =
res.and_then(|v| v.as_str())
{
let mut patch =
std::collections::HashMap::new(
);
patch.insert(
"success".to_string(),
s.to_string(),
);
let _ = service_poll
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
// Also mark job as Finished so the flow can progress (ignore invalid transitions)
let _ = service_poll
.update_job_status_unchecked(
context_id,
caller_id,
job_id,
JobStatus::Finished,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Updated job {} status to Finished (sync)", job_id
)],
)
.await;
// Existing log about storing result
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} (success, sync)",
job_id
)],
)
.await;
} else {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec!["Supervisor job.result reply missing recognizable fields".to_string()],
)
.await;
}
}
Err(e) => {
let _ = service_poll
@@ -329,15 +617,29 @@ async fn deliver_one(
caller_id,
id,
vec![format!(
"job.result fetch error for job {}: {}",
"job.result request error for job {}: {}",
job_id, e
)],
)
.await;
}
}
} else {
// Result already present; nothing to fetch
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Job {} already has result; no supervisor calls needed",
job_id
)],
)
.await;
}
// Mark message as processed
// Mark processed and stop polling for this message
let _ = service_poll
.update_message_status(
context_id,
@@ -352,24 +654,144 @@ async fn deliver_one(
caller_id,
id,
vec![format!(
"Supervisor job.status for job {} -> {} (mapped to {:?})",
"Terminal job {} detected; stopping transport polling",
job_id
)],
)
.await;
break;
}
// Not terminal yet -> request supervisor job.status as before
_ => {
let sup = cache
.get_or_create(
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
match sup.job_status_wait(job_id.to_string()).await
{
Ok((_out_id, reply_status)) => {
// Interpret status reply synchronously
let result_opt = reply_status.get("result");
let error_opt = reply_status.get("error");
if let Some(err_obj) = error_opt {
let _ = service_poll
.update_job_status_unchecked(
context_id,
caller_id,
job_id,
JobStatus::Error,
)
.await;
let _ = service_poll
.append_message_logs(
context_id, caller_id, id,
vec![format!(
"Supervisor error for job {}: {} (sync)",
job_id, err_obj
)],
)
.await;
} else if let Some(res) = result_opt {
let status_candidate = res
.get("status")
.and_then(|v| v.as_str())
.or_else(|| res.as_str());
if let Some(remote_status) =
status_candidate
{
if let Some((mapped, terminal)) =
map_supervisor_job_status(
remote_status,
)
{
let _ = service_poll
.update_job_status_unchecked(
context_id, caller_id, job_id, mapped.clone(),
)
.await;
let _ = service_poll
.append_message_logs(
context_id, caller_id, id,
vec![format!(
"Supervisor job.status for job {} -> {} (mapped to {:?}, sync)",
job_id, remote_status, mapped
)],
)
.await;
// If terminal, request job.result now (handled above for local terminal case)
if terminal {
// trigger job.result only if result empty to avoid spam
if let Ok(j_after) =
service_poll
.load_job(
context_id,
caller_id,
job_id,
)
.await
{
if j_after
.result
.is_empty()
{
let sup2 = cache
.get_or_create(
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
let _ = sup2.job_result_wait(job_id.to_string()).await
.and_then(|(_oid, reply2)| {
// Minimal parse and store
let res2 = reply2.get("result");
if let Some(obj) = res2.and_then(|v| v.as_object()) {
if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
let mut patch = std::collections::HashMap::new();
patch.insert("success".to_string(), s.to_string());
tokio::spawn({
let service_poll = service_poll.clone();
async move {
let _ = service_poll.update_job_result_merge_unchecked(context_id, caller_id, job_id, patch).await;
}
} else {
});
}
}
Ok((String::new(), Value::Null))
});
}
}
// Mark processed and stop polling for this message
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Unknown supervisor status '{}' for job {}",
remote_status, job_id
"Terminal job {} detected from supervisor status; stopping transport polling",
job_id
)],
)
.await;
break;
}
}
}
}
}
Err(e) => {
@@ -378,13 +800,61 @@ async fn deliver_one(
context_id,
caller_id,
id,
vec![format!("job.status sync error: {}", e)],
vec![format!(
"job.status request error: {}",
e
)],
)
.await;
}
}
}
break;
}
}
// If we cannot load the job, fall back to requesting job.status
Err(_) => {
let sup = cache
.get_or_create(
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
match sup.job_status_wait(job_id.to_string()).await {
Ok((_out_id, _reply_status)) => {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Requested supervisor job.status for job {} (fallback; load_job failed, sync)",
job_id
)],
)
.await;
}
Err(e) => {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"job.status request error: {}",
e
)],
)
.await;
}
}
}
}
// Ensure we only do this once
requested_job_check = true;
}
// break;
}
if matches!(s, TransportStatus::Failed) {
let _ = service_poll
@@ -500,157 +970,3 @@ pub fn start_router_auto(service: AppService, cfg: RouterConfig) -> tokio::task:
}
})
}
/// Start a single global inbound listener that reads Mycelium popMessage with topic filter,
/// decodes supervisor JSON-RPC replies, and updates correlated jobs/messages.
/// This listens for async replies like {"result":{"job_queued":...}} carrying the same inner JSON-RPC id.
pub fn start_inbound_listener(
service: AppService,
cfg: RouterConfig,
) -> tokio::task::JoinHandle<()> {
tokio::spawn(async move {
// Initialize Mycelium client (retry loop)
let mycelium = loop {
match MyceliumClient::new(cfg.base_url.clone()) {
Ok(c) => break c,
Err(e) => {
error!(error=%e, "MyceliumClient init error (inbound listener)");
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
}
};
loop {
// Poll for inbound supervisor messages on the configured topic
match mycelium.pop_message(Some(false), Some(20), None).await {
Ok(Some(inb)) => {
// Expect InboundMessage with base64 "payload"
let Some(payload_b64) = inb.get("payload").and_then(|v| v.as_str()) else {
// Not a payload-bearing message; ignore
continue;
};
let Ok(raw) = BASE64_STANDARD.decode(payload_b64.as_bytes()) else {
let _ = service
.append_message_logs(
0, // unknown context yet
0,
0,
vec![
"Inbound payload base64 decode error (supervisor reply)".into(),
],
)
.await;
continue;
};
tracing::info!(
raw = %String::from_utf8_lossy(&raw),
"Read raw messge from mycelium"
);
let Ok(rpc): Result<Value, _> = serde_json::from_slice(&raw) else {
// Invalid JSON payload
continue;
};
// Extract inner supervisor JSON-RPC id (number preferred; string fallback)
let inner_id_u64 = match rpc.get("id") {
Some(Value::Number(n)) => n.as_u64(),
Some(Value::String(s)) => s.parse::<u64>().ok(),
_ => None,
};
let Some(inner_id) = inner_id_u64 else {
// Cannot correlate without id
continue;
};
// Lookup correlation mapping
match service.supcorr_get(inner_id).await {
Ok(Some((context_id, caller_id, job_id, message_id))) => {
// Determine success/error from supervisor JSON-RPC envelope
let is_success = rpc
.get("result")
.map(|res| {
res.get("job_queued").is_some()
|| res.as_str().map(|s| s == "job_queued").unwrap_or(false)
})
.unwrap_or(false);
if is_success {
// Set to Dispatched (idempotent) per spec choice, and append log
let _ = service
.update_job_status_unchecked(
context_id,
caller_id,
job_id,
JobStatus::Dispatched,
)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"Supervisor reply for job {}: job_queued",
job_id
)],
)
.await;
let _ = service.supcorr_del(inner_id).await;
} else if let Some(err_obj) = rpc.get("error") {
// Error path: set job Error and log details
let _ = service
.update_job_status_unchecked(
context_id,
caller_id,
job_id,
JobStatus::Error,
)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"Supervisor error for job {}: {}",
job_id, err_obj
)],
)
.await;
let _ = service.supcorr_del(inner_id).await;
} else {
// Unknown result; keep correlation for a later, clearer reply
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![
"Supervisor reply did not contain job_queued or error"
.to_string(),
],
)
.await;
}
}
Ok(None) => {
// No correlation found; ignore or log once
}
Err(e) => {
error!(error=%e, "supcorr_get error");
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
}
}
}
Ok(None) => {
// No message; continue polling
continue;
}
Err(e) => {
error!(error=%e, "popMessage error");
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
}
}
}
})
}

View File

@@ -672,10 +672,16 @@ impl AppService {
let allowed = match current {
JobStatus::Dispatched => matches!(
new_status,
JobStatus::WaitingForPrerequisites | JobStatus::Started | JobStatus::Error
JobStatus::WaitingForPrerequisites
| JobStatus::Started
| JobStatus::Finished
| JobStatus::Error
),
JobStatus::WaitingForPrerequisites => {
matches!(new_status, JobStatus::Started | JobStatus::Error)
matches!(
new_status,
JobStatus::Started | JobStatus::Finished | JobStatus::Error
)
}
JobStatus::Started => matches!(new_status, JobStatus::Finished | JobStatus::Error),
JobStatus::Finished | JobStatus::Error => false,
@@ -714,10 +720,16 @@ impl AppService {
let allowed = match current {
JobStatus::Dispatched => matches!(
new_status,
JobStatus::WaitingForPrerequisites | JobStatus::Started | JobStatus::Error
JobStatus::WaitingForPrerequisites
| JobStatus::Started
| JobStatus::Finished
| JobStatus::Error
),
JobStatus::WaitingForPrerequisites => {
matches!(new_status, JobStatus::Started | JobStatus::Error)
matches!(
new_status,
JobStatus::Started | JobStatus::Finished | JobStatus::Error
)
}
JobStatus::Started => matches!(new_status, JobStatus::Finished | JobStatus::Error),
JobStatus::Finished | JobStatus::Error => false,
@@ -1182,10 +1194,7 @@ impl AppService {
&self,
inner_id: u64,
) -> Result<Option<(u32, u32, u32, u32)>, BoxError> {
self.redis
.supcorr_get(inner_id)
.await
.map_err(Into::into)
self.redis.supcorr_get(inner_id).await.map_err(Into::into)
}
/// Correlation map: delete mapping by inner supervisor JSON-RPC id.
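
Restated compactly, the transition rules encoded in the two status hunks above look like this (illustrative only; the authoritative checks live in `AppService`, and `JobStatus` is assumed to be exported from the crate's `models` module):

```rust
use herocoordinator::models::JobStatus;

fn allowed(current: &JobStatus, new_status: &JobStatus) -> bool {
    use JobStatus::*;
    match current {
        Dispatched => matches!(new_status, WaitingForPrerequisites | Started | Finished | Error),
        WaitingForPrerequisites => matches!(new_status, Started | Finished | Error),
        Started => matches!(new_status, Finished | Error),
        Finished | Error => false,
    }
}

fn main() {
    // The new case added by this change: Dispatched may jump straight to Finished.
    assert!(allowed(&JobStatus::Dispatched, &JobStatus::Finished));
    // Terminal states still reject further transitions.
    assert!(!allowed(&JobStatus::Finished, &JobStatus::Started));
}
```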

View File

@@ -789,10 +789,7 @@ impl RedisDriver {
Ok(())
}
pub async fn supcorr_get(
&self,
inner_id: u64,
) -> Result<Option<(u32, u32, u32, u32)>> {
pub async fn supcorr_get(&self, inner_id: u64) -> Result<Option<(u32, u32, u32, u32)>> {
let mut cm = self.manager_for_db(0).await?;
let key = format!("supcorr:{}", inner_id);
let res: Option<String> = redis::cmd("GET")