Improve jsonrpc client to properly route replies

Signed-off-by: Lee Smet <lee.smet@hotmail.com>
Lee Smet
2025-09-08 11:37:22 +02:00
parent 25f35ea8fc
commit 512c99db54
5 changed files with 683 additions and 653 deletions


@@ -8,7 +8,7 @@ use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use crate::{
clients::{Destination, MyceliumClient, SupervisorClient},
clients::{Destination, MyceliumClient, SupervisorClient, SupervisorHub},
models::{Job, JobStatus, Message, MessageStatus, ScriptType, TransportStatus},
service::AppService,
};
@@ -20,6 +20,7 @@ pub struct RouterConfig {
pub concurrency: usize,
pub base_url: String, // e.g. http://127.0.0.1:8990
pub topic: String, // e.g. "supervisor.rpc"
pub sup_hub: Arc<SupervisorHub>, // global supervisor hub for replies
// Transport status polling configuration
pub transport_poll_interval_secs: u64, // e.g. 2
pub transport_poll_timeout_secs: u64, // e.g. 300 (5 minutes)
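The SupervisorHub type itself is not part of this hunk; a minimal sketch of the shape implied by its call sites below (mycelium(), new_with_hub, and the *_wait methods) could look like this, with every name beyond those call sites assumed:

// Hypothetical sketch of the SupervisorHub implied by this diff: it owns the
// shared Mycelium client plus a registry of pending inner JSON-RPC ids, each
// mapped to a oneshot sender that the inbound reply listener completes.
use std::collections::HashMap;
use std::sync::Arc;
use serde_json::Value;
use tokio::sync::{Mutex, oneshot};

pub struct SupervisorHub {
    mycelium: Arc<MyceliumClient>, // shared transport client, exposed via mycelium()
    topic: String,                 // e.g. "supervisor.rpc"
    pending: Mutex<HashMap<u64, oneshot::Sender<Value>>>, // inner id -> waiter
}

impl SupervisorHub {
    pub fn mycelium(&self) -> Arc<MyceliumClient> {
        self.mycelium.clone()
    }

    /// Register a waiter before sending, so the reply cannot race the registration.
    pub async fn register(&self, inner_id: u64) -> oneshot::Receiver<Value> {
        let (tx, rx) = oneshot::channel();
        self.pending.lock().await.insert(inner_id, tx);
        rx
    }

    /// Called by the inbound reply listener when an envelope with `inner_id` arrives.
    pub async fn complete(&self, inner_id: u64, reply: Value) {
        if let Some(tx) = self.pending.lock().await.remove(&inner_id) {
            let _ = tx.send(reply);
        }
    }
}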
@@ -75,7 +76,7 @@ impl SupervisorClientCache {
async fn get_or_create(
&self,
mycelium: Arc<MyceliumClient>,
hub: Arc<SupervisorHub>,
dest: Destination,
topic: String,
secret: Option<String>,
@@ -95,10 +96,9 @@ impl SupervisorClientCache {
tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup (double-checked)");
return existing.clone();
}
let client = Arc::new(SupervisorClient::new_with_client(
mycelium,
let client = Arc::new(SupervisorClient::new_with_hub(
hub,
dest,
topic.clone(),
secret.clone(),
));
guard.insert(key, client.clone());
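The Hash/DefaultHasher imports at the top of the file suggest the cache key is a hash over the client identity. The key function itself is not shown; a sketch consistent with those imports, assuming Destination implements Hash:

// Sketch of a cache key over the fields that determine whether a
// SupervisorClient can be reused (destination, topic, secret).
fn cache_key(dest: &Destination, topic: &str, secret: &Option<String>) -> u64 {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut h = DefaultHasher::new();
    dest.hash(&mut h); // assumes Destination: Hash
    topic.hash(&mut h);
    secret.hash(&mut h);
    h.finish()
}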
@@ -122,16 +122,9 @@ pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::
let handle = tokio::spawn(async move {
let sem = Arc::new(Semaphore::new(cfg_cloned.concurrency));
// Create a shared Mycelium client for this context loop (retry until available)
let mycelium = loop {
match MyceliumClient::new(cfg_cloned.base_url.clone()) {
Ok(c) => break Arc::new(c),
Err(e) => {
error!(context_id=ctx_id, error=%e, "MyceliumClient init error");
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
}
};
// Use the global SupervisorHub and its Mycelium client
let sup_hub = cfg_cloned.sup_hub.clone();
let mycelium = sup_hub.mycelium();
let cache = Arc::new(SupervisorClientCache::new());
@@ -156,11 +149,12 @@ pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::
tokio::spawn({
let mycelium = mycelium.clone();
let cache = cache.clone();
let sup_hub = sup_hub.clone();
async move {
// Ensure permit is dropped at end of task
let _permit = permit;
if let Err(e) =
deliver_one(&service_task, &cfg_task, ctx_id, &key, mycelium, cache.clone())
deliver_one(&service_task, &cfg_task, ctx_id, &key, mycelium, sup_hub, cache.clone())
.await
{
error!(context_id=ctx_id, key=%key, error=%e, "Delivery error");
@@ -191,6 +185,7 @@ async fn deliver_one(
context_id: u32,
msg_key: &str,
mycelium: Arc<MyceliumClient>,
sup_hub: Arc<SupervisorHub>,
cache: Arc<SupervisorClientCache>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Parse "message:{caller_id}:{id}"
@@ -233,7 +228,7 @@ async fn deliver_one(
let secret_for_poller = runner.secret.clone();
let client = cache
.get_or_create(
mycelium.clone(),
sup_hub.clone(),
dest.clone(),
cfg.topic.clone(),
runner.secret.clone(),
@@ -247,15 +242,15 @@ async fn deliver_one(
// Send
// If this is a job.run and we have a secret configured on the client,
// prefer the typed wrapper that injects the secret into inner supervisor params,
// and also capture the inner supervisor JSON-RPC id for correlation.
let (out_id, inner_id_opt) = if method == "job.run" {
// and await the reply to capture job_queued immediately.
let (out_id, reply_opt) = if method == "job.run" {
if let Some(j) = msg.job.first() {
let jv = job_to_json(j)?;
// Returns (outbound message id, inner supervisor JSON-RPC id)
let (out, inner) = client.job_run_with_ids(jv).await?;
(out, Some(inner))
// Returns (outbound message id, reply envelope)
let (out, reply) = client.job_run_wait(jv).await?;
(out, Some(reply))
} else {
// Fallback: no embedded job, use the generic call
// Fallback: no embedded job, use the generic call (await reply, discard)
let out = client.call(&method, params).await?;
(out, None)
}
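The *_wait methods used here (job_run_wait, and job_status_wait/job_result_wait further down) follow a register-then-send-then-await pattern. A sketch under the hub assumptions above, with next_inner_id and send_job_run as illustrative helper names:

// Sketch of the request/await pattern behind the *_wait calls: register the
// inner JSON-RPC id with the hub, send over Mycelium, then await the reply
// envelope delivered by the listener. Helper names are hypothetical.
impl SupervisorClient {
    pub async fn job_run_wait(
        &self,
        job: Value,
    ) -> Result<(String, Value), Box<dyn std::error::Error + Send + Sync>> {
        let inner_id = self.next_inner_id();                  // fresh supervisor JSON-RPC id
        let rx = self.hub.register(inner_id).await;           // register before sending
        let out_id = self.send_job_run(inner_id, job).await?; // outbound Mycelium message id
        let reply = rx.await?;                                // full reply envelope
        Ok((out_id, reply))
    }
}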
@@ -280,13 +275,59 @@ async fn deliver_one(
.update_message_status(context_id, caller_id, id, MessageStatus::Acknowledged)
.await?;
// Record correlation (inner supervisor JSON-RPC id -> job/message) for inbound popMessage handling
if let (Some(inner_id), Some(job_id)) = (inner_id_opt, job_id_opt) {
let _ = service
.supcorr_set(inner_id, context_id, caller_id, job_id, id)
.await;
// If we got a job.run reply, interpret job_queued immediately
if let (Some(reply), Some(job_id)) = (reply_opt, msg.job.first().map(|j| j.id)) {
let result_opt = reply.get("result");
let error_opt = reply.get("error");
// Handle job.run success (job_queued)
let is_job_queued = result_opt
.and_then(|res| {
if res.get("job_queued").is_some() {
Some(true)
} else if let Some(s) = res.as_str() {
Some(s == "job_queued")
} else {
None
}
})
.unwrap_or(false);
if is_job_queued {
let _ = service
.update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Dispatched)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Supervisor reply for job {}: job_queued (processed synchronously)",
job_id
)],
)
.await;
} else if let Some(err_obj) = error_opt {
let _ = service
.update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Error)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Supervisor error for job {}: {} (processed synchronously)",
job_id, err_obj
)],
)
.await;
}
}
// No correlation map needed; replies are handled synchronously via SupervisorHub
// Spawn transport-status poller
{
let service_poll = service.clone();
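For reference, the job_queued check above accepts both an object-shaped and a bare-string result, and the else-branch handles the error envelope; illustrative values:

// The three envelope shapes handled by the synchronous job.run path above
// (ids and values are illustrative).
use serde_json::json;

let object_form = json!({ "id": 7, "result": { "job_queued": 42 } });
let string_form = json!({ "id": 7, "result": "job_queued" });
let error_form = json!({ "id": 7, "error": { "code": -32000, "message": "runner busy" } });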
@@ -365,28 +406,109 @@ async fn deliver_one(
if job.result.is_empty() {
let sup = cache
.get_or_create(
client.clone(),
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
match sup.job_result_with_ids(job_id.to_string()).await {
Ok((_out2, inner2)) => {
let _ = service_poll
.supcorr_set(inner2, context_id, caller_id, job_id, id)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Requested supervisor job.result for job {} (local terminal w/ empty result)",
job_id
)],
)
.await;
match sup.job_result_wait(job_id.to_string()).await {
Ok((_out2, reply2)) => {
// Interpret reply synchronously: success/error/bare string
let res = reply2.get("result");
if let Some(obj) = res.and_then(|v| v.as_object()) {
if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
let mut patch = std::collections::HashMap::new();
patch.insert("success".to_string(), s.to_string());
let _ = service_poll
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} (success, sync)",
job_id
)],
)
.await;
} else if let Some(s) = obj.get("error").and_then(|v| v.as_str()) {
let mut patch = std::collections::HashMap::new();
patch.insert("error".to_string(), s.to_string());
let _ = service_poll
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} (error, sync)",
job_id
)],
)
.await;
}
} else if let Some(s) = res.and_then(|v| v.as_str()) {
let mut patch = std::collections::HashMap::new();
patch.insert("success".to_string(), s.to_string());
let _ = service_poll
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} (success, sync)",
job_id
)],
)
.await;
} else {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec!["Supervisor job.result reply missing recognizable fields".to_string()],
)
.await;
}
}
Err(e) => {
let _ = service_poll
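The success/error/bare-string interpretation of job.result replies recurs several times in this function; a small helper could centralize it. A sketch (not part of this commit) mirroring the branches above:

// Classifies a supervisor job.result reply into a (key, value) pair for the
// result-map patch: object with "success", object with "error", or a bare
// string treated as success, exactly as in the branches above.
use serde_json::Value;

fn interpret_job_result(reply: &Value) -> Option<(&'static str, String)> {
    let res = reply.get("result")?;
    if let Some(obj) = res.as_object() {
        if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
            return Some(("success", s.to_string()));
        }
        if let Some(s) = obj.get("error").and_then(|v| v.as_str()) {
            return Some(("error", s.to_string()));
        }
        return None;
    }
    res.as_str().map(|s| ("success", s.to_string()))
}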
@@ -421,31 +543,93 @@ async fn deliver_one(
_ => {
let sup = cache
.get_or_create(
client.clone(),
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
match sup.job_status_with_ids(job_id.to_string()).await {
Ok((_out_id, inner_id)) => {
// Correlate this status request to the message/job
let _ = service_poll
.supcorr_set(
inner_id, context_id, caller_id, job_id, id,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Requested supervisor job.status for job {}",
job_id
)],
)
.await;
match sup.job_status_wait(job_id.to_string()).await {
Ok((_out_id, reply_status)) => {
// Interpret status reply synchronously
let result_opt = reply_status.get("result");
let error_opt = reply_status.get("error");
if let Some(err_obj) = error_opt {
let _ = service_poll
.update_job_status_unchecked(
context_id, caller_id, job_id, JobStatus::Error,
)
.await;
let _ = service_poll
.append_message_logs(
context_id, caller_id, id,
vec![format!(
"Supervisor error for job {}: {} (sync)",
job_id, err_obj
)],
)
.await;
} else if let Some(res) = result_opt {
let status_candidate = res
.get("status")
.and_then(|v| v.as_str())
.or_else(|| res.as_str());
if let Some(remote_status) = status_candidate {
if let Some((mapped, terminal)) =
map_supervisor_job_status(remote_status)
{
let _ = service_poll
.update_job_status_unchecked(
context_id, caller_id, job_id, mapped.clone(),
)
.await;
let _ = service_poll
.append_message_logs(
context_id, caller_id, id,
vec![format!(
"Supervisor job.status for job {} -> {} (mapped to {:?}, sync)",
job_id, remote_status, mapped
)],
)
.await;
// If terminal, request job.result now (handled above for local terminal case)
if terminal {
// trigger job.result only if result empty to avoid spam
if let Ok(j_after) = service_poll.load_job(context_id, caller_id, job_id).await {
if j_after.result.is_empty() {
let sup2 = cache
.get_or_create(
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
let _ = sup2.job_result_wait(job_id.to_string()).await
.and_then(|(_oid, reply2)| {
// Minimal parse and store
let res2 = reply2.get("result");
if let Some(obj) = res2.and_then(|v| v.as_object()) {
if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
let mut patch = std::collections::HashMap::new();
patch.insert("success".to_string(), s.to_string());
tokio::spawn({
let service_poll = service_poll.clone();
async move {
let _ = service_poll.update_job_result_merge_unchecked(context_id, caller_id, job_id, patch).await;
}
});
}
}
Ok((String::new(), Value::Null))
});
}
}
}
}
}
}
}
Err(e) => {
let _ = service_poll
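map_supervisor_job_status is not shown in this diff; its call sites imply the signature fn(&str) -> Option<(JobStatus, bool)>, where the bool flags a terminal state that should trigger a job.result fetch. A plausible sketch, with the remote status strings and the Started/Finished variant names assumed:

// Hypothetical mapping consistent with the call sites above; the bool marks
// terminal states. Status strings and non-diff JobStatus variants are guesses.
fn map_supervisor_job_status(remote: &str) -> Option<(JobStatus, bool)> {
    match remote {
        "queued" => Some((JobStatus::Dispatched, false)),
        "running" => Some((JobStatus::Started, false)),
        "finished" => Some((JobStatus::Finished, true)),
        "error" | "failed" => Some((JobStatus::Error, true)),
        _ => None, // unknown status: leave local state untouched
    }
}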
@@ -465,26 +649,21 @@ async fn deliver_one(
Err(_) => {
let sup = cache
.get_or_create(
client.clone(),
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
match sup.job_status_with_ids(job_id.to_string()).await {
Ok((_out_id, inner_id)) => {
let _ = service_poll
.supcorr_set(
inner_id, context_id, caller_id, job_id, id,
)
.await;
match sup.job_status_wait(job_id.to_string()).await {
Ok((_out_id, _reply_status)) => {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Requested supervisor job.status for job {} (fallback; load_job failed)",
"Requested supervisor job.status for job {} (fallback; load_job failed, sync)",
job_id
)],
)
@@ -624,423 +803,3 @@ pub fn start_router_auto(service: AppService, cfg: RouterConfig) -> tokio::task:
})
}
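With sup_hub now a RouterConfig field, startup wiring would create one global hub and share it between the router and the reply listener. A sketch; SupervisorHub::new_with_client and the context_ids field are assumptions:

// Illustrative wiring: one global hub over a shared Mycelium client.
let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990".to_string())?);
let sup_hub = Arc::new(SupervisorHub::new_with_client(
    mycelium,
    "supervisor.rpc".to_string(),
));

let cfg = RouterConfig {
    context_ids: vec![1], // assumed field listing contexts to serve
    concurrency: 8,
    base_url: "http://127.0.0.1:8990".to_string(),
    topic: "supervisor.rpc".to_string(),
    sup_hub: sup_hub.clone(),
    transport_poll_interval_secs: 2,
    transport_poll_timeout_secs: 300,
};
let handles = start_router(service, cfg);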
/// Start a single global inbound listener that reads Mycelium popMessage with topic filter,
/// decodes supervisor JSON-RPC replies, and updates correlated jobs/messages.
/// This listens for async replies like {"result":{"job_queued":...}} carrying the same inner JSON-RPC id.
pub fn start_inbound_listener(
service: AppService,
cfg: RouterConfig,
) -> tokio::task::JoinHandle<()> {
tokio::spawn(async move {
// Initialize Mycelium client (retry loop)
let mycelium = loop {
match MyceliumClient::new(cfg.base_url.clone()) {
Ok(c) => break Arc::new(c),
Err(e) => {
error!(error=%e, "MyceliumClient init error (inbound listener)");
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
}
};
let cache = Arc::new(SupervisorClientCache::new());
loop {
// Poll for inbound supervisor messages on the configured topic
match mycelium.pop_message(Some(false), Some(20), None).await {
Ok(Some(inb)) => {
// Expect InboundMessage with base64 "payload"
let Some(payload_b64) = inb.get("payload").and_then(|v| v.as_str()) else {
// Not a payload-bearing message; ignore
continue;
};
let Ok(raw) = BASE64_STANDARD.decode(payload_b64.as_bytes()) else {
let _ = service
.append_message_logs(
0, // unknown context yet
0,
0,
vec![
"Inbound payload base64 decode error (supervisor reply)".into(),
],
)
.await;
continue;
};
tracing::info!(
raw = %String::from_utf8_lossy(&raw),
"Read raw messge from mycelium"
);
let Ok(rpc): Result<Value, _> = serde_json::from_slice(&raw) else {
// Invalid JSON payload
continue;
};
// Extract inner supervisor JSON-RPC id (number preferred; string fallback)
let inner_id_u64 = match rpc.get("id") {
Some(Value::Number(n)) => n.as_u64(),
Some(Value::String(s)) => s.parse::<u64>().ok(),
_ => None,
};
let Some(inner_id) = inner_id_u64 else {
// Cannot correlate without id
continue;
};
// Lookup correlation mapping
match service.supcorr_get(inner_id).await {
Ok(Some((context_id, caller_id, job_id, message_id))) => {
// Determine success/error from supervisor JSON-RPC envelope
// Inspect result/error to route job.run/job.status/job.result replies
let result_opt = rpc.get("result");
let error_opt = rpc.get("error");
// Handle job.run success (job_queued)
let is_job_queued = result_opt
.and_then(|res| {
if res.get("job_queued").is_some() {
Some(true)
} else if let Some(s) = res.as_str() {
Some(s == "job_queued")
} else {
None
}
})
.unwrap_or(false);
if is_job_queued {
// Set to Dispatched (idempotent) per spec, and append log
let _ = service
.update_job_status_unchecked(
context_id,
caller_id,
job_id,
JobStatus::Dispatched,
)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"Supervisor reply for job {}: job_queued",
job_id
)],
)
.await;
let _ = service.supcorr_del(inner_id).await;
continue;
}
// Error envelope: set job Error and log
if let Some(err_obj) = error_opt {
let _ = service
.update_job_status_unchecked(
context_id,
caller_id,
job_id,
JobStatus::Error,
)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"Supervisor error for job {}: {}",
job_id, err_obj
)],
)
.await;
let _ = service.supcorr_del(inner_id).await;
continue;
}
// If we have a result, try to interpret it as job.status or job.result
if let Some(res) = result_opt {
// Try job.status: object {status: "..."} or bare string
let status_candidate = res
.get("status")
.and_then(|v| v.as_str())
.or_else(|| res.as_str());
if let Some(remote_status) = status_candidate {
if let Some((mapped, terminal)) =
map_supervisor_job_status(remote_status)
{
// Update job status and log
let _ = service
.update_job_status_unchecked(
context_id,
caller_id,
job_id,
mapped.clone(),
)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"Supervisor job.status for job {} -> {} (mapped to {:?})",
job_id, remote_status, mapped
)],
)
.await;
// Done with this correlation id
let _ = service.supcorr_del(inner_id).await;
// If terminal, request job.result asynchronously now
if terminal {
// Load job to determine script_type for runner selection
match service
.load_job(context_id, caller_id, job_id)
.await
{
Ok(job) => {
match service.scan_runners(context_id).await {
Ok(runners) => {
if let Some(runner) =
runners.into_iter().find(|r| {
r.script_type == job.script_type
})
{
let dest = if !runner
.pubkey
.trim()
.is_empty()
{
Destination::Pk(
runner.pubkey.clone(),
)
} else {
Destination::Ip(runner.address)
};
let sup = cache
.get_or_create(
mycelium.clone(),
dest,
cfg.topic.clone(),
runner.secret.clone(),
)
.await;
match sup
.job_result_with_ids(
job_id.to_string(),
)
.await
{
Ok((_out2, inner2)) => {
let _ = service
.supcorr_set(
inner2, context_id,
caller_id, job_id,
message_id,
)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"Requested supervisor job.result for job {}",
job_id
)],
)
.await;
}
Err(e) => {
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"job.result request error for job {}: {}",
job_id, e
)],
)
.await;
}
}
} else {
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"No runner with matching script_type found to request job.result for job {}",
job_id
)],
)
.await;
}
}
Err(e) => {
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"scan_runners error while requesting job.result for job {}: {}",
job_id, e
)],
)
.await;
}
}
}
Err(e) => {
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"load_job error while requesting job.result for job {}: {}",
job_id, e
)],
)
.await;
}
}
}
continue;
}
}
// Try job.result: object with success/error or bare string treated as success
if let Some(obj) = res.as_object() {
if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
let mut patch = std::collections::HashMap::new();
patch.insert("success".to_string(), s.to_string());
let _ = service
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service
.update_message_status(
context_id,
caller_id,
message_id,
MessageStatus::Processed,
)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"Stored supervisor job.result for job {} (success)",
job_id
)],
)
.await;
let _ = service.supcorr_del(inner_id).await;
continue;
}
if let Some(s) = obj.get("error").and_then(|v| v.as_str()) {
let mut patch = std::collections::HashMap::new();
patch.insert("error".to_string(), s.to_string());
let _ = service
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service
.update_message_status(
context_id,
caller_id,
message_id,
MessageStatus::Processed,
)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"Stored supervisor job.result for job {} (error)",
job_id
)],
)
.await;
let _ = service.supcorr_del(inner_id).await;
continue;
}
} else if let Some(s) = res.as_str() {
// Bare string => treat as success
let mut patch = std::collections::HashMap::new();
patch.insert("success".to_string(), s.to_string());
let _ = service
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service
.update_message_status(
context_id,
caller_id,
message_id,
MessageStatus::Processed,
)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![format!(
"Stored supervisor job.result for job {} (success)",
job_id
)],
)
.await;
let _ = service.supcorr_del(inner_id).await;
continue;
}
}
// Unknown/unsupported supervisor reply; keep correlation for later
let _ = service
.append_message_logs(
context_id,
caller_id,
message_id,
vec![
"Supervisor reply did not contain recognizable job.run/status/result fields"
.to_string(),
],
)
.await;
}
Ok(None) => {
// No correlation found; ignore or log once
}
Err(e) => {
error!(error=%e, "supcorr_get error");
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
}
}
}
Ok(None) => {
// No message; continue polling
continue;
}
Err(e) => {
error!(error=%e, "popMessage error");
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
}
}
}
})
}