Fetch job results if a job is finished
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
src/router.rs | 153 changed lines (151 insertions, 2 deletions)
@@ -5,7 +5,7 @@ use tokio::sync::Semaphore;
 
 use crate::{
     clients::{Destination, SupervisorClient, MyceliumClient},
-    models::{Job, Message, MessageStatus, ScriptType, TransportStatus},
+    models::{Job, JobStatus, Message, MessageStatus, ScriptType, TransportStatus},
     service::AppService,
 };
 
@@ -110,6 +110,8 @@ async fn deliver_one(
 
     // Load message
     let msg: Message = service.load_message(context_id, caller_id, id).await?;
+    // Embedded job id (if any)
+    let job_id_opt: Option<u32> = msg.job.first().map(|j| j.id);
 
     // Determine routing script_type
     let desired: ScriptType = determine_script_type(&msg);
@@ -136,9 +138,12 @@ async fn deliver_one(
     } else {
         Destination::Ip(runner.address)
     };
+    // Keep clones for poller usage
+    let dest_for_poller = dest.clone();
+    let topic_for_poller = cfg.topic.clone();
     let client = SupervisorClient::new_with_client(
         mycelium.clone(),
-        dest,
+        dest.clone(),
         cfg.topic.clone(),
         None, // secret
     );
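Note: the destination is cloned into the SupervisorClient here so that separate copies (`dest_for_poller`, `topic_for_poller`) remain available to the background poller task spawned further down, which re-creates supervisor clients from them.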
@@ -173,11 +178,22 @@ async fn deliver_one(
     let poll_timeout = std::time::Duration::from_secs(cfg.transport_poll_timeout_secs);
     let out_id_cloned = out_id.clone();
     let mycelium = mycelium.clone();
+    // Determine reply timeout for supervisor job.result: prefer message.timeout_result, fallback to router config timeout
+    let job_result_reply_timeout: u64 = if msg.timeout_result > 0 {
+        msg.timeout_result as u64
+    } else {
+        cfg.transport_poll_timeout_secs
+    };
 
     tokio::spawn(async move {
         let start = std::time::Instant::now();
         let client = mycelium;
 
+        // Supervisor call context captured for sync status checks
+        let sup_dest = dest_for_poller;
+        let sup_topic = topic_for_poller;
+        let job_id_opt = job_id_opt;
+
         let mut last_status: Option<TransportStatus> = Some(TransportStatus::Sent);
 
         loop {
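Note: the reply-timeout precedence introduced above is easiest to read as a small pure function. A minimal sketch (the function name and the integer type of `msg.timeout_result` are assumptions for illustration):

// Sketch of the timeout selection above: a positive per-message
// timeout_result (seconds) overrides the router-wide poll timeout.
fn reply_timeout_secs(msg_timeout_result: i64, cfg_poll_timeout_secs: u64) -> u64 {
    if msg_timeout_result > 0 {
        msg_timeout_result as u64 // per-message override wins
    } else {
        cfg_poll_timeout_secs // fall back to router config
    }
}

fn main() {
    assert_eq!(reply_timeout_secs(30, 10), 30); // message override
    assert_eq!(reply_timeout_secs(0, 10), 10); // router fallback
}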
@@ -211,6 +227,128 @@ async fn deliver_one(
 
                 // Stop on terminal states
                 if matches!(s, TransportStatus::Delivered | TransportStatus::Read) {
+                    // On Read, fetch supervisor job.status and update local job/message if terminal
+                    if matches!(s, TransportStatus::Read) {
+                        if let Some(job_id) = job_id_opt {
+                            let sup = SupervisorClient::new_with_client(
+                                client.clone(),
+                                sup_dest.clone(),
+                                sup_topic.clone(),
+                                None,
+                            );
+                            match sup.job_status_sync(job_id.to_string(), 10).await {
+                                Ok(remote_status) => {
+                                    if let Some((mapped, terminal)) =
+                                        map_supervisor_job_status(&remote_status)
+                                    {
+                                        if terminal {
+                                            let _ = service_poll
+                                                .update_job_status_unchecked(
+                                                    context_id,
+                                                    caller_id,
+                                                    job_id,
+                                                    mapped.clone(),
+                                                )
+                                                .await;
+
+                                            // After terminal status, fetch supervisor job.result and store into Job.result
+                                            let sup = SupervisorClient::new_with_client(
+                                                client.clone(),
+                                                sup_dest.clone(),
+                                                sup_topic.clone(),
+                                                None,
+                                            );
+                                            match sup.job_result_sync(job_id.to_string(), job_result_reply_timeout).await {
+                                                Ok(result_map) => {
+                                                    // Persist the result into the Job.result map (merge)
+                                                    let _ = service_poll
+                                                        .update_job_result_merge_unchecked(
+                                                            context_id,
+                                                            caller_id,
+                                                            job_id,
+                                                            result_map.clone(),
+                                                        )
+                                                        .await;
+                                                    // Log which key was stored (success or error)
+                                                    let key = result_map.keys().next().cloned().unwrap_or_else(|| "unknown".to_string());
+                                                    let _ = service_poll
+                                                        .append_message_logs(
+                                                            context_id,
+                                                            caller_id,
+                                                            id,
+                                                            vec![format!(
+                                                                "Stored supervisor job.result for job {} ({})",
+                                                                job_id, key
+                                                            )],
+                                                        )
+                                                        .await;
+                                                }
+                                                Err(e) => {
+                                                    let _ = service_poll
+                                                        .append_message_logs(
+                                                            context_id,
+                                                            caller_id,
+                                                            id,
+                                                            vec![format!(
+                                                                "job.result fetch error for job {}: {}",
+                                                                job_id, e
+                                                            )],
+                                                        )
+                                                        .await;
+                                                }
+                                            }
+
+                                            // Mark message as processed
+                                            let _ = service_poll
+                                                .update_message_status(
+                                                    context_id,
+                                                    caller_id,
+                                                    id,
+                                                    MessageStatus::Processed,
+                                                )
+                                                .await;
+                                            let _ = service_poll
+                                                .append_message_logs(
+                                                    context_id,
+                                                    caller_id,
+                                                    id,
+                                                    vec![format!(
+                                                        "Supervisor job.status for job {} -> {} (mapped to {:?})",
+                                                        job_id, remote_status, mapped
+                                                    )],
+                                                )
+                                                .await;
+                                        }
+                                    } else {
+                                        let _ = service_poll
+                                            .append_message_logs(
+                                                context_id,
+                                                caller_id,
+                                                id,
+                                                vec![format!(
+                                                    "Unknown supervisor status '{}' for job {}",
+                                                    remote_status, job_id
+                                                )],
+                                            )
+                                            .await;
+                                    }
+                                }
+                                Err(e) => {
+                                    let _ = service_poll
+                                        .append_message_logs(
+                                            context_id,
+                                            caller_id,
+                                            id,
+                                            vec![format!(
+                                                "job.status sync error: {}",
+                                                e
+                                            )],
+                                        )
+                                        .await;
+                                }
+                            }
+                        }
+                    }
                     break;
                 }
                 if matches!(s, TransportStatus::Failed) {
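Note: the hunk above treats the supervisor's job.result reply as a map whose single key is "success" or "error". A consumer-side sketch of that convention (the `HashMap<String, String>` shape is an assumption inferred from the merge-and-log calls):

use std::collections::HashMap;

// Interpret a merged Job.result map under the single-key convention:
// "success" carries the payload, "error" carries the failure text.
fn interpret_result(result_map: &HashMap<String, String>) -> Result<&str, &str> {
    if let Some(ok) = result_map.get("success") {
        Ok(ok.as_str())
    } else if let Some(err) = result_map.get("error") {
        Err(err.as_str())
    } else {
        Err("unknown") // mirrors the "unknown" fallback used in the log line above
    }
}

fn main() {
    let mut m = HashMap::new();
    m.insert("success".to_string(), "42".to_string());
    assert_eq!(interpret_result(&m), Ok("42"));
}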
@@ -287,6 +425,17 @@ fn parse_message_key(s: &str) -> Option<(u32, u32)> {
     }
 }
 
+/// Map supervisor job.status -> (local JobStatus, terminal)
+fn map_supervisor_job_status(s: &str) -> Option<(JobStatus, bool)> {
+    match s {
+        "created" | "queued" => Some((JobStatus::Dispatched, false)),
+        "running" => Some((JobStatus::Started, false)),
+        "completed" => Some((JobStatus::Finished, true)),
+        "failed" | "timeout" => Some((JobStatus::Error, true)),
+        _ => None,
+    }
+}
+
 /// Auto-discover contexts periodically and ensure a router loop exists for each.
 /// Returns a JoinHandle of the discovery task (router loops are detached).
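Note: `map_supervisor_job_status` is a pure function, so its contract can be pinned down with a table-style test. A sketch, assuming `JobStatus` derives `PartialEq` and `Debug`:

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn supervisor_status_mapping() {
        // Non-terminal remote states keep the local job in flight.
        assert_eq!(map_supervisor_job_status("queued"), Some((JobStatus::Dispatched, false)));
        assert_eq!(map_supervisor_job_status("running"), Some((JobStatus::Started, false)));
        // Terminal remote states finish or fail the local job.
        assert_eq!(map_supervisor_job_status("completed"), Some((JobStatus::Finished, true)));
        assert_eq!(map_supervisor_job_status("failed"), Some((JobStatus::Error, true)));
        // Unrecognized statuses are surfaced to the message log instead of being mapped.
        assert_eq!(map_supervisor_job_status("paused"), None);
    }
}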