Compare commits

18 commits: de6c799635...main

688c42493e, a75fb9c55d, 5ed9739d68, 3cd1a55768, c860553acd, 78a776877a,
8cea17f4ec, 66c89d2485, 512c99db54, 25f35ea8fc, fb34b4e2f3, 2c88114d45,
8de2597f19, 3220f52956, 97bcb55aaa, c38937f1cb, 059d5131e7, c6077623b0
README.md (+26)

@@ -1,2 +1,28 @@
 # herocoordinator
 
+## Demo setup
+
+A python script is provided in the [scripts directory](./scripts/supervisor_flow_demo.py). This script
+generates some demo jobs to be run by [a supervisor](https://git.ourworld.tf/herocode/supervisor).
+Communication happens over [mycelium](https://github.com/threefoldtech/mycelium). To run the demo a
+supervisor must be running, which uses a mycelium instance to read and write messages. A __different__
+mycelium instance needs to run for the coordinator (the supervisor can run on a different host than
+the coordinator, so long as the 2 mycelium instances used can reach each other).
+
+An example of a local setup:
+
+```bash
+# Run a redis docker
+docker run -it -d -p 6379:6379 --name redis redis
+# Spawn mycelium node 1 with default settings. This also creates a TUN interface, though that is not
+# necessary for the messages
+mycelium
+# Spawn mycelium node 2, connect to the first node
+mycelium --key-file key.bin --peers tcp://127.0.0.1:9651 --disable-quic --disable-peer-discovery --api-addr 127.0.0.1:9989 --jsonrpc-addr 127.0.0.1:9990 --no-tun -t 8651
+# Start the supervisor
+supervisor --admin-secret admin123 --user-secret user123 --register-secret register123 --mycelium-url http://127.0.0.1:9990 --topic supervisor.rpc
+# Start the coordinator
+cargo run # (alternatively, if a compiled binary is present, that can be run)
+# Finally, invoke the demo script
+python3 scripts/supervisor_flow_demo.py
+```
scripts/supervisor_flow_demo.py

@@ -3,6 +3,7 @@
 Supervisor flow demo for HeroCoordinator.
 
 This script:
+- Optionally pre-registers and starts a Python runner on the target Supervisor over Mycelium using an admin secret (--admin-secret). If the flag is not set, this step is skipped.
 - Creates an actor
 - Creates a context granting the actor admin/reader/executor privileges
 - Registers a Runner in the context targeting a Supervisor reachable via Mycelium (by public key or IP)
@@ -20,10 +21,13 @@ Notes:
 - Exactly one of --dst-ip or --dst-pk must be provided.
 - Runner.topic defaults to "supervisor.rpc" (see main.rs).
 - The router auto-discovers contexts and will deliver job.run messages to the supervisor.
+- Mycelium URL is read from MYCELIUM_URL (default http://127.0.0.1:8990).
+- supervisor.register_runner uses static name="python" and queue="python".
 """
 
 import argparse
 import json
+import base64
 import os
 import sys
 import time
@@ -36,6 +40,9 @@ JSONRPC_VERSION = "2.0"
 def env_url() -> str:
     return os.getenv("COORDINATOR_URL", "http://127.0.0.1:9652").rstrip("/")
 
 
+def env_mycelium_url() -> str:
+    return os.getenv("MYCELIUM_URL", "http://127.0.0.1:8990").rstrip("/")
+
+
 class JsonRpcClient:
     def __init__(self, url: str):
@@ -87,6 +94,108 @@ def print_header(title: str):
 def pretty(obj: Any):
     print(json.dumps(obj, indent=2, sort_keys=True))
 
 
+def mycelium_register_runner(
+    myc: "JsonRpcClient",
+    dst_pk: Optional[str],
+    dst_ip: Optional[str],
+    topic: str,
+    admin_secret: str,
+    name: str = "python",
+    queue: str = "python",
+    timeout: int = 15,
+) -> Any:
+    """
+    Send supervisor.register_runner over Mycelium using pushMessage and wait for the reply.
+    - myc: JsonRpcClient for the Mycelium API (MYCELIUM_URL)
+    - dst_pk/dst_ip: destination on the overlay; one of them must be provided
+    - topic: message topic (defaults to supervisor.rpc from args)
+    - admin_secret: supervisor admin secret to authorize the registration
+    - name/queue: static identifiers for the python runner on the supervisor
+    - timeout: seconds to wait for a reply
+    Returns the JSON-RPC 'result' from the supervisor or raises on error/timeout.
+    """
+    envelope = {
+        "jsonrpc": JSONRPC_VERSION,
+        "id": 1,
+        "method": "register_runner",
+        "params": [{"secret": admin_secret, "name": name, "queue": queue}],
+    }
+    payload_b64 = base64.b64encode(json.dumps(envelope).encode("utf-8")).decode("ascii")
+    topic_b64 = base64.b64encode(topic.encode("utf-8")).decode("ascii")
+
+    if dst_pk:
+        dst = {"pk": dst_pk}
+    elif dst_ip:
+        dst = {"ip": dst_ip}
+    else:
+        raise RuntimeError("Either dst_pk or dst_ip must be provided for Mycelium destination")
+
+    params = {
+        "message": {"dst": dst, "topic": topic_b64, "payload": payload_b64},
+    }
+    resp = myc.call("pushMessage", params)
+    time.sleep(15)
+
+    # Expect an InboundMessage with a payload if a reply was received
+    # if isinstance(resp, dict) and "payload" in resp:
+    #     try:
+    #         reply = json.loads(base64.b64decode(resp["payload"]).decode("utf-8"))
+    #     except Exception as e:
+    #         raise RuntimeError(f"Invalid supervisor reply payload: {e}")
+    #     if isinstance(reply, dict) and reply.get("error"):
+    #         raise RuntimeError(f"Supervisor register_runner error: {json.dumps(reply['error'])}")
+    #     return reply.get("result")
+    #
+    # raise RuntimeError("No reply received from supervisor for register_runner (timeout)")
+
+
+def mycelium_start_runner(
+    myc: "JsonRpcClient",
+    dst_pk: Optional[str],
+    dst_ip: Optional[str],
+    topic: str,
+    secret: str,
+    actor_id: str = "python",
+    timeout: int = 15,
+) -> Any:
+    """
+    Send supervisor.start_runner over Mycelium using pushMessage and wait for the reply.
+    - actor_id is set to the static name "python" by default to start the registered python runner.
+    Returns the JSON-RPC 'result' or raises on error/timeout.
+    """
+    envelope = {
+        "jsonrpc": JSONRPC_VERSION,
+        "id": 1,
+        "method": "start_runner",
+        "params": [actor_id],
+    }
+    payload_b64 = base64.b64encode(json.dumps(envelope).encode("utf-8")).decode("ascii")
+    topic_b64 = base64.b64encode(topic.encode("utf-8")).decode("ascii")
+
+    if dst_pk:
+        dst = {"pk": dst_pk}
+    elif dst_ip:
+        dst = {"ip": dst_ip}
+    else:
+        raise RuntimeError("Either dst_pk or dst_ip must be provided for Mycelium destination")
+
+    params = {
+        "message": {"dst": dst, "topic": topic_b64, "payload": payload_b64},
+    }
+    resp = myc.call("pushMessage", params)
+
+    time.sleep(15)
+    # if isinstance(resp, dict) and "payload" in resp:
+    #     try:
+    #         reply = json.loads(base64.b64decode(resp["payload"]).decode("utf-8"))
+    #     except Exception as e:
+    #         raise RuntimeError(f"Invalid supervisor reply payload (start_runner): {e}")
+    #     if isinstance(reply, dict) and reply.get("error"):
+    #         raise RuntimeError(f"Supervisor start_runner error: {json.dumps(reply['error'])}")
+    #     return reply.get("result")
+    #
+    # raise RuntimeError("No reply received from supervisor for start_runner (timeout)")
+
+
 def try_create_or_load(client: JsonRpcClient, create_method: str, create_params: Dict[str, Any],
                        load_method: str, load_params: Dict[str, Any]) -> Any:
@@ -124,6 +233,7 @@ def parse_args() -> argparse.Namespace:
     )
     p.add_argument("--topic", default="supervisor.rpc", help="Supervisor topic. Default: supervisor.rpc")
     p.add_argument("--secret", help="Optional supervisor secret used for authenticated supervisor calls")
+    p.add_argument("--admin-secret", help="Supervisor admin secret to pre-register a Python runner over Mycelium. If omitted, pre-registration is skipped.")
     p.add_argument("--poll-interval", type=float, default=2.0, help="Flow poll interval seconds. Default: 2.0")
     p.add_argument("--poll-timeout", type=int, default=600, help="Max seconds to wait for flow completion. Default: 600")
     return p.parse_args()
@@ -138,6 +248,9 @@ def main():
     url = env_url()
     client = JsonRpcClient(url)
 
+    mycelium_url = env_mycelium_url()
+    mycelium_client = JsonRpcClient(mycelium_url) if getattr(args, "admin_secret", None) else None
+
     actor_id = int(args.actor_id)
     context_id = int(args.context_id)
     runner_id = int(args.runner_id)
@@ -189,6 +302,41 @@ def main():
     runner_pubkey = args.dst_pk if args.dst_pk else ""
     runner_address = args.dst_ip if args.dst_ip else "127.0.0.1"
 
+    # Optional: pre-register a Python runner on the Supervisor over Mycelium using an admin secret
+    if getattr(args, "admin_secret", None):
+        print_header("supervisor.register_runner (pre-register via Mycelium)")
+        try:
+            mycelium_result = mycelium_register_runner(
+                mycelium_client,
+                args.dst_pk if args.dst_pk else None,
+                args.dst_ip if args.dst_ip else None,
+                topic,
+                args.admin_secret,
+                name="Python",
+                queue="Python",
+                timeout=15,
+            )
+            print("Supervisor register_runner ->", mycelium_result)
+        except Exception as e:
+            print(f"ERROR: Supervisor pre-registration failed: {e}", file=sys.stderr)
+            sys.exit(1)
+
+        print_header("supervisor.start_runner (start via Mycelium)")
+        try:
+            mycelium_result = mycelium_start_runner(
+                mycelium_client,
+                args.dst_pk if args.dst_pk else None,
+                args.dst_ip if args.dst_ip else None,
+                topic,
+                args.admin_secret,
+                actor_id="Python",
+                timeout=15,
+            )
+            print("Supervisor start_runner ->", mycelium_result)
+        except Exception as e:
+            print(f"ERROR: Supervisor start failed: {e}", file=sys.stderr)
+            sys.exit(1)
+
     print_header("runner.create (or load)")
     # runner.load requires both context_id and id
     try:
src/clients/mod.rs

@@ -1,7 +1,9 @@
 pub mod mycelium_client;
 pub mod supervisor_client;
+pub mod supervisor_hub;
 pub mod types;
 
 pub use mycelium_client::{MyceliumClient, MyceliumClientError};
 pub use supervisor_client::{SupervisorClient, SupervisorClientError};
+pub use supervisor_hub::SupervisorHub;
 pub use types::Destination;
src/clients/mycelium_client.rs

@@ -3,6 +3,8 @@ use std::sync::atomic::{AtomicU64, Ordering};
 
 use reqwest::Client as HttpClient;
+use base64::Engine;
+use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
 use serde_json::{Value, json};
 use thiserror::Error;
 
@@ -53,6 +55,8 @@ impl MyceliumClient {
             "method": method,
             "params": [ params ]
         });
+
+        tracing::info!(%req, "jsonrpc");
         let resp = self.http.post(&self.base_url).json(&req).send().await?;
         let status = resp.status();
         let body: Value = resp.json().await?;
@@ -82,13 +86,13 @@ impl MyceliumClient {
         &self,
         id_hex: &str,
     ) -> Result<TransportStatus, MyceliumClientError> {
-        let params = json!({ "id": id_hex });
-        let body = self.jsonrpc("messageStatus", params).await?;
+        let params = json!(id_hex);
+        let body = self.jsonrpc("getMessageInfo", params).await?;
         let result = body.get("result").ok_or_else(|| {
             MyceliumClientError::InvalidResponse(format!("missing result in response: {body}"))
         })?;
-        // Accept both { status: "..."} and bare "..."
-        let status_str = if let Some(s) = result.get("status").and_then(|v| v.as_str()) {
+        // Accept both { state: "..."} and bare "..."
+        let status_str = if let Some(s) = result.get("state").and_then(|v| v.as_str()) {
             s.to_string()
         } else if let Some(s) = result.as_str() {
             s.to_string()
@@ -97,18 +101,19 @@ impl MyceliumClient {
                 "unexpected result shape: {result}"
             )));
         };
-        Self::map_status(&status_str).ok_or_else(|| {
+        let status = Self::map_status(&status_str).ok_or_else(|| {
             MyceliumClientError::InvalidResponse(format!("unknown status: {status_str}"))
-        })
+        });
+        tracing::info!(%id_hex, status = %status.as_ref().unwrap(), "queried messages status");
+        status
     }
 
     fn map_status(s: &str) -> Option<TransportStatus> {
         match s {
-            "queued" => Some(TransportStatus::Queued),
-            "sent" => Some(TransportStatus::Sent),
-            "delivered" => Some(TransportStatus::Delivered),
+            "pending" => Some(TransportStatus::Queued),
+            "received" => Some(TransportStatus::Delivered),
             "read" => Some(TransportStatus::Read),
-            "failed" => Some(TransportStatus::Failed),
+            "aborted" => Some(TransportStatus::Failed),
             _ => None,
         }
     }
@@ -159,6 +164,83 @@ impl MyceliumClient {
         .and_then(|v| v.as_str())
         .map(|s| s.to_string())
     }
+
+    /// popMessage: retrieve an inbound message if available (optionally filtered by topic).
+    /// - peek: if true, do not remove the message from the queue
+    /// - timeout_secs: seconds to wait for a message (0 returns immediately)
+    /// - topic_plain: optional plain-text topic which will be base64-encoded per Mycelium spec
+    /// Returns:
+    /// - Ok(Some(result_json)) on success, where result_json matches InboundMessage schema
+    /// - Ok(None) when there is no message ready (Mycelium returns error code 204)
+    pub async fn pop_message(
+        &self,
+        peek: Option<bool>,
+        timeout_secs: Option<u64>,
+        topic_plain: Option<&str>,
+    ) -> Result<Option<Value>, MyceliumClientError> {
+        // Build params array
+        let mut params_array = vec![];
+        if let Some(p) = peek {
+            params_array.push(serde_json::Value::Bool(p));
+        } else {
+            params_array.push(serde_json::Value::Null)
+        }
+        if let Some(t) = timeout_secs {
+            params_array.push(serde_json::Value::Number(t.into()));
+        } else {
+            params_array.push(serde_json::Value::Null)
+        }
+        if let Some(tp) = topic_plain {
+            let topic_b64 = BASE64_STANDARD.encode(tp.as_bytes());
+            params_array.push(serde_json::Value::String(topic_b64));
+        } else {
+            params_array.push(serde_json::Value::Null)
+        }
+
+        let req = json!({
+            "jsonrpc": "2.0",
+            "id": self.next_id(),
+            "method": "popMessage",
+            "params": serde_json::Value::Array(params_array),
+        });
+
+        tracing::info!(%req, "calling popMessage");
+
+        let resp = self.http.post(&self.base_url).json(&req).send().await?;
+        let status = resp.status();
+        let body: Value = resp.json().await?;
+
+        // Handle JSON-RPC error envelope specially for code 204 (no message ready)
+        if let Some(err) = body.get("error") {
+            let code = err.get("code").and_then(|v| v.as_i64()).unwrap_or(0);
+            let msg = err
+                .get("message")
+                .and_then(|v| v.as_str())
+                .unwrap_or("unknown error");
+
+            if code == 204 {
+                // No message ready
+                return Ok(None);
+            }
+            if code == 408 {
+                // Align with other transport timeout mapping
+                return Err(MyceliumClientError::TransportTimeout);
+            }
+            return Err(MyceliumClientError::RpcError(format!(
+                "code={code} msg={msg}"
+            )));
+        }
+
+        if !status.is_success() {
+            return Err(MyceliumClientError::RpcError(format!(
+                "HTTP {status}, body {body}"
+            )));
+        }
+
+        let result = body.get("result").ok_or_else(|| {
+            MyceliumClientError::InvalidResponse(format!("missing result in response: {body}"))
+        })?;
+        Ok(Some(result.clone()))
+    }
 }
 
 #[cfg(test)]
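Editor's note: to make the polling API above concrete, here is a minimal consumer sketch. It is not part of the commit; it only uses `pop_message` as defined in the hunk above and the same base64 payload convention, and it assumes `MyceliumClient`/`MyceliumClientError` are importable as exported in `src/clients/mod.rs`.

```rust
use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::Value;

use crate::clients::{MyceliumClient, MyceliumClientError};

/// Drain pending supervisor replies from the "supervisor.rpc" topic.
async fn drain_replies(client: &MyceliumClient) -> Result<(), MyceliumClientError> {
    // pop_message returns Ok(None) once no message is ready (error code 204 upstream),
    // which ends the loop. peek=false removes each message from the queue.
    while let Some(msg) = client
        .pop_message(Some(false), Some(20), Some("supervisor.rpc"))
        .await?
    {
        // The payload is base64; inside is the supervisor's JSON-RPC reply envelope.
        if let Some(payload_b64) = msg.get("payload").and_then(|v| v.as_str()) {
            if let Ok(raw) = BASE64_STANDARD.decode(payload_b64.as_bytes()) {
                if let Ok(rpc) = serde_json::from_slice::<Value>(&raw) {
                    println!("supervisor reply id={:?}", rpc.get("id"));
                }
            }
        }
    }
    Ok(())
}
```

In the actual code this decoding is done by the SupervisorHub introduced later in this compare, which correlates each reply to a waiting caller by the inner JSON-RPC id.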
src/clients/supervisor_client.rs

@@ -1,20 +1,20 @@
 use std::sync::Arc;
 use std::sync::atomic::{AtomicU64, Ordering};
+use std::time::Duration;
 
 use base64::Engine;
 use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
 use serde_json::{Value, json};
 use thiserror::Error;
+use tokio::time::timeout;
 
-use crate::clients::{Destination, MyceliumClient, MyceliumClientError};
+use crate::clients::{Destination, MyceliumClient, MyceliumClientError, SupervisorHub};
 
 #[derive(Clone)]
 pub struct SupervisorClient {
-    mycelium: Arc<MyceliumClient>, // Delegated Mycelium transport
+    hub: Arc<SupervisorHub>,       // Global hub with background pop loop and shared id generator
     destination: Destination,      // ip or pk
-    topic: String,                 // e.g. "supervisor.rpc"
     secret: Option<String>,        // optional, required by several supervisor methods
-    id_counter: Arc<AtomicU64>,    // JSON-RPC id generator (for inner supervisor requests)
 }
 
 #[derive(Debug, Error)]
@@ -46,24 +46,22 @@ impl From<MyceliumClientError> for SupervisorClientError {
 }
 
 impl SupervisorClient {
-    /// Preferred constructor: provide a shared Mycelium client.
-    pub fn new_with_client(
-        mycelium: Arc<MyceliumClient>,
+    /// Preferred constructor using a shared SupervisorHub (single global listener).
+    pub fn new_with_hub(
+        hub: Arc<SupervisorHub>,
         destination: Destination,
-        topic: impl Into<String>,
         secret: Option<String>,
     ) -> Self {
         Self {
-            mycelium,
+            hub,
             destination,
-            topic: topic.into(),
             secret,
-            id_counter: Arc::new(AtomicU64::new(1)),
         }
     }
 
-    /// Backward-compatible constructor that builds a Mycelium client from base_url.
-    /// base_url defaults to Mycelium spec "http://127.0.0.1:8990" if empty.
+    /// Backward-compatible constructor that builds a new Hub from base_url/topic.
+    /// NOTE: This spawns a background popMessage listener for the given topic.
+    /// Prefer `new_with_hub` so the process has a single global hub.
     pub fn new(
         base_url: impl Into<String>,
         destination: Destination,
@@ -78,8 +76,16 @@ impl SupervisorClient {
         Ok(Self::new_with_client(mycelium, destination, topic, secret))
     }
 
-    fn next_id(&self) -> u64 {
-        self.id_counter.fetch_add(1, Ordering::Relaxed)
+    /// Backward-compatible constructor that reuses an existing Mycelium client.
+    /// NOTE: This creates a new hub and its own background listener. Prefer `new_with_hub`.
+    pub fn new_with_client(
+        mycelium: Arc<MyceliumClient>,
+        destination: Destination,
+        topic: impl Into<String>,
+        secret: Option<String>,
+    ) -> Self {
+        let hub = SupervisorHub::new_with_client(mycelium, topic);
+        Self::new_with_hub(hub, destination, secret)
     }
 
     /// Internal helper used by tests to inspect dst JSON shape.
@@ -93,7 +99,17 @@ impl SupervisorClient {
     fn build_supervisor_payload(&self, method: &str, params: Value) -> Value {
         json!({
             "jsonrpc": "2.0",
-            "id": self.next_id(),
+            "id": self.hub.next_id(),
+            "method": method,
+            "params": params,
+        })
+    }
+
+    /// Build a supervisor JSON-RPC payload but force a specific id (used for correlation).
+    fn build_supervisor_payload_with_id(&self, method: &str, params: Value, id: u64) -> Value {
+        json!({
+            "jsonrpc": "2.0",
+            "id": id,
             "method": method,
             "params": params,
         })
@@ -118,96 +134,6 @@ impl SupervisorClient {
         .map(|s| s.to_string())
     }
 
-    /// Generic call: build supervisor JSON-RPC message, send via Mycelium pushMessage, return outbound message id (hex).
-    pub async fn call(&self, method: &str, params: Value) -> Result<String, SupervisorClientError> {
-        let inner = self.build_supervisor_payload(method, params);
-        let payload_b64 = Self::encode_payload(&inner)?;
-        let result = self
-            .mycelium
-            .push_message(
-                &self.destination,
-                &Self::encode_topic(self.topic.as_bytes()),
-                &payload_b64,
-                None,
-            )
-            .await?;
-
-        if let Some(id) = MyceliumClient::extract_message_id_from_result(&result) {
-            return Ok(id);
-        }
-        // Some servers might return the oneOf wrapped, handle len==1 array defensively (not in spec but resilient)
-        if let Some(arr) = result.as_array()
-            && arr.len() == 1
-            && let Some(id) = MyceliumClient::extract_message_id_from_result(&arr[0])
-        {
-            return Ok(id);
-        }
-        Err(SupervisorClientError::InvalidResponse(format!(
-            "result did not contain message id: {result}"
-        )))
-    }
-
-    /// Synchronous variant: wait for a JSON-RPC reply via Mycelium reply_timeout, and return the inner JSON-RPC "result".
-    /// If the supervisor returns an error object, map to RpcError.
-    pub async fn call_sync(
-        &self,
-        method: &str,
-        params: Value,
-        reply_timeout_secs: u64,
-    ) -> Result<Value, SupervisorClientError> {
-        let inner = self.build_supervisor_payload(method, params);
-        let payload_b64 = Self::encode_payload(&inner)?;
-
-        let result = self
-            .mycelium
-            .push_message(
-                &self.destination,
-                &self.topic,
-                &payload_b64,
-                Some(reply_timeout_secs),
-            )
-            .await?;
-
-        // Expect an InboundMessage-like with a base64 payload containing the supervisor JSON-RPC response
-        let payload_field = if let Some(p) = result.get("payload").and_then(|v| v.as_str()) {
-            p.to_string()
-        } else if let Some(arr) = result.as_array() {
-            // Defensive: handle single-element array shape
-            if let Some(one) = arr.get(0) {
-                one.get("payload")
-                    .and_then(|v| v.as_str())
-                    .map(|s| s.to_string())
-                    .ok_or_else(|| {
-                        SupervisorClientError::InvalidResponse(format!(
-                            "missing payload in result: {result}"
-                        ))
-                    })?
-            } else {
-                return Err(SupervisorClientError::TransportTimeout);
-            }
-        } else {
-            // No payload => no reply received within timeout (Mycelium would have returned just an id)
-            return Err(SupervisorClientError::TransportTimeout);
-        };
-
-        let raw = BASE64_STANDARD
-            .decode(payload_field.as_bytes())
-            .map_err(|e| {
-                SupervisorClientError::InvalidResponse(format!("invalid base64 payload: {e}"))
-            })?;
-        let rpc_resp: Value = serde_json::from_slice(&raw)?;
-
-        if let Some(err) = rpc_resp.get("error") {
-            return Err(SupervisorClientError::RpcError(err.to_string()));
-        }
-        let res = rpc_resp.get("result").ok_or_else(|| {
-            SupervisorClientError::InvalidResponse(format!(
-                "missing result in supervisor reply: {rpc_resp}"
-            ))
-        })?;
-        Ok(res.clone())
-    }
-
     fn need_secret(&self) -> Result<&str, SupervisorClientError> {
         self.secret
             .as_deref()
@@ -215,13 +141,247 @@ impl SupervisorClient {
     }
 
     // -----------------------------
-    // Typed wrappers for Supervisor API
-    // Asynchronous-only: returns outbound message id
+    // Core: request-reply call via Hub with default 10s timeout
+    // -----------------------------
+
+    /// Send a supervisor JSON-RPC request and await its reply via the Hub.
+    /// Returns (outbound_message_id, reply_envelope_json).
+    pub async fn call_with_reply_timeout(
+        &self,
+        method: &str,
+        params: Value,
+        timeout_secs: u64,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        let inner_id = self.hub.next_id();
+        // Register waiter before sending to avoid race
+        let rx = self.hub.register_waiter(inner_id).await;
+
+        let inner = self.build_supervisor_payload_with_id(method, params, inner_id);
+        let payload_b64 = Self::encode_payload(&inner)?;
+
+        let result = self
+            .hub
+            .mycelium()
+            .push_message(
+                &self.destination,
+                &Self::encode_topic(self.hub.topic().as_bytes()),
+                &payload_b64,
+                None,
+            )
+            .await?;
+
+        let out_id = if let Some(id) = MyceliumClient::extract_message_id_from_result(&result) {
+            id
+        } else if let Some(arr) = result.as_array()
+            && arr.len() == 1
+            && let Some(id) = MyceliumClient::extract_message_id_from_result(&arr[0])
+        {
+            id
+        } else {
+            // Clean pending entry to avoid leak
+            let _ = self.hub.remove_waiter(inner_id).await;
+            return Err(SupervisorClientError::InvalidResponse(format!(
+                "result did not contain message id: {result}"
+            )));
+        };
+
+        let d = Duration::from_secs(timeout_secs);
+        match timeout(d, rx).await {
+            Ok(Ok(reply)) => Ok((out_id, reply)),
+            Ok(Err(_canceled)) => Err(SupervisorClientError::InvalidResponse(
+                "oneshot canceled before receiving reply".into(),
+            )),
+            Err(_elapsed) => {
+                // Cleanup on timeout
+                let _ = self.hub.remove_waiter(inner_id).await;
+                Err(SupervisorClientError::TransportTimeout)
+            }
+        }
+    }
+
+    /// Send and await with default 10s timeout.
+    pub async fn call_with_reply(
+        &self,
+        method: &str,
+        params: Value,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply_timeout(method, params, 60).await
+    }
+
+    /// Back-compat: Send and await a reply but return only the outbound id (discard reply).
+    /// This keeps existing call sites working while the system migrates to reply-aware paths.
+    pub async fn call(&self, method: &str, params: Value) -> Result<String, SupervisorClientError> {
+        let (out_id, _reply) = self.call_with_reply(method, params).await?;
+        Ok(out_id)
+    }
+
+    // -----------------------------
+    // Typed wrappers for Supervisor API (await replies)
     // -----------------------------
 
     // Runners
+    pub async fn list_runners_wait(&self) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("list_runners", json!([])).await
+    }
+
+    pub async fn register_runner_wait(
+        &self,
+        name: impl Into<String>,
+        queue: impl Into<String>,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        let secret = self.need_secret()?;
+        let params = json!([{
+            "secret": secret,
+            "name": name.into(),
+            "queue": queue.into()
+        }]);
+        self.call_with_reply("register_runner", params).await
+    }
+
+    pub async fn remove_runner_wait(
+        &self,
+        actor_id: impl Into<String>,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("remove_runner", json!([actor_id.into()]))
+            .await
+    }
+
+    pub async fn start_runner_wait(
+        &self,
+        actor_id: impl Into<String>,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("start_runner", json!([actor_id.into()]))
+            .await
+    }
+
+    pub async fn stop_runner_wait(
+        &self,
+        actor_id: impl Into<String>,
+        force: bool,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("stop_runner", json!([actor_id.into(), force]))
+            .await
+    }
+
+    pub async fn get_runner_status_wait(
+        &self,
+        actor_id: impl Into<String>,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("get_runner_status", json!([actor_id.into()]))
+            .await
+    }
+
+    pub async fn get_all_runner_status_wait(
+        &self,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("get_all_runner_status", json!([]))
+            .await
+    }
+
+    pub async fn start_all_wait(&self) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("start_all", json!([])).await
+    }
+
+    pub async fn stop_all_wait(
+        &self,
+        force: bool,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("stop_all", json!([force])).await
+    }
+
+    pub async fn get_all_status_wait(&self) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("get_all_status", json!([])).await
+    }
+
+    // Jobs (await)
+    pub async fn jobs_create_wait(
+        &self,
+        job: Value,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        let secret = self.need_secret()?;
+        let params = json!([{
+            "secret": secret,
+            "job": job
+        }]);
+        self.call_with_reply("jobs.create", params).await
+    }
+
+    pub async fn jobs_list_wait(&self) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("jobs.list", json!([])).await
+    }
+
+    pub async fn job_run_wait(&self, job: Value) -> Result<(String, Value), SupervisorClientError> {
+        let secret = self.need_secret()?;
+        let params = json!([{
+            "secret": secret,
+            "job": job
+        }]);
+        self.call_with_reply("job.run", params).await
+    }
+
+    pub async fn job_start_wait(
+        &self,
+        job_id: impl Into<String>,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        let secret = self.need_secret()?;
+        let params = json!([{
+            "secret": secret,
+            "job_id": job_id.into()
+        }]);
+        self.call_with_reply("job.start", params).await
+    }
+
+    pub async fn job_status_wait(
+        &self,
+        job_id: impl Into<String>,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("job.status", json!([job_id.into()]))
+            .await
+    }
+
+    pub async fn job_result_wait(
+        &self,
+        job_id: impl Into<String>,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("job.result", json!([job_id.into()]))
+            .await
+    }
+
+    pub async fn job_stop_wait(
+        &self,
+        job_id: impl Into<String>,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        let secret = self.need_secret()?;
+        let params = json!([{
+            "secret": secret,
+            "job_id": job_id.into()
+        }]);
+        self.call_with_reply("job.stop", params).await
+    }
+
+    pub async fn job_delete_wait(
+        &self,
+        job_id: impl Into<String>,
+    ) -> Result<(String, Value), SupervisorClientError> {
+        let secret = self.need_secret()?;
+        let params = json!([{
+            "secret": secret,
+            "job_id": job_id.into()
+        }]);
+        self.call_with_reply("job.delete", params).await
+    }
+
+    pub async fn rpc_discover_wait(&self) -> Result<(String, Value), SupervisorClientError> {
+        self.call_with_reply("rpc.discover", json!([])).await
+    }
+
+    // -----------------------------
+    // Backward-compatible variants returning only outbound id (discarding reply)
+    // -----------------------------
 
     pub async fn list_runners(&self) -> Result<String, SupervisorClientError> {
-        self.call("list_runners", json!([])).await
+        let (id, _) = self.list_runners_wait().await?;
+        Ok(id)
     }
 
     pub async fn register_runner(
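Editor's aside (not part of the commit): the practical difference between the new reply-aware wrappers and the back-compat variants rewritten in the next hunk is what the caller gets back. A minimal sketch, assuming `client` is a `SupervisorClient` configured with a secret; the method names and the "python" name/queue are taken from this diff.

```rust
async fn register_example(client: &SupervisorClient) -> Result<(), SupervisorClientError> {
    // Reply-aware: returns the outbound Mycelium message id plus the supervisor's JSON-RPC reply.
    let (out_id, reply) = client.register_runner_wait("python", "python").await?;
    println!("register_runner ({out_id}) -> {reply}");

    // Back-compat: the same request under the hood, but the reply is awaited and then discarded.
    let out_id_only = client.register_runner("python", "python").await?;
    println!("register_runner queued as {out_id_only}");
    Ok(())
}
```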
@@ -229,27 +389,24 @@ impl SupervisorClient {
         name: impl Into<String>,
         queue: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        let secret = self.need_secret()?;
-        let params = json!([{
-            "secret": secret,
-            "name": name.into(),
-            "queue": queue.into()
-        }]);
-        self.call("register_runner", params).await
+        let (id, _) = self.register_runner_wait(name, queue).await?;
+        Ok(id)
     }
 
     pub async fn remove_runner(
         &self,
         actor_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        self.call("remove_runner", json!([actor_id.into()])).await
+        let (id, _) = self.remove_runner_wait(actor_id).await?;
+        Ok(id)
     }
 
     pub async fn start_runner(
         &self,
         actor_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        self.call("start_runner", json!([actor_id.into()])).await
+        let (id, _) = self.start_runner_wait(actor_id).await?;
+        Ok(id)
     }
 
     pub async fn stop_runner(
@@ -257,171 +414,96 @@ impl SupervisorClient {
         actor_id: impl Into<String>,
         force: bool,
     ) -> Result<String, SupervisorClientError> {
-        self.call("stop_runner", json!([actor_id.into(), force]))
-            .await
+        let (id, _) = self.stop_runner_wait(actor_id, force).await?;
+        Ok(id)
     }
 
     pub async fn get_runner_status(
         &self,
         actor_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        self.call("get_runner_status", json!([actor_id.into()]))
-            .await
+        let (id, _) = self.get_runner_status_wait(actor_id).await?;
+        Ok(id)
     }
 
     pub async fn get_all_runner_status(&self) -> Result<String, SupervisorClientError> {
-        self.call("get_all_runner_status", json!([])).await
+        let (id, _) = self.get_all_runner_status_wait().await?;
+        Ok(id)
     }
 
     pub async fn start_all(&self) -> Result<String, SupervisorClientError> {
-        self.call("start_all", json!([])).await
+        let (id, _) = self.start_all_wait().await?;
+        Ok(id)
     }
 
     pub async fn stop_all(&self, force: bool) -> Result<String, SupervisorClientError> {
-        self.call("stop_all", json!([force])).await
+        let (id, _) = self.stop_all_wait(force).await?;
+        Ok(id)
     }
 
     pub async fn get_all_status(&self) -> Result<String, SupervisorClientError> {
-        self.call("get_all_status", json!([])).await
+        let (id, _) = self.get_all_status_wait().await?;
+        Ok(id)
    }
 
-    // Jobs
     pub async fn jobs_create(&self, job: Value) -> Result<String, SupervisorClientError> {
-        let secret = self.need_secret()?;
-        let params = json!([{
-            "secret": secret,
-            "job": job
-        }]);
-        self.call("jobs.create", params).await
+        let (id, _) = self.jobs_create_wait(job).await?;
+        Ok(id)
     }
 
     pub async fn jobs_list(&self) -> Result<String, SupervisorClientError> {
-        self.call("jobs.list", json!([])).await
+        let (id, _) = self.jobs_list_wait().await?;
+        Ok(id)
     }
 
     pub async fn job_run(&self, job: Value) -> Result<String, SupervisorClientError> {
-        let secret = self.need_secret()?;
-        let params = json!([{
-            "secret": secret,
-            "job": job
-        }]);
-        self.call("job.run", params).await
+        let (id, _) = self.job_run_wait(job).await?;
+        Ok(id)
     }
 
     pub async fn job_start(
         &self,
         job_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        let secret = self.need_secret()?;
-        let params = json!([{
-            "secret": secret,
-            "job_id": job_id.into()
-        }]);
-        self.call("job.start", params).await
+        let (id, _) = self.job_start_wait(job_id).await?;
+        Ok(id)
     }
 
     pub async fn job_status(
         &self,
         job_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        self.call("job.status", json!([job_id.into()])).await
-    }
-
-    /// Synchronous job.status: waits for the supervisor to reply and returns the status string.
-    /// The supervisor result may be an object with { status: "..." } or a bare string.
-    pub async fn job_status_sync(
-        &self,
-        job_id: impl Into<String>,
-        reply_timeout_secs: u64,
-    ) -> Result<String, SupervisorClientError> {
-        let res = self
-            .call_sync("job.status", json!([job_id.into()]), reply_timeout_secs)
-            .await?;
-        let status = if let Some(s) = res.get("status").and_then(|v| v.as_str()) {
-            s.to_string()
-        } else if let Some(s) = res.as_str() {
-            s.to_string()
-        } else {
-            return Err(SupervisorClientError::InvalidResponse(format!(
-                "unexpected job.status result shape: {res}"
-            )));
-        };
-        Ok(status)
+        let (id, _) = self.job_status_wait(job_id).await?;
+        Ok(id)
     }
 
     pub async fn job_result(
         &self,
         job_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        self.call("job.result", json!([job_id.into()])).await
-    }
-
-    /// Synchronous job.result: waits for the supervisor to reply and returns a map
-    /// containing exactly one of:
-    /// - {"success": "..."} on success
-    /// - {"error": "..."} on error reported by the runner
-    /// Some servers may return a bare string; we treat that as {"success": "<string>"}.
-    pub async fn job_result_sync(
-        &self,
-        job_id: impl Into<String>,
-        reply_timeout_secs: u64,
-    ) -> Result<std::collections::HashMap<String, String>, SupervisorClientError> {
-        let res = self
-            .call_sync("job.result", json!([job_id.into()]), reply_timeout_secs)
-            .await?;
-
-        use std::collections::HashMap;
-        let mut out: HashMap<String, String> = HashMap::new();
-
-        if let Some(obj) = res.as_object() {
-            if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
-                out.insert("success".to_string(), s.to_string());
-                return Ok(out);
-            }
-            if let Some(s) = obj.get("error").and_then(|v| v.as_str()) {
-                out.insert("error".to_string(), s.to_string());
-                return Ok(out);
-            }
-            return Err(SupervisorClientError::InvalidResponse(format!(
-                "unexpected job.result result shape: {res}"
-            )));
-        } else if let Some(s) = res.as_str() {
-            out.insert("success".to_string(), s.to_string());
-            return Ok(out);
-        }
-
-        Err(SupervisorClientError::InvalidResponse(format!(
-            "unexpected job.result result shape: {res}"
-        )))
+        let (id, _) = self.job_result_wait(job_id).await?;
+        Ok(id)
     }
 
     pub async fn job_stop(
         &self,
         job_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        let secret = self.need_secret()?;
-        let params = json!([{
-            "secret": secret,
-            "job_id": job_id.into()
-        }]);
-        self.call("job.stop", params).await
+        let (id, _) = self.job_stop_wait(job_id).await?;
+        Ok(id)
     }
 
     pub async fn job_delete(
         &self,
         job_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        let secret = self.need_secret()?;
-        let params = json!([{
-            "secret": secret,
-            "job_id": job_id.into()
-        }]);
-        self.call("job.delete", params).await
+        let (id, _) = self.job_delete_wait(job_id).await?;
+        Ok(id)
     }
 
-    // Discovery
     pub async fn rpc_discover(&self) -> Result<String, SupervisorClientError> {
-        self.call("rpc.discover", json!([])).await
+        let (id, _) = self.rpc_discover_wait().await?;
+        Ok(id)
     }
 }
 
@@ -434,27 +516,27 @@ mod tests {
     use std::net::IpAddr;
 
     fn mk_client() -> SupervisorClient {
-        // Uses the legacy constructor but will not issue real network calls in these tests.
-        SupervisorClient::new(
-            "http://127.0.0.1:8990",
+        // Build a hub but it won't issue real network calls in these serializer-only tests.
+        let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap());
+        let hub = SupervisorHub::new_with_client(mycelium, "supervisor.rpc");
+        SupervisorClient::new_with_hub(
+            hub,
             Destination::Pk(
                 "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32".to_string(),
             ),
-            "supervisor.rpc",
             Some("secret".to_string()),
         )
-        .unwrap()
     }
 
     #[test]
     fn builds_dst_ip_and_pk() {
-        let c_ip = SupervisorClient::new(
-            "http://127.0.0.1:8990",
+        let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap());
+        let hub_ip = SupervisorHub::new_with_client(mycelium.clone(), "supervisor.rpc");
+        let c_ip = SupervisorClient::new_with_hub(
+            hub_ip,
             Destination::Ip("2001:db8::1".parse().unwrap()),
-            "supervisor.rpc",
            None,
-        )
-        .unwrap();
+        );
        let v_ip = c_ip.build_dst();
        assert_eq!(v_ip.get("ip").unwrap().as_str().unwrap(), "2001:db8::1");
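Editor's sketch of a full job round-trip with the awaiting wrappers introduced above. The method names and return types come from this diff, but the job JSON shape below is hypothetical (the real schema lives in the supervisor); `client` is assumed to carry a secret.

```rust
use serde_json::json;

async fn run_demo_job(client: &SupervisorClient) -> Result<(), SupervisorClientError> {
    // Hypothetical job fields for illustration only.
    let job = json!({ "id": "demo-1", "script": "print('hello')" });

    let (_msg_id, run_reply) = client.job_run_wait(job).await?;
    println!("job.run -> {run_reply}");

    let (_msg_id, status) = client.job_status_wait("demo-1").await?;
    println!("job.status -> {status}");

    let (_msg_id, result) = client.job_result_wait("demo-1").await?;
    println!("job.result -> {result}");
    Ok(())
}
```

Each call blocks only until the SupervisorHub delivers the matching reply or the per-call timeout elapses, so the caller no longer has to poll Mycelium itself.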
src/clients/supervisor_hub.rs (new file, +143)

@@ -0,0 +1,143 @@
+use std::collections::HashMap;
+use std::sync::Arc;
+use std::sync::atomic::{AtomicU64, Ordering};
+
+use base64::Engine;
+use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
+use serde_json::Value;
+use tokio::sync::{Mutex, oneshot};
+
+use crate::clients::mycelium_client::MyceliumClient;
+
+/// Global hub that:
+/// - Owns a single MyceliumClient
+/// - Spawns a background popMessage loop filtered by topic
+/// - Correlates supervisor JSON-RPC replies by inner id to waiting callers via oneshot channels
+#[derive(Clone)]
+pub struct SupervisorHub {
+    mycelium: Arc<MyceliumClient>,
+    topic: String,
+    pending: Arc<Mutex<HashMap<u64, oneshot::Sender<Value>>>>,
+    id_counter: Arc<AtomicU64>,
+}
+
+impl SupervisorHub {
+    /// Create a new hub and start the background popMessage task.
+    /// - base_url: Mycelium JSON-RPC endpoint, e.g. "http://127.0.0.1:8990"
+    /// - topic: plain-text topic (e.g., "supervisor.rpc")
+    pub fn new(
+        base_url: impl Into<String>,
+        topic: impl Into<String>,
+    ) -> Result<Arc<Self>, crate::clients::MyceliumClientError> {
+        let myc = Arc::new(MyceliumClient::new(base_url)?);
+        Ok(Self::new_with_client(myc, topic))
+    }
+
+    /// Variant that reuses an existing Mycelium client.
+    pub fn new_with_client(mycelium: Arc<MyceliumClient>, topic: impl Into<String>) -> Arc<Self> {
+        let hub = Arc::new(Self {
+            mycelium,
+            topic: topic.into(),
+            pending: Arc::new(Mutex::new(HashMap::new())),
+            id_counter: Arc::new(AtomicU64::new(1)),
+        });
+        Self::spawn_pop_loop(hub.clone());
+        hub
+    }
+
+    fn spawn_pop_loop(hub: Arc<Self>) {
+        tokio::spawn(async move {
+            loop {
+                match hub.mycelium.pop_message(Some(false), Some(20), None).await {
+                    Ok(Some(inb)) => {
+                        // Extract and decode payload
+                        let Some(payload_b64) = inb.get("payload").and_then(|v| v.as_str()) else {
+                            // Not a payload-bearing message; ignore
+                            continue;
+                        };
+                        let Ok(raw) = BASE64_STANDARD.decode(payload_b64.as_bytes()) else {
+                            tracing::warn!(target: "supervisor_hub", "Failed to decode inbound payload base64");
+                            continue;
+                        };
+                        let Ok(rpc): Result<Value, _> = serde_json::from_slice(&raw) else {
+                            tracing::warn!(target: "supervisor_hub", "Failed to parse inbound payload JSON");
+                            continue;
+                        };
+
+                        // Extract inner JSON-RPC id
+                        let inner_id_u64 = match rpc.get("id") {
+                            Some(Value::Number(n)) => n.as_u64(),
+                            Some(Value::String(s)) => s.parse::<u64>().ok(),
+                            _ => None,
+                        };
+
+                        if let Some(inner_id) = inner_id_u64 {
+                            // Try to deliver to a pending waiter
+                            let sender_opt = {
+                                let mut guard = hub.pending.lock().await;
+                                guard.remove(&inner_id)
+                            };
+                            if let Some(tx) = sender_opt {
+                                let _ = tx.send(rpc);
+                            } else {
+                                tracing::warn!(
+                                    target: "supervisor_hub",
+                                    inner_id,
+                                    payload = %String::from_utf8_lossy(&raw),
+                                    "Unmatched supervisor reply; no waiter registered"
+                                );
+                            }
+                        } else {
+                            tracing::warn!(target: "supervisor_hub", "Inbound supervisor reply missing id; dropping");
+                        }
+                    }
+                    Ok(None) => {
+                        // No message; continue polling
+                        continue;
+                    }
+                    Err(e) => {
+                        tracing::warn!(target: "supervisor_hub", error = %e, "popMessage error; backing off");
+                        tokio::time::sleep(std::time::Duration::from_millis(200)).await;
+                    }
+                }
+            }
+        });
+    }
+
+    /// Allocate a new inner supervisor JSON-RPC id.
+    pub fn next_id(&self) -> u64 {
+        self.id_counter.fetch_add(1, Ordering::Relaxed)
+    }
+
+    /// Register a oneshot sender for the given inner id and return the receiver side.
+    pub async fn register_waiter(&self, inner_id: u64) -> oneshot::Receiver<Value> {
+        let (tx, rx) = oneshot::channel();
+        let mut guard = self.pending.lock().await;
+        guard.insert(inner_id, tx);
+        rx
+    }
+
+    /// Remove a pending waiter for a given id (used to cleanup on timeout).
+    pub async fn remove_waiter(&self, inner_id: u64) -> Option<oneshot::Sender<Value>> {
+        let mut guard = self.pending.lock().await;
+        guard.remove(&inner_id)
+    }
+
+    /// Access to underlying Mycelium client (for pushMessage).
+    pub fn mycelium(&self) -> Arc<MyceliumClient> {
+        self.mycelium.clone()
+    }
+
+    /// Access configured topic.
+    pub fn topic(&self) -> &str {
+        &self.topic
+    }
+}
+
+impl std::fmt::Debug for SupervisorHub {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SupervisorHub")
+            .field("topic", &self.topic)
+            .finish()
+    }
+}
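Editor's sketch of how the pieces above are meant to fit together: one hub per process, shared by every client. Everything used here (SupervisorHub::new, SupervisorClient::new_with_hub, call_with_reply, Destination::Pk) appears in this compare; the public key is the one from the test fixtures and the admin secret is the README demo value.

```rust
use std::sync::Arc;
use serde_json::json;

async fn wire_up() -> Result<(), Box<dyn std::error::Error>> {
    // One global hub per process: it owns the Mycelium client and the popMessage loop.
    let hub = SupervisorHub::new("http://127.0.0.1:8990", "supervisor.rpc")?;

    // Clients share the hub; each one only carries its destination and optional secret.
    let client = SupervisorClient::new_with_hub(
        hub.clone(),
        Destination::Pk("bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32".into()),
        Some("admin123".to_string()),
    );

    // Request-reply: the hub correlates the supervisor's answer by the inner JSON-RPC id.
    let (_msg_id, reply) = client.call_with_reply("list_runners", json!([])).await?;
    println!("list_runners -> {reply}");
    Ok(())
}
```

The design choice is that only the hub consumes inbound messages, so multiple clients (and the router) never race each other on popMessage.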
43
src/dag.rs
43
src/dag.rs
@@ -3,7 +3,7 @@ use std::collections::{HashMap, HashSet, VecDeque};
 use std::fmt;
 
 use crate::{
-    models::{Flow, Job, ScriptType},
+    models::{Flow, Job, JobStatus, ScriptType},
     storage::RedisDriver,
 };
 
@@ -212,6 +212,41 @@ pub async fn build_flow_dag(
     edges.sort_unstable();
     reverse_edges.sort_unstable();
 
+    // Populate runtime execution state from persisted Job.status()
+    let mut started_set: HashSet<u32> = HashSet::new();
+    let mut completed_set: HashSet<u32> = HashSet::new();
+    let mut error_ids: Vec<u32> = Vec::new();
+
+    for (&jid, job) in &jobs {
+        match job.status() {
+            JobStatus::Finished => {
+                completed_set.insert(jid);
+            }
+            JobStatus::Started => {
+                started_set.insert(jid);
+            }
+            JobStatus::Dispatched => {
+                // Consider Dispatched as "in-flight" for DAG runtime started set,
+                // so queued/running work is visible in periodic snapshots.
+                started_set.insert(jid);
+            }
+            JobStatus::Error => {
+                error_ids.push(jid);
+            }
+            JobStatus::WaitingForPrerequisites => {
+                // Neither started nor completed
+            }
+        }
+    }
+
+    // Choose a deterministic failed job if any errors exist (smallest job id)
+    let failed_job = if error_ids.is_empty() {
+        None
+    } else {
+        error_ids.sort_unstable();
+        Some(error_ids[0])
+    };
+
     let dag = FlowDag {
         flow_id,
         caller_id,
@@ -222,9 +257,9 @@ pub async fn build_flow_dag(
         roots,
         leaves,
         levels,
-        started: HashSet::new(),
-        completed: HashSet::new(),
-        failed_job: None,
+        started: started_set,
+        completed: completed_set,
+        failed_job,
     };
 
     Ok(dag)
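A standalone sketch of the seeding rule this hunk introduces, under the assumption that only the mapping logic matters here (the `Status` enum and `seed` helper below are illustrative stand-ins, not the crate's types): Finished jobs seed `completed`, Started and Dispatched jobs seed `started`, and the smallest erroring job id becomes the deterministic `failed_job`.

```rust
use std::collections::HashSet;

#[derive(Clone, Copy)]
enum Status {
    Dispatched,
    WaitingForPrerequisites,
    Started,
    Finished,
    Error,
}

// Mirror of the seeding rules shown above.
fn seed(jobs: &[(u32, Status)]) -> (HashSet<u32>, HashSet<u32>, Option<u32>) {
    let mut started = HashSet::new();
    let mut completed = HashSet::new();
    let mut errors: Vec<u32> = Vec::new();
    for &(id, st) in jobs {
        match st {
            Status::Finished => {
                completed.insert(id);
            }
            Status::Started | Status::Dispatched => {
                started.insert(id);
            }
            Status::Error => errors.push(id),
            Status::WaitingForPrerequisites => {}
        }
    }
    errors.sort_unstable();
    (started, completed, errors.first().copied())
}

fn main() {
    let jobs = [
        (1, Status::Finished),
        (2, Status::Dispatched),
        (3, Status::Error),
        (4, Status::Error),
    ];
    let (started, completed, failed) = seed(&jobs);
    assert!(completed.contains(&1));
    assert!(started.contains(&2));
    assert_eq!(failed, Some(3)); // deterministic: smallest erroring job id
}
```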
@@ -99,17 +99,24 @@ async fn main() {
     // Shared application state
     let state = Arc::new(herocoordinator::rpc::AppState::new(service));
 
-    // Start router workers (auto-discovered contexts)
+    // Start router workers (auto-discovered contexts) using a single global SupervisorHub (no separate inbound listener)
     {
         let base_url = format!("http://{}:{}", cli.mycelium_ip, cli.mycelium_port);
+        let hub = herocoordinator::clients::SupervisorHub::new(
+            base_url.clone(),
+            "supervisor.rpc".to_string(),
+        )
+        .expect("Failed to initialize SupervisorHub");
         let cfg = herocoordinator::router::RouterConfig {
            context_ids: Vec::new(), // ignored by start_router_auto
            concurrency: 32,
            base_url,
            topic: "supervisor.rpc".to_string(),
+           sup_hub: hub.clone(),
            transport_poll_interval_secs: 2,
            transport_poll_timeout_secs: 300,
        };
+        // Per-context outbound delivery loops (replies handled by SupervisorHub)
         let _auto_handle = herocoordinator::router::start_router_auto(service_for_router, cfg);
     }
 
@@ -59,6 +59,18 @@ pub enum TransportStatus {
     Failed,
 }
 
+impl std::fmt::Display for TransportStatus {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            TransportStatus::Queued => f.write_str("queued"),
+            TransportStatus::Sent => f.write_str("sent"),
+            TransportStatus::Delivered => f.write_str("delivered"),
+            TransportStatus::Read => f.write_str("read"),
+            TransportStatus::Failed => f.write_str("failed"),
+        }
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub enum MessageFormatType {
     Html,
762 src/router.rs
@@ -1,10 +1,17 @@
-use std::{collections::HashSet, sync::Arc};
+use std::{
+    collections::{HashMap, HashSet},
+    sync::Arc,
+};
 
+use base64::Engine;
+use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
 use serde_json::{Value, json};
-use tokio::sync::Semaphore;
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+use tokio::sync::{Mutex, Semaphore};
 
 use crate::{
-    clients::{Destination, MyceliumClient, SupervisorClient},
+    clients::{Destination, MyceliumClient, SupervisorClient, SupervisorHub},
     models::{Job, JobStatus, Message, MessageStatus, ScriptType, TransportStatus},
     service::AppService,
 };
@@ -14,13 +21,91 @@ use tracing::{error, info};
 pub struct RouterConfig {
     pub context_ids: Vec<u32>,
     pub concurrency: usize,
     pub base_url: String, // e.g. http://127.0.0.1:8990
     pub topic: String, // e.g. "supervisor.rpc"
+    pub sup_hub: Arc<SupervisorHub>, // global supervisor hub for replies
     // Transport status polling configuration
     pub transport_poll_interval_secs: u64, // e.g. 2
     pub transport_poll_timeout_secs: u64, // e.g. 300 (5 minutes)
 }
 
+/*
+SupervisorClient reuse cache (Router-local):
+
+Rationale:
+- SupervisorClient maintains an internal JSON-RPC id_counter per instance.
+- Rebuilding a client for each message resets this counter, causing inner JSON-RPC ids to restart at 1.
+- We reuse one SupervisorClient per (destination, topic, secret) to preserve monotonically increasing ids.
+
+Scope:
+- Cache is per Router loop (and a separate one for the inbound listener).
+- If cross-loop/process reuse becomes necessary later, promote to a process-global cache.
+
+Keying:
+- Key: destination + topic + secret-presence (secret content hashed; not stored in plaintext).
+
+Concurrency:
+- tokio::Mutex protects a HashMap<String, Arc<SupervisorClient>>.
+- Values are Arc so call sites clone cheaply and share the same id_counter.
+*/
+#[derive(Clone)]
+struct SupervisorClientCache {
+    map: Arc<Mutex<HashMap<String, Arc<SupervisorClient>>>>,
+}
+
+impl SupervisorClientCache {
+    fn new() -> Self {
+        Self {
+            map: Arc::new(Mutex::new(HashMap::new())),
+        }
+    }
+
+    fn make_key(dest: &Destination, topic: &str, secret: &Option<String>) -> String {
+        let dst = match dest {
+            Destination::Ip(ip) => format!("ip:{ip}"),
+            Destination::Pk(pk) => format!("pk:{pk}"),
+        };
+        // Hash the secret to avoid storing plaintext in keys while still differentiating values
+        let sec_hash = match secret {
+            Some(s) if !s.is_empty() => {
+                let mut hasher = DefaultHasher::new();
+                s.hash(&mut hasher);
+                format!("s:{}", hasher.finish())
+            }
+            _ => "s:none".to_string(),
+        };
+        format!("{dst}|t:{topic}|{sec_hash}")
+    }
+
+    async fn get_or_create(
+        &self,
+        hub: Arc<SupervisorHub>,
+        dest: Destination,
+        topic: String,
+        secret: Option<String>,
+    ) -> Arc<SupervisorClient> {
+        let key = Self::make_key(&dest, &topic, &secret);
+
+        {
+            let guard = self.map.lock().await;
+            if let Some(existing) = guard.get(&key) {
+                tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup");
+                return existing.clone();
+            }
+        }
+
+        let mut guard = self.map.lock().await;
+        if let Some(existing) = guard.get(&key) {
+            tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup (double-checked)");
+            return existing.clone();
+        }
+        let client = Arc::new(SupervisorClient::new_with_hub(hub, dest, secret.clone()));
+        guard.insert(key, client.clone());
+        tracing::debug!(target: "router", cache="supervisor", hit=false, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache insert");
+        client
+    }
+}
+
 /// Start background router loops, one per context.
 /// Each loop:
 /// - BRPOP msg_out with 1s timeout
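The cache above relies on a get-or-create path guarded by a tokio mutex with a second check after re-acquiring the lock. A self-contained sketch of that pattern (plain `String` values standing in for `SupervisorClient`, hypothetical names) might look like this:

```rust
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;

struct Cache {
    map: Mutex<HashMap<String, Arc<String>>>,
}

impl Cache {
    async fn get_or_create(&self, key: &str) -> Arc<String> {
        {
            let guard = self.map.lock().await;
            if let Some(existing) = guard.get(key) {
                return existing.clone(); // fast path: cache hit
            }
        }
        let mut guard = self.map.lock().await;
        if let Some(existing) = guard.get(key) {
            return existing.clone(); // double-checked: inserted by another task meanwhile
        }
        let value = Arc::new(format!("client-for-{key}"));
        guard.insert(key.to_string(), value.clone());
        value
    }
}

#[tokio::main]
async fn main() {
    let cache = Arc::new(Cache { map: Mutex::new(HashMap::new()) });
    let a = cache.get_or_create("pk:abc|t:supervisor.rpc|s:none").await;
    let b = cache.get_or_create("pk:abc|t:supervisor.rpc|s:none").await;
    assert!(Arc::ptr_eq(&a, &b)); // both callers share one instance (and its id counter)
}
```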
@@ -36,16 +121,11 @@ pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::
         let handle = tokio::spawn(async move {
             let sem = Arc::new(Semaphore::new(cfg_cloned.concurrency));
 
-            // Create a shared Mycelium client for this context loop (retry until available)
-            let mycelium = loop {
-                match MyceliumClient::new(cfg_cloned.base_url.clone()) {
-                    Ok(c) => break Arc::new(c),
-                    Err(e) => {
-                        error!(context_id=ctx_id, error=%e, "MyceliumClient init error");
-                        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
-                    }
-                }
-            };
+            // Use the global SupervisorHub and its Mycelium client
+            let sup_hub = cfg_cloned.sup_hub.clone();
+            let mycelium = sup_hub.mycelium();
+
+            let cache = Arc::new(SupervisorClientCache::new());
 
             loop {
                 // Pop next message key (blocking with timeout)
@@ -67,12 +147,21 @@ pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::
                     let cfg_task = cfg_cloned.clone();
                     tokio::spawn({
                         let mycelium = mycelium.clone();
+                        let cache = cache.clone();
+                        let sup_hub = sup_hub.clone();
                         async move {
                             // Ensure permit is dropped at end of task
                             let _permit = permit;
-                            if let Err(e) =
-                                deliver_one(&service_task, &cfg_task, ctx_id, &key, mycelium)
-                                    .await
+                            if let Err(e) = deliver_one(
+                                &service_task,
+                                &cfg_task,
+                                ctx_id,
+                                &key,
+                                mycelium,
+                                sup_hub,
+                                cache.clone(),
+                            )
+                            .await
                             {
                                 error!(context_id=ctx_id, key=%key, error=%e, "Delivery error");
                             }
@@ -102,6 +191,8 @@ async fn deliver_one(
     context_id: u32,
     msg_key: &str,
     mycelium: Arc<MyceliumClient>,
+    sup_hub: Arc<SupervisorHub>,
+    cache: Arc<SupervisorClientCache>,
 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
     // Parse "message:{caller_id}:{id}"
     let (caller_id, id) = parse_message_key(msg_key)
@@ -141,19 +232,38 @@ async fn deliver_one(
     let dest_for_poller = dest.clone();
     let topic_for_poller = cfg.topic.clone();
     let secret_for_poller = runner.secret.clone();
-    let client = SupervisorClient::new_with_client(
-        mycelium.clone(),
-        dest.clone(),
-        cfg.topic.clone(),
-        runner.secret.clone(),
-    );
+    let client = cache
+        .get_or_create(
+            sup_hub.clone(),
+            dest.clone(),
+            cfg.topic.clone(),
+            runner.secret.clone(),
+        )
+        .await;
 
     // Build supervisor method and params from Message
     let method = msg.message.clone();
     let params = build_params(&msg)?;
 
     // Send
-    let out_id = client.call(&method, params).await?;
+    // If this is a job.run and we have a secret configured on the client,
+    // prefer the typed wrapper that injects the secret into inner supervisor params,
+    // and await the reply to capture job_queued immediately.
+    let (out_id, reply_opt) = if method == "job.run" {
+        if let Some(j) = msg.job.first() {
+            let jv = job_to_json(j)?;
+            // Returns (outbound message id, reply envelope)
+            let (out, reply) = client.job_run_wait(jv).await?;
+            (out, Some(reply))
+        } else {
+            // Fallback: no embedded job, use the generic call (await reply, discard)
+            let out = client.call(&method, params).await?;
+            (out, None)
+        }
+    } else {
+        let out = client.call(&method, params).await?;
+        (out, None)
+    };
 
     // Store transport id and initial Sent status
     let _ = service
@@ -171,6 +281,59 @@ async fn deliver_one(
         .update_message_status(context_id, caller_id, id, MessageStatus::Acknowledged)
         .await?;
 
+    // If we got a job.run reply, interpret job_queued immediately
+    if let (Some(reply), Some(job_id)) = (reply_opt, msg.job.first().map(|j| j.id)) {
+        let result_opt = reply.get("result");
+        let error_opt = reply.get("error");
+
+        // Handle job.run success (job_queued)
+        let is_job_queued = result_opt
+            .and_then(|res| {
+                if res.get("job_queued").is_some() {
+                    Some(true)
+                } else if let Some(s) = res.as_str() {
+                    Some(s == "job_queued")
+                } else {
+                    None
+                }
+            })
+            .unwrap_or(false);
+
+        if is_job_queued {
+            let _ = service
+                .update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Dispatched)
+                .await;
+            let _ = service
+                .append_message_logs(
+                    context_id,
+                    caller_id,
+                    id,
+                    vec![format!(
+                        "Supervisor reply for job {}: job_queued (processed synchronously)",
+                        job_id
+                    )],
+                )
+                .await;
+        } else if let Some(err_obj) = error_opt {
+            let _ = service
+                .update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Error)
+                .await;
+            let _ = service
+                .append_message_logs(
+                    context_id,
+                    caller_id,
+                    id,
+                    vec![format!(
+                        "Supervisor error for job {}: {} (processed synchronously)",
+                        job_id, err_obj
+                    )],
+                )
+                .await;
+        }
+    }
+
+    // No correlation map needed; replies are handled synchronously via SupervisorHub
+
     // Spawn transport-status poller
     {
         let service_poll = service.clone();
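The acceptance rule used above for a job.run reply can be exercised on its own. This standalone sketch (a hypothetical `is_job_queued` helper over serde_json values) treats the reply as queued when `result` is an object containing a `job_queued` key or the bare string `"job_queued"`, and as not queued otherwise.

```rust
use serde_json::{Value, json};

/// Standalone mirror of the acceptance rule shown in the hunk above.
fn is_job_queued(reply: &Value) -> bool {
    reply
        .get("result")
        .and_then(|res| {
            if res.get("job_queued").is_some() {
                Some(true)
            } else if let Some(s) = res.as_str() {
                Some(s == "job_queued")
            } else {
                None
            }
        })
        .unwrap_or(false)
}

fn main() {
    assert!(is_job_queued(&json!({"jsonrpc": "2.0", "id": 7, "result": "job_queued"})));
    assert!(is_job_queued(&json!({"id": 8, "result": {"job_queued": null}})));
    assert!(!is_job_queued(&json!({"id": 9, "error": {"code": -32000, "message": "boom"}})));
}
```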
@@ -178,12 +341,6 @@ async fn deliver_one(
         let poll_timeout = std::time::Duration::from_secs(cfg.transport_poll_timeout_secs);
         let out_id_cloned = out_id.clone();
         let mycelium = mycelium.clone();
-        // Determine reply timeout for supervisor job.result: prefer message.timeout_result, fallback to router config timeout
-        let job_result_reply_timeout: u64 = if msg.timeout_result > 0 {
-            msg.timeout_result as u64
-        } else {
-            cfg.transport_poll_timeout_secs
-        };
 
         tokio::spawn(async move {
             let start = std::time::Instant::now();
@@ -195,6 +352,8 @@ async fn deliver_one(
             let job_id_opt = job_id_opt;
 
             let mut last_status: Option<TransportStatus> = Some(TransportStatus::Sent);
+            // Ensure we only request supervisor job.status or job.result once per outbound message
+            let mut requested_job_check: bool = false;
 
             loop {
                 if start.elapsed() >= poll_timeout {
@@ -227,91 +386,260 @@ async fn deliver_one(
 
                 // Stop on terminal states
                 if matches!(s, TransportStatus::Delivered | TransportStatus::Read) {
-                    // On Read, fetch supervisor job.status and update local job/message if terminal
-                    if matches!(s, TransportStatus::Read)
-                        && let Some(job_id) = job_id_opt
-                    {
-                        let sup = SupervisorClient::new_with_client(
-                            client.clone(),
-                            sup_dest.clone(),
-                            sup_topic.clone(),
-                            secret_for_poller.clone(),
-                        );
-                        match sup.job_status_sync(job_id.to_string(), 10).await {
-                            Ok(remote_status) => {
-                                if let Some((mapped, terminal)) =
-                                    map_supervisor_job_status(&remote_status)
-                                {
-                                    if terminal {
-                                        let _ = service_poll
-                                            .update_job_status_unchecked(
-                                                context_id,
-                                                caller_id,
-                                                job_id,
-                                                mapped.clone(),
-                                            )
-                                            .await;
-
-                                        // After terminal status, fetch supervisor job.result and store into Job.result
-                                        let sup = SupervisorClient::new_with_client(
-                                            client.clone(),
-                                            sup_dest.clone(),
-                                            sup_topic.clone(),
-                                            secret_for_poller.clone(),
-                                        );
-                                        match sup
-                                            .job_result_sync(
-                                                job_id.to_string(),
-                                                job_result_reply_timeout,
-                                            )
-                                            .await
-                                        {
-                                            Ok(result_map) => {
-                                                // Persist the result into the Job.result map (merge)
-                                                let _ = service_poll
-                                                    .update_job_result_merge_unchecked(
-                                                        context_id,
-                                                        caller_id,
-                                                        job_id,
-                                                        result_map.clone(),
-                                                    )
-                                                    .await;
-                                                // Log which key was stored (success or error)
-                                                let key = result_map
-                                                    .keys()
-                                                    .next()
-                                                    .cloned()
-                                                    .unwrap_or_else(|| {
-                                                        "unknown".to_string()
-                                                    });
-                                                let _ = service_poll
-                                                    .append_message_logs(
-                                                        context_id,
-                                                        caller_id,
-                                                        id,
-                                                        vec![format!(
-                                                            "Stored supervisor job.result for job {} ({})",
-                                                            job_id, key
-                                                        )],
-                                                    )
-                                                    .await;
-                                            }
-                                            Err(e) => {
-                                                let _ = service_poll
-                                                    .append_message_logs(
-                                                        context_id,
-                                                        caller_id,
-                                                        id,
-                                                        vec![format!(
-                                                            "job.result fetch error for job {}: {}",
-                                                            job_id, e
-                                                        )],
-                                                    )
-                                                    .await;
-                                            }
-                                        }
+                    if let Some(job_id) = job_id_opt {
+                        // First consult Redis for the latest job state in case we already have a terminal update
+                        match service_poll.load_job(context_id, caller_id, job_id).await {
+                            Ok(job) => {
+                                // Promote to Started as soon as transport is delivered/read,
+                                // if currently Dispatched or WaitingForPrerequisites.
+                                // This makes DAG.started reflect "in-flight" work even when jobs
+                                // complete too quickly to observe an intermediate supervisor "running" status.
+                                if matches!(
+                                    job.status(),
+                                    JobStatus::Dispatched
+                                        | JobStatus::WaitingForPrerequisites
+                                ) {
+                                    let _ = service_poll
+                                        .update_job_status_unchecked(
+                                            context_id,
+                                            caller_id,
+                                            job_id,
+                                            JobStatus::Started,
+                                        )
+                                        .await;
+                                }
+                                match job.status() {
+                                    JobStatus::Finished | JobStatus::Error => {
+                                        // Local job is already terminal; skip supervisor job.status
+                                        let _ = service_poll
+                                            .append_message_logs(
+                                                context_id,
+                                                caller_id,
+                                                id,
+                                                vec![format!(
+                                                    "Local job {} status is terminal ({:?}); skipping supervisor job.status",
+                                                    job_id,
+                                                    job.status()
+                                                )],
+                                            )
+                                            .await;
+
+                                        // If result is still empty, immediately request supervisor job.result
+                                        if job.result.is_empty() {
+                                            let sup = cache
+                                                .get_or_create(
+                                                    sup_hub.clone(),
+                                                    sup_dest.clone(),
+                                                    sup_topic.clone(),
+                                                    secret_for_poller.clone(),
+                                                )
+                                                .await;
+                                            match sup
+                                                .job_result_wait(job_id.to_string())
+                                                .await
+                                            {
+                                                Ok((_out2, reply2)) => {
+                                                    // Interpret reply synchronously: success/error/bare string
+                                                    let res = reply2.get("result");
+                                                    if let Some(obj) =
+                                                        res.and_then(|v| v.as_object())
+                                                    {
+                                                        if let Some(s) = obj
+                                                            .get("success")
+                                                            .and_then(|v| v.as_str())
+                                                        {
+                                                            let mut patch = std::collections::HashMap::new();
+                                                            patch.insert(
+                                                                "success".to_string(),
+                                                                s.to_string(),
+                                                            );
+                                                            let _ = service_poll
+                                                                .update_job_result_merge_unchecked(
+                                                                    context_id, caller_id, job_id, patch,
+                                                                )
+                                                                .await;
+                                                            let _ = service_poll
+                                                                .update_message_status(
+                                                                    context_id,
+                                                                    caller_id,
+                                                                    id,
+                                                                    MessageStatus::Processed,
+                                                                )
+                                                                .await;
+                                                            // Also mark job as Finished so the flow can progress (ignore invalid transitions)
+                                                            let _ = service_poll
+                                                                .update_job_status_unchecked(
+                                                                    context_id, caller_id, job_id, JobStatus::Finished,
+                                                                )
+                                                                .await;
+                                                            let _ = service_poll
+                                                                .append_message_logs(
+                                                                    context_id,
+                                                                    caller_id,
+                                                                    id,
+                                                                    vec![format!(
+                                                                        "Updated job {} status to Finished (sync)", job_id
+                                                                    )],
+                                                                )
+                                                                .await;
+                                                            // Existing log about storing result
+                                                            let _ = service_poll
+                                                                .append_message_logs(
+                                                                    context_id,
+                                                                    caller_id,
+                                                                    id,
+                                                                    vec![format!(
+                                                                        "Stored supervisor job.result for job {} (success, sync)",
+                                                                        job_id
+                                                                    )],
+                                                                )
+                                                                .await;
+                                                        } else if let Some(s) = obj
+                                                            .get("error")
+                                                            .and_then(|v| v.as_str())
+                                                        {
+                                                            let mut patch = std::collections::HashMap::new();
+                                                            patch.insert(
+                                                                "error".to_string(),
+                                                                s.to_string(),
+                                                            );
+                                                            let _ = service_poll
+                                                                .update_job_result_merge_unchecked(
+                                                                    context_id, caller_id, job_id, patch,
+                                                                )
+                                                                .await;
+                                                            let _ = service_poll
+                                                                .update_message_status(
+                                                                    context_id,
+                                                                    caller_id,
+                                                                    id,
+                                                                    MessageStatus::Processed,
+                                                                )
+                                                                .await;
+                                                            // Also mark job as Error so the flow can handle failure (ignore invalid transitions)
+                                                            let _ = service_poll
+                                                                .update_job_status_unchecked(
+                                                                    context_id, caller_id, job_id, JobStatus::Error,
+                                                                )
+                                                                .await;
+                                                            let _ = service_poll
+                                                                .append_message_logs(
+                                                                    context_id,
+                                                                    caller_id,
+                                                                    id,
+                                                                    vec![format!(
+                                                                        "Updated job {} status to Error (sync)", job_id
+                                                                    )],
+                                                                )
+                                                                .await;
+                                                            // Existing log about storing result
+                                                            let _ = service_poll
+                                                                .append_message_logs(
+                                                                    context_id,
+                                                                    caller_id,
+                                                                    id,
+                                                                    vec![format!(
+                                                                        "Stored supervisor job.result for job {} (error, sync)",
+                                                                        job_id
+                                                                    )],
+                                                                )
+                                                                .await;
+                                                        }
+                                                    } else if let Some(s) =
+                                                        res.and_then(|v| v.as_str())
+                                                    {
+                                                        let mut patch =
+                                                            std::collections::HashMap::new(
+                                                        );
+                                                        patch.insert(
+                                                            "success".to_string(),
+                                                            s.to_string(),
+                                                        );
+                                                        let _ = service_poll
+                                                            .update_job_result_merge_unchecked(
+                                                                context_id, caller_id, job_id, patch,
+                                                            )
+                                                            .await;
+                                                        let _ = service_poll
+                                                            .update_message_status(
+                                                                context_id,
+                                                                caller_id,
+                                                                id,
+                                                                MessageStatus::Processed,
+                                                            )
+                                                            .await;
+                                                        // Also mark job as Finished so the flow can progress (ignore invalid transitions)
+                                                        let _ = service_poll
+                                                            .update_job_status_unchecked(
+                                                                context_id,
+                                                                caller_id,
+                                                                job_id,
+                                                                JobStatus::Finished,
+                                                            )
+                                                            .await;
+                                                        let _ = service_poll
+                                                            .append_message_logs(
+                                                                context_id,
+                                                                caller_id,
+                                                                id,
+                                                                vec![format!(
+                                                                    "Updated job {} status to Finished (sync)", job_id
+                                                                )],
+                                                            )
+                                                            .await;
+                                                        // Existing log about storing result
+                                                        let _ = service_poll
+                                                            .append_message_logs(
+                                                                context_id,
+                                                                caller_id,
+                                                                id,
+                                                                vec![format!(
+                                                                    "Stored supervisor job.result for job {} (success, sync)",
+                                                                    job_id
+                                                                )],
+                                                            )
+                                                            .await;
+                                                    } else {
+                                                        let _ = service_poll
+                                                            .append_message_logs(
+                                                                context_id,
+                                                                caller_id,
+                                                                id,
+                                                                vec!["Supervisor job.result reply missing recognizable fields".to_string()],
+                                                            )
+                                                            .await;
+                                                    }
+                                                }
+                                                Err(e) => {
+                                                    let _ = service_poll
+                                                        .append_message_logs(
+                                                            context_id,
+                                                            caller_id,
+                                                            id,
+                                                            vec![format!(
+                                                                "job.result request error for job {}: {}",
+                                                                job_id, e
+                                                            )],
+                                                        )
+                                                        .await;
+                                                }
+                                            }
+                                        } else {
+                                            // Result already present; nothing to fetch
+                                            let _ = service_poll
+                                                .append_message_logs(
+                                                    context_id,
+                                                    caller_id,
+                                                    id,
+                                                    vec![format!(
+                                                        "Job {} already has result; no supervisor calls needed",
+                                                        job_id
+                                                    )],
+                                                )
+                                                .await;
+                                        }
 
-                                        // Mark message as processed
+                                        // Mark processed and stop polling for this message
                                         let _ = service_poll
                                             .update_message_status(
                                                 context_id,
@@ -326,39 +654,207 @@ async fn deliver_one(
                                                 caller_id,
                                                 id,
                                                 vec![format!(
-                                                    "Supervisor job.status for job {} -> {} (mapped to {:?})",
-                                                    job_id, remote_status, mapped
-                                                )],
-                                            )
-                                            .await;
-                                    }
-                                } else {
-                                    let _ = service_poll
-                                        .append_message_logs(
-                                            context_id,
-                                            caller_id,
-                                            id,
-                                            vec![format!(
-                                                "Unknown supervisor status '{}' for job {}",
-                                                remote_status, job_id
-                                            )],
-                                        )
-                                        .await;
-                                }
-                            }
-                            Err(e) => {
-                                let _ = service_poll
-                                    .append_message_logs(
-                                        context_id,
-                                        caller_id,
-                                        id,
-                                        vec![format!("job.status sync error: {}", e)],
-                                    )
-                                    .await;
-                            }
-                        }
-                    }
-                    break;
+                                                    "Terminal job {} detected; stopping transport polling",
+                                                    job_id
+                                                )],
+                                            )
+                                            .await;
+                                        break;
+                                    }
+                                    // Not terminal yet -> request supervisor job.status as before
+                                    _ => {
+                                        let sup = cache
+                                            .get_or_create(
+                                                sup_hub.clone(),
+                                                sup_dest.clone(),
+                                                sup_topic.clone(),
+                                                secret_for_poller.clone(),
+                                            )
+                                            .await;
+                                        match sup.job_status_wait(job_id.to_string()).await
+                                        {
+                                            Ok((_out_id, reply_status)) => {
+                                                // Interpret status reply synchronously
+                                                let result_opt = reply_status.get("result");
+                                                let error_opt = reply_status.get("error");
+                                                if let Some(err_obj) = error_opt {
+                                                    let _ = service_poll
+                                                        .update_job_status_unchecked(
+                                                            context_id,
+                                                            caller_id,
+                                                            job_id,
+                                                            JobStatus::Error,
+                                                        )
+                                                        .await;
+                                                    let _ = service_poll
+                                                        .append_message_logs(
+                                                            context_id, caller_id, id,
+                                                            vec![format!(
+                                                                "Supervisor error for job {}: {} (sync)",
+                                                                job_id, err_obj
+                                                            )],
+                                                        )
+                                                        .await;
+                                                } else if let Some(res) = result_opt {
+                                                    let status_candidate = res
+                                                        .get("status")
+                                                        .and_then(|v| v.as_str())
+                                                        .or_else(|| res.as_str());
+                                                    if let Some(remote_status) =
+                                                        status_candidate
+                                                    {
+                                                        if let Some((mapped, terminal)) =
+                                                            map_supervisor_job_status(
+                                                                remote_status,
+                                                            )
+                                                        {
+                                                            let _ = service_poll
+                                                                .update_job_status_unchecked(
+                                                                    context_id, caller_id, job_id, mapped.clone(),
+                                                                )
+                                                                .await;
+                                                            let _ = service_poll
+                                                                .append_message_logs(
+                                                                    context_id, caller_id, id,
+                                                                    vec![format!(
+                                                                        "Supervisor job.status for job {} -> {} (mapped to {:?}, sync)",
+                                                                        job_id, remote_status, mapped
+                                                                    )],
+                                                                )
+                                                                .await;
+
+                                                            // If terminal, request job.result now (handled above for local terminal case)
+                                                            if terminal {
+                                                                // trigger job.result only if result empty to avoid spam
+                                                                if let Ok(j_after) =
+                                                                    service_poll
+                                                                        .load_job(
+                                                                            context_id,
+                                                                            caller_id,
+                                                                            job_id,
+                                                                        )
+                                                                        .await
+                                                                {
+                                                                    if j_after
+                                                                        .result
+                                                                        .is_empty()
+                                                                    {
+                                                                        let sup2 = cache
+                                                                            .get_or_create(
+                                                                                sup_hub.clone(),
+                                                                                sup_dest.clone(),
+                                                                                sup_topic.clone(),
+                                                                                secret_for_poller.clone(),
+                                                                            )
+                                                                            .await;
+                                                                        let _ = sup2.job_result_wait(job_id.to_string()).await
+                                                                            .and_then(|(_oid, reply2)| {
+                                                                                // Minimal parse and store
+                                                                                let res2 = reply2.get("result");
+                                                                                if let Some(obj) = res2.and_then(|v| v.as_object()) {
+                                                                                    if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
+                                                                                        let mut patch = std::collections::HashMap::new();
+                                                                                        patch.insert("success".to_string(), s.to_string());
+                                                                                        tokio::spawn({
+                                                                                            let service_poll = service_poll.clone();
+                                                                                            async move {
+                                                                                                let _ = service_poll.update_job_result_merge_unchecked(context_id, caller_id, job_id, patch).await;
+                                                                                            }
+                                                                                        });
+                                                                                    }
+                                                                                }
+                                                                                Ok((String::new(), Value::Null))
+                                                                            });
+                                                                    }
+                                                                }
+
+                                                                // Mark processed and stop polling for this message
+                                                                let _ = service_poll
+                                                                    .update_message_status(
+                                                                        context_id,
+                                                                        caller_id,
+                                                                        id,
+                                                                        MessageStatus::Processed,
+                                                                    )
+                                                                    .await;
+                                                                let _ = service_poll
+                                                                    .append_message_logs(
+                                                                        context_id,
+                                                                        caller_id,
+                                                                        id,
+                                                                        vec![format!(
+                                                                            "Terminal job {} detected from supervisor status; stopping transport polling",
+                                                                            job_id
+                                                                        )],
+                                                                    )
+                                                                    .await;
+                                                                break;
+                                                            }
+                                                        }
+                                                    }
+                                                }
+                                            }
+                                            Err(e) => {
+                                                let _ = service_poll
+                                                    .append_message_logs(
+                                                        context_id,
+                                                        caller_id,
+                                                        id,
+                                                        vec![format!(
+                                                            "job.status request error: {}",
+                                                            e
+                                                        )],
+                                                    )
+                                                    .await;
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                            // If we cannot load the job, fall back to requesting job.status
+                            Err(_) => {
+                                let sup = cache
+                                    .get_or_create(
+                                        sup_hub.clone(),
+                                        sup_dest.clone(),
+                                        sup_topic.clone(),
+                                        secret_for_poller.clone(),
+                                    )
+                                    .await;
+                                match sup.job_status_wait(job_id.to_string()).await {
+                                    Ok((_out_id, _reply_status)) => {
+                                        let _ = service_poll
+                                            .append_message_logs(
+                                                context_id,
+                                                caller_id,
+                                                id,
+                                                vec![format!(
+                                                    "Requested supervisor job.status for job {} (fallback; load_job failed, sync)",
+                                                    job_id
+                                                )],
+                                            )
+                                            .await;
+                                    }
+                                    Err(e) => {
+                                        let _ = service_poll
+                                            .append_message_logs(
+                                                context_id,
+                                                caller_id,
+                                                id,
+                                                vec![format!(
+                                                    "job.status request error: {}",
+                                                    e
+                                                )],
+                                            )
+                                            .await;
+                                    }
+                                }
+                            }
+                        }
+                        // Ensure we only do this once
+                        requested_job_check = true;
+                    }
+                    // break;
                 }
                 if matches!(s, TransportStatus::Failed) {
                     let _ = service_poll
@@ -672,10 +672,16 @@ impl AppService {
         let allowed = match current {
             JobStatus::Dispatched => matches!(
                 new_status,
-                JobStatus::WaitingForPrerequisites | JobStatus::Started | JobStatus::Error
+                JobStatus::WaitingForPrerequisites
+                    | JobStatus::Started
+                    | JobStatus::Finished
+                    | JobStatus::Error
             ),
             JobStatus::WaitingForPrerequisites => {
-                matches!(new_status, JobStatus::Started | JobStatus::Error)
+                matches!(
+                    new_status,
+                    JobStatus::Started | JobStatus::Finished | JobStatus::Error
+                )
             }
             JobStatus::Started => matches!(new_status, JobStatus::Finished | JobStatus::Error),
             JobStatus::Finished | JobStatus::Error => false,
@@ -714,10 +720,16 @@ impl AppService {
         let allowed = match current {
             JobStatus::Dispatched => matches!(
                 new_status,
-                JobStatus::WaitingForPrerequisites | JobStatus::Started | JobStatus::Error
+                JobStatus::WaitingForPrerequisites
+                    | JobStatus::Started
+                    | JobStatus::Finished
+                    | JobStatus::Error
            ),
            JobStatus::WaitingForPrerequisites => {
-                matches!(new_status, JobStatus::Started | JobStatus::Error)
+                matches!(
+                    new_status,
+                    JobStatus::Started | JobStatus::Finished | JobStatus::Error
+                )
            }
            JobStatus::Started => matches!(new_status, JobStatus::Finished | JobStatus::Error),
            JobStatus::Finished | JobStatus::Error => false,
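A compact standalone sketch of the widened transition table (local enum mirroring the names above; not the crate's implementation). The practical effect is that Dispatched and WaitingForPrerequisites may now move straight to Finished, which matters for jobs that complete before an intermediate Started status is ever observed:

```rust
#[derive(Clone, Copy)]
enum JobStatus {
    Dispatched,
    WaitingForPrerequisites,
    Started,
    Finished,
    Error,
}

// Mirror of the allowed-transition rules in the two hunks above.
fn allowed(current: JobStatus, new_status: JobStatus) -> bool {
    use JobStatus::*;
    match current {
        Dispatched => matches!(
            new_status,
            WaitingForPrerequisites | Started | Finished | Error
        ),
        WaitingForPrerequisites => matches!(new_status, Started | Finished | Error),
        Started => matches!(new_status, Finished | Error),
        Finished | Error => false, // terminal states stay terminal
    }
}

fn main() {
    assert!(allowed(JobStatus::Dispatched, JobStatus::Finished));
    assert!(allowed(JobStatus::WaitingForPrerequisites, JobStatus::Finished));
    assert!(!allowed(JobStatus::Finished, JobStatus::Started));
}
```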
@@ -1161,6 +1173,34 @@ impl AppService {
     pub async fn scan_runners(&self, context_id: u32) -> Result<Vec<Runner>, BoxError> {
         self.redis.scan_runners(context_id).await
     }
+
+    /// Correlation map: store mapping from inner supervisor JSON-RPC id to context/caller/job/message.
+    pub async fn supcorr_set(
+        &self,
+        inner_id: u64,
+        context_id: u32,
+        caller_id: u32,
+        job_id: u32,
+        message_id: u32,
+    ) -> Result<(), BoxError> {
+        self.redis
+            .supcorr_set(inner_id, context_id, caller_id, job_id, message_id)
+            .await
+            .map_err(Into::into)
+    }
+
+    /// Correlation map: load mapping by inner supervisor JSON-RPC id.
+    pub async fn supcorr_get(
+        &self,
+        inner_id: u64,
+    ) -> Result<Option<(u32, u32, u32, u32)>, BoxError> {
+        self.redis.supcorr_get(inner_id).await.map_err(Into::into)
+    }
+
+    /// Correlation map: delete mapping by inner supervisor JSON-RPC id.
+    pub async fn supcorr_del(&self, inner_id: u64) -> Result<(), BoxError> {
+        self.redis.supcorr_del(inner_id).await.map_err(Into::into)
+    }
 }
 
 /// Auto-discovery helpers for contexts (wrappers over RedisDriver)
@@ -751,4 +751,77 @@ impl RedisDriver {
         out.sort_unstable();
         Ok(out)
     }
+
+    // -----------------------------
+    // Supervisor correlation mapping (DB 0)
+    // Key: "supcorr:{inner_id_decimal}"
+    // Value: JSON {"context_id":u32,"caller_id":u32,"job_id":u32,"message_id":u32}
+    // TTL: 1 hour to avoid leaks in case of crashes
+    pub async fn supcorr_set(
+        &self,
+        inner_id: u64,
+        context_id: u32,
+        caller_id: u32,
+        job_id: u32,
+        message_id: u32,
+    ) -> Result<()> {
+        let mut cm = self.manager_for_db(0).await?;
+        let key = format!("supcorr:{}", inner_id);
+        let val = serde_json::json!({
+            "context_id": context_id,
+            "caller_id": caller_id,
+            "job_id": job_id,
+            "message_id": message_id,
+        })
+        .to_string();
+        // SET key val EX 3600
+        let _: () = redis::cmd("SET")
+            .arg(&key)
+            .arg(&val)
+            .arg("EX")
+            .arg(3600)
+            .query_async(&mut cm)
+            .await
+            .map_err(|e| {
+                error!(db=0, key=%key, error=%e, "SET supcorr_set failed");
+                e
+            })?;
+        Ok(())
+    }
+
+    pub async fn supcorr_get(&self, inner_id: u64) -> Result<Option<(u32, u32, u32, u32)>> {
+        let mut cm = self.manager_for_db(0).await?;
+        let key = format!("supcorr:{}", inner_id);
+        let res: Option<String> = redis::cmd("GET")
+            .arg(&key)
+            .query_async(&mut cm)
+            .await
+            .map_err(|e| {
+                error!(db=0, key=%key, error=%e, "GET supcorr_get failed");
+                e
+            })?;
+        if let Some(s) = res {
+            let v: Value = serde_json::from_str(&s)?;
+            let ctx = v.get("context_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
+            let caller = v.get("caller_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
+            let job = v.get("job_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
+            let msg = v.get("message_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
+            return Ok(Some((ctx, caller, job, msg)));
+        }
+        Ok(None)
+    }
+
+    pub async fn supcorr_del(&self, inner_id: u64) -> Result<()> {
+        let mut cm = self.manager_for_db(0).await?;
+        let key = format!("supcorr:{}", inner_id);
+        let _: i64 = redis::cmd("DEL")
+            .arg(&key)
+            .query_async(&mut cm)
+            .await
+            .map_err(|e| {
+                error!(db=0, key=%key, error=%e, "DEL supcorr_del failed");
+                e
+            })?;
+        Ok(())
+    }
 }
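The key/value layout documented in the comments above can be illustrated without touching Redis. This standalone sketch (serde_json only; the ids are arbitrary) builds the `supcorr:{inner_id}` key and its JSON value, then extracts the ids the same way the getter does:

```rust
use serde_json::Value;

fn main() {
    let (context_id, caller_id, job_id, message_id) = (1u32, 2u32, 3u32, 4u32);

    // Key and value as stored by supcorr_set (TTL handling omitted here).
    let key = format!("supcorr:{}", 42u64);
    let val = serde_json::json!({
        "context_id": context_id,
        "caller_id": caller_id,
        "job_id": job_id,
        "message_id": message_id,
    })
    .to_string();

    // What a consumer would do after GET-ing the value back.
    let v: Value = serde_json::from_str(&val).unwrap();
    let ctx = v.get("context_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
    assert_eq!(key, "supcorr:42");
    assert_eq!(ctx, context_id);
}
```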