Compare commits


26 Commits

Author SHA1 Message Date
Lee Smet
3cd1a55768 Fix job status transitions
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-08 13:37:42 +02:00
Lee Smet
c860553acd Stop polling when a job reached terminal status
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-08 12:07:26 +02:00
Lee Smet
78a776877a Fetch the result of a job more than once if needed
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-08 11:54:15 +02:00
Lee Smet
8cea17f4ec Increase supervisor hub popmessage timeout
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-08 11:43:44 +02:00
Lee Smet
66c89d2485 Format codebase
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-08 11:37:51 +02:00
Lee Smet
512c99db54 Improve jsonrpc client to properly route replies
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-08 11:37:22 +02:00
Lee Smet
25f35ea8fc Check job status in redis db as well before sending rpc call
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-05 19:58:52 +02:00
Lee Smet
fb34b4e2f3 Use single cached supervisorclient
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-05 17:09:57 +02:00
Lee Smet
2c88114d45 Remove notion of sync calls
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-05 13:23:48 +02:00
Lee Smet
8de2597f19 Fix loading message status from mycelium
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-05 12:22:54 +02:00
Lee Smet
3220f52956 Add display impl for TransportStatus
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-05 12:22:26 +02:00
Lee Smet
97bcb55aaa Start python runner through demo script
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-05 11:10:04 +02:00
Lee Smet
c38937f1cb Register runner when starting script
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-04 17:36:21 +02:00
Lee Smet
059d5131e7 Listen for responses of supervisors
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-04 16:24:15 +02:00
Lee Smet
c6077623b0 Use proper secret injected method for supervisor
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-04 14:46:37 +02:00
Lee Smet
de6c799635 Set runner secret
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-04 14:05:03 +02:00
Lee Smet
c4971aa794 Add full flow script example
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-03 20:17:12 +02:00
Lee Smet
7aa35b6d06 Fix remainder of HSET return value decoding
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-03 20:16:53 +02:00
Lee Smet
60946af1df Fix pushMessage parameter encoding
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-03 20:11:10 +02:00
Lee Smet
83990cf16a Properly encode topic in mycelium rpc
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-03 20:09:47 +02:00
Lee Smet
dbb9493bcb Improve code format in router
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-03 14:54:11 +02:00
Lee Smet
d921dca75c Fix default mycelium jsonrpc api port
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-03 14:50:45 +02:00
Lee Smet
4a15269442 Fix more HSET types in redis driver
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-03 14:46:55 +02:00
Lee Smet
43fd61d662 Remove unused imports
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-09-03 11:29:26 +02:00
Lee Smet
38709e06f3 Add script to test actor/context/job/flow create and flow dag
Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-08-29 15:43:32 +02:00
Lee Smet
08de312cd9 Fix HSET response decoding
The command internally uses the (deprecated) HMSET, which returns OK on success
instead of the number of fields written

Signed-off-by: Lee Smet <lee.smet@hotmail.com>
2025-08-29 11:30:41 +02:00
14 changed files with 2189 additions and 386 deletions

scripts/jsonrpc_demo.py (new file, 376 lines)

@@ -0,0 +1,376 @@
#!/usr/bin/env python3
"""
Demo script for HeroCoordinator JSON-RPC API.
- Creates an actor
- Verifies by loading the actor
- Creates a context with the actor as admin/reader/executor
- Registers a runner in the context
- Creates nine jobs (A-I) whose dependencies form a small DAG
- Creates a flow referencing those jobs
- Fetches and prints the flow DAG
Usage:
COORDINATOR_URL=http://127.0.0.1:9652 python3 scripts/jsonrpc_demo.py
Defaults to http://127.0.0.1:9652 if COORDINATOR_URL is not set.
"""
import os
import json
import sys
from urllib import request, error
from typing import Any, Dict, List, Tuple
JSONRPC_VERSION = "2.0"
class JsonRpcClient:
def __init__(self, url: str):
self.url = url.rstrip("/")
self._id = 0
def call(self, method: str, params: Dict[str, Any]) -> Any:
self._id += 1
payload = {
"jsonrpc": JSONRPC_VERSION,
"id": self._id,
"method": method,
"params": params,
}
data = json.dumps(payload).encode("utf-8")
req = request.Request(self.url, data=data, headers={"Content-Type": "application/json"})
try:
with request.urlopen(req) as resp:
body = resp.read()
except error.HTTPError as e:
try:
details = e.read().decode("utf-8", "ignore")
except Exception:
details = ""
raise RuntimeError(f"HTTP error {e.code}: {details}") from e
except error.URLError as e:
raise RuntimeError(f"URL error: {e.reason}") from e
try:
obj = json.loads(body.decode("utf-8"))
except Exception as e:
raise RuntimeError(f"Invalid JSON response: {body!r}") from e
# JSON-RPC single response expected
if isinstance(obj, list):
raise RuntimeError("Batch responses are not supported in this demo")
if obj.get("error"):
raise RuntimeError(f"RPC error: {json.dumps(obj['error'])}")
return obj.get("result")
def print_header(title: str):
print("\n" + "=" * 80)
print(title)
print("=" * 80)
def pretty_print(obj: Any):
print(json.dumps(obj, indent=2, sort_keys=True))
def summarize_dag(dag: Dict[str, Any]):
print_header("Flow DAG Summary")
flow_id = dag.get("flow_id")
caller_id = dag.get("caller_id")
context_id = dag.get("context_id")
print(f"flow_id={flow_id} caller_id={caller_id} context_id={context_id}")
edges: List[Tuple[int, int]] = dag.get("edges", [])
roots: List[int] = dag.get("roots", [])
leaves: List[int] = dag.get("leaves", [])
levels: List[List[int]] = dag.get("levels", [])
nodes: Dict[str, Any] = dag.get("nodes", {})
print("Edges:")
for a, b in edges:
print(f" {a} -> {b}")
print(f"Roots: {roots}")
print(f"Leaves: {leaves}")
print("Levels:")
for i, lvl in enumerate(levels):
print(f" L{i}: {lvl}")
# Show nodes and their dependencies (from JobSummary)
print("Nodes:")
for k, v in nodes.items():
depends = v.get("depends", [])
prerequisites = v.get("prerequisites", [])
stype = v.get("script_type")
print(f" Job {k}: depends={depends} prerequisites={prerequisites} script_type={stype}")
def assert_edges(edges: List[Tuple[int, int]], required: List[Tuple[int, int]]):
edge_set = {(int(a), int(b)) for a, b in edges}
missing = [e for e in required if e not in edge_set]
if missing:
raise AssertionError(f"Missing expected edges in DAG: {missing}; got={sorted(edge_set)}")
def main():
url = os.getenv("COORDINATOR_URL", "http://127.0.0.1:9652")
client = JsonRpcClient(url)
# Deterministic demo IDs; change if collisions happen
actor_id = 1001
context_id = 1 # Redis DB indices are 0-15; keep <= 15
job_a = 3001
job_b = 3002
job_c = 3003
job_d = 3004
job_e = 3005
job_f = 3006
job_g = 3007
job_h = 3008
job_i = 3009
flow_id = 4001
runner_id = 2001
print_header("actor.create")
actor = client.call("actor.create", {
"actor": {
"id": actor_id,
"pubkey": "demo-pubkey",
"address": ["127.0.0.1"]
}
})
pretty_print(actor)
print_header("actor.load")
actor_loaded = client.call("actor.load", {"id": actor_id})
pretty_print(actor_loaded)
print_header("context.create")
context = client.call("context.create", {
"context": {
"id": context_id,
"admins": [actor_id],
"readers": [actor_id],
"executors": [actor_id]
}
})
pretty_print(context)
print_header("runner.create")
runner = client.call("runner.create", {
"context_id": context_id,
"runner": {
"id": runner_id,
"pubkey": "", # leave empty to route by IP
"address": "127.0.0.1",
"topic": f"runner{runner_id}",
"script_type": "Python",
"local": True,
"secret": "demo-secret"
}
})
pretty_print(runner)
print_header("job.create - A (root)")
jobA = client.call("job.create", {
"context_id": context_id,
"job": {
"id": job_a,
"caller_id": actor_id,
"context_id": context_id,
"script": "print('A')",
"script_type": "Python",
"timeout": 30,
"retries": 0,
"env_vars": {},
"prerequisites": [],
"depends": []
}
})
pretty_print(jobA)
print_header("job.create - B (root)")
jobB = client.call("job.create", {
"context_id": context_id,
"job": {
"id": job_b,
"caller_id": actor_id,
"context_id": context_id,
"script": "print('B')",
"script_type": "Python",
"timeout": 30,
"retries": 0,
"env_vars": {},
"prerequisites": [],
"depends": []
}
})
pretty_print(jobB)
print_header("job.create - C (depends on A and B)")
jobC = client.call("job.create", {
"context_id": context_id,
"job": {
"id": job_c,
"caller_id": actor_id,
"context_id": context_id,
"script": "print('C')",
"script_type": "Python",
"timeout": 30,
"retries": 0,
"env_vars": {},
"prerequisites": [],
"depends": [job_a, job_b]
}
})
pretty_print(jobC)
print_header("job.create - D (depends on A)")
jobD = client.call("job.create", {
"context_id": context_id,
"job": {
"id": job_d,
"caller_id": actor_id,
"context_id": context_id,
"script": "print('D')",
"script_type": "Python",
"timeout": 30,
"retries": 0,
"env_vars": {},
"prerequisites": [],
"depends": [job_a]
}
})
pretty_print(jobD)
print_header("job.create - E (depends on B)")
jobE = client.call("job.create", {
"context_id": context_id,
"job": {
"id": job_e,
"caller_id": actor_id,
"context_id": context_id,
"script": "print('E')",
"script_type": "Python",
"timeout": 30,
"retries": 0,
"env_vars": {},
"prerequisites": [],
"depends": [job_b]
}
})
pretty_print(jobE)
print_header("job.create - F (depends on C and D)")
jobF = client.call("job.create", {
"context_id": context_id,
"job": {
"id": job_f,
"caller_id": actor_id,
"context_id": context_id,
"script": "print('F')",
"script_type": "Python",
"timeout": 30,
"retries": 0,
"env_vars": {},
"prerequisites": [],
"depends": [job_c, job_d]
}
})
pretty_print(jobF)
print_header("job.create - G (depends on C and E)")
jobG = client.call("job.create", {
"context_id": context_id,
"job": {
"id": job_g,
"caller_id": actor_id,
"context_id": context_id,
"script": "print('G')",
"script_type": "Python",
"timeout": 30,
"retries": 0,
"env_vars": {},
"prerequisites": [],
"depends": [job_c, job_e]
}
})
pretty_print(jobG)
print_header("job.create - H (leaf; depends on F and G)")
jobH = client.call("job.create", {
"context_id": context_id,
"job": {
"id": job_h,
"caller_id": actor_id,
"context_id": context_id,
"script": "print('H')",
"script_type": "Python",
"timeout": 30,
"retries": 0,
"env_vars": {},
"prerequisites": [],
"depends": [job_f, job_g]
}
})
pretty_print(jobH)
print_header("job.create - I (leaf; depends on F and G)")
jobI = client.call("job.create", {
"context_id": context_id,
"job": {
"id": job_i,
"caller_id": actor_id,
"context_id": context_id,
"script": "print('I')",
"script_type": "Python",
"timeout": 30,
"retries": 0,
"env_vars": {},
"prerequisites": [],
"depends": [job_f, job_g]
}
})
pretty_print(jobI)
print_header("flow.create")
flow = client.call("flow.create", {
"context_id": context_id,
"flow": {
"id": flow_id,
"caller_id": actor_id,
"context_id": context_id,
"jobs": [job_a, job_b, job_c, job_d, job_e, job_f, job_g, job_h, job_i],
"env_vars": {}
}
})
pretty_print(flow)
print_header("flow.dag")
dag = client.call("flow.dag", {"context_id": context_id, "id": flow_id})
summarize_dag(dag)
# Validate roots and leaves
got_roots = list(map(int, dag.get("roots", [])))
if sorted(got_roots) != sorted([job_a, job_b]):
print("WARNING: Unexpected roots:", got_roots, file=sys.stderr)
got_leaves = {int(x) for x in dag.get("leaves", [])}
expected_leaves = {job_h, job_i}
if got_leaves != expected_leaves:
print("WARNING: Unexpected leaves:", got_leaves, "expected:", expected_leaves, file=sys.stderr)
# Check edges reflect the expanded DAG
expected_edges = [
(job_a, job_c), (job_b, job_c),
(job_a, job_d), (job_b, job_e),
(job_c, job_f), (job_d, job_f),
(job_c, job_g), (job_e, job_g),
(job_f, job_h), (job_g, job_h),
(job_f, job_i), (job_g, job_i),
]
try:
assert_edges(dag.get("edges", []), expected_edges)
print("DAG edges contain expected dependencies:", expected_edges)
except AssertionError as e:
print("WARNING:", e, file=sys.stderr)
if __name__ == "__main__":
try:
main()
except Exception as e:
print_header("Error")
print(str(e))
sys.exit(1)
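For a quick wire-level sanity check, each call in the demo above is a single JSON-RPC POST to the coordinator. A minimal sketch (assuming the coordinator is reachable at the default COORDINATOR_URL; actor id 1001 is the one the demo creates, and the response shape is whatever the server returns):
import json
import os
from urllib import request

url = os.getenv("COORDINATOR_URL", "http://127.0.0.1:9652")
# Same envelope JsonRpcClient.call builds, for a single actor.load request.
payload = {"jsonrpc": "2.0", "id": 1, "method": "actor.load", "params": {"id": 1001}}
req = request.Request(url, data=json.dumps(payload).encode("utf-8"),
                      headers={"Content-Type": "application/json"})
with request.urlopen(req) as resp:
    print(json.dumps(json.loads(resp.read()), indent=2))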

scripts/supervisor_flow_demo.py (new file, 502 lines)

@@ -0,0 +1,502 @@
#!/usr/bin/env python3
"""
Supervisor flow demo for HeroCoordinator.
This script:
- Optionally pre-registers and starts a Python runner on the target Supervisor over Mycelium using an admin secret (--admin-secret). If the flag is not set, this step is skipped.
- Creates an actor
- Creates a context granting the actor admin/reader/executor privileges
- Registers a Runner in the context targeting a Supervisor reachable via Mycelium (by public key or IP)
- Creates simple Python jobs (text jobs) with a small dependency chain
- Creates a flow referencing those jobs
- Starts the flow and polls until it finishes (or errors)
Transport: JSON-RPC over HTTP to the Coordinator (default COORDINATOR_URL=http://127.0.0.1:9652).
Example usage:
COORDINATOR_URL=http://127.0.0.1:9652 python3 scripts/supervisor_flow_demo.py --dst-ip 2001:db8::1 [--secret your-secret]
COORDINATOR_URL=http://127.0.0.1:9652 python3 scripts/supervisor_flow_demo.py --dst-pk bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32 [--secret your-secret]
Notes:
- Exactly one of --dst-ip or --dst-pk must be provided.
- Runner.topic defaults to "supervisor.rpc" (see main.rs).
- The router auto-discovers contexts and will deliver job.run messages to the supervisor.
- Mycelium URL is read from MYCELIUM_URL (default http://127.0.0.1:8990).
- supervisor.register_runner uses static name="python" and queue="python".
"""
import argparse
import json
import base64
import os
import sys
import time
from typing import Any, Dict, List, Optional, Tuple
from urllib import request, error
JSONRPC_VERSION = "2.0"
def env_url() -> str:
return os.getenv("COORDINATOR_URL", "http://127.0.0.1:9652").rstrip("/")
def env_mycelium_url() -> str:
return os.getenv("MYCELIUM_URL", "http://127.0.0.1:8990").rstrip("/")
class JsonRpcClient:
def __init__(self, url: str):
self.url = url
self._id = 0
def call(self, method: str, params: Dict[str, Any]) -> Any:
self._id += 1
payload = {
"jsonrpc": JSONRPC_VERSION,
"id": self._id,
"method": method,
"params": params,
}
data = json.dumps(payload).encode("utf-8")
req = request.Request(self.url, data=data, headers={"Content-Type": "application/json"})
try:
with request.urlopen(req) as resp:
body = resp.read()
except error.HTTPError as e:
try:
details = e.read().decode("utf-8", "ignore")
except Exception:
details = ""
raise RuntimeError(f"HTTP error {e.code}: {details}") from e
except error.URLError as e:
raise RuntimeError(f"URL error: {e.reason}") from e
try:
obj = json.loads(body.decode("utf-8"))
except Exception as e:
raise RuntimeError(f"Invalid JSON response: {body!r}") from e
if isinstance(obj, list):
raise RuntimeError("Batch responses are not supported")
if obj.get("error"):
raise RuntimeError(f"RPC error: {json.dumps(obj['error'])}")
return obj.get("result")
def print_header(title: str):
print("\n" + "=" * 80)
print(title)
print("=" * 80)
def pretty(obj: Any):
print(json.dumps(obj, indent=2, sort_keys=True))
def mycelium_register_runner(
myc: "JsonRpcClient",
dst_pk: Optional[str],
dst_ip: Optional[str],
topic: str,
admin_secret: str,
name: str = "python",
queue: str = "python",
timeout: int = 15,
) -> Any:
"""
Send supervisor.register_runner over Mycelium using pushMessage.
- myc: JsonRpcClient for the Mycelium API (MYCELIUM_URL)
- dst_pk/dst_ip: destination on the overlay; one of them must be provided
- topic: message topic (defaults to supervisor.rpc from args)
- admin_secret: supervisor admin secret to authorize the registration
- name/queue: static identifiers for the python runner on the supervisor
- timeout: seconds to wait for a reply (currently unused)
Reply handling is currently commented out below, so this call is fire-and-forget followed by a fixed 15-second sleep.
"""
envelope = {
"jsonrpc": JSONRPC_VERSION,
"id": 1,
"method": "register_runner",
"params": [{"secret": admin_secret, "name": name, "queue": queue}],
}
payload_b64 = base64.b64encode(json.dumps(envelope).encode("utf-8")).decode("ascii")
topic_b64 = base64.b64encode(topic.encode("utf-8")).decode("ascii")
if dst_pk:
dst = {"pk": dst_pk}
elif dst_ip:
dst = {"ip": dst_ip}
else:
raise RuntimeError("Either dst_pk or dst_ip must be provided for Mycelium destination")
params = {
"message": {"dst": dst, "topic": topic_b64, "payload": payload_b64},
}
resp = myc.call("pushMessage", params)
time.sleep(15)
# Expect an InboundMessage with a payload if a reply was received
# if isinstance(resp, dict) and "payload" in resp:
# try:
# reply = json.loads(base64.b64decode(resp["payload"]).decode("utf-8"))
# except Exception as e:
# raise RuntimeError(f"Invalid supervisor reply payload: {e}")
# if isinstance(reply, dict) and reply.get("error"):
# raise RuntimeError(f"Supervisor register_runner error: {json.dumps(reply['error'])}")
# return reply.get("result")
#
# raise RuntimeError("No reply received from supervisor for register_runner (timeout)")
def mycelium_start_runner(
myc: "JsonRpcClient",
dst_pk: Optional[str],
dst_ip: Optional[str],
topic: str,
secret: str,
actor_id: str = "python",
timeout: int = 15,
) -> Any:
"""
Send supervisor.start_runner over Mycelium using pushMessage.
- actor_id is set to the static name "python" by default to start the registered python runner.
Reply handling is currently commented out below, so this call is fire-and-forget followed by a fixed 15-second sleep.
"""
envelope = {
"jsonrpc": JSONRPC_VERSION,
"id": 1,
"method": "start_runner",
"params": [actor_id],
}
payload_b64 = base64.b64encode(json.dumps(envelope).encode("utf-8")).decode("ascii")
topic_b64 = base64.b64encode(topic.encode("utf-8")).decode("ascii")
if dst_pk:
dst = {"pk": dst_pk}
elif dst_ip:
dst = {"ip": dst_ip}
else:
raise RuntimeError("Either dst_pk or dst_ip must be provided for Mycelium destination")
params = {
"message": {"dst": dst, "topic": topic_b64, "payload": payload_b64},
}
resp = myc.call("pushMessage", params)
time.sleep(15)
# if isinstance(resp, dict) and "payload" in resp:
# try:
# reply = json.loads(base64.b64decode(resp["payload"]).decode("utf-8"))
# except Exception as e:
# raise RuntimeError(f"Invalid supervisor reply payload (start_runner): {e}")
# if isinstance(reply, dict) and reply.get("error"):
# raise RuntimeError(f"Supervisor start_runner error: {json.dumps(reply['error'])}")
# return reply.get("result")
#
# raise RuntimeError("No reply received from supervisor for start_runner (timeout)")
def try_create_or_load(client: JsonRpcClient, create_method: str, create_params: Dict[str, Any],
load_method: str, load_params: Dict[str, Any]) -> Any:
"""Attempt a create; if it fails due to existence, try load."""
try:
return client.call(create_method, create_params)
except RuntimeError as e:
msg = str(e)
# The server maps AlreadyExists to StorageError; we don't have a structured error code here.
if "Already exists" in msg or "Storage Error" in msg or "Invalid params" in msg:
# Fall back to load
return client.call(load_method, load_params)
raise
def parse_args() -> argparse.Namespace:
p = argparse.ArgumentParser(description="Create actor/context/runner/jobs/flow; start and wait until completion.")
group = p.add_mutually_exclusive_group(required=True)
group.add_argument("--dst-ip", help="Supervisor Mycelium IP address (IPv4 or IPv6)")
group.add_argument("--dst-pk", help="Supervisor public key (64-hex)")
p.add_argument("--context-id", type=int, default=2, help="Context id (Redis DB index; 0-15). Default: 2")
p.add_argument("--actor-id", type=int, default=11001, help="Actor id. Default: 11001")
p.add_argument("--runner-id", type=int, default=12001, help="Runner id. Default: 12001")
p.add_argument("--flow-id", type=int, default=13001, help="Flow id. Default: 13001")
p.add_argument("--base-job-id", type=int, default=20000, help="Base job id for first job; subsequent jobs increment. Default: 20000")
p.add_argument("--jobs", type=int, default=3, help="Number of jobs to create (>=1). Forms a simple chain. Default: 3")
p.add_argument("--timeout-secs", type=int, default=60, help="Per-job timeout seconds. Default: 60")
p.add_argument("--retries", type=int, default=0, help="Per-job retries (0-255). Default: 0")
p.add_argument(
"--script-type",
choices=["Python", "V", "Osis", "Sal"],
default="Python",
help="ScriptType for jobs/runner. Default: Python"
)
p.add_argument("--topic", default="supervisor.rpc", help="Supervisor topic. Default: supervisor.rpc")
p.add_argument("--secret", help="Optional supervisor secret used for authenticated supervisor calls")
p.add_argument("--admin-secret", help="Supervisor admin secret to pre-register a Python runner over Mycelium. If omitted, pre-registration is skipped.")
p.add_argument("--poll-interval", type=float, default=2.0, help="Flow poll interval seconds. Default: 2.0")
p.add_argument("--poll-timeout", type=int, default=600, help="Max seconds to wait for flow completion. Default: 600")
return p.parse_args()
def main():
args = parse_args()
if args.jobs < 1:
print("ERROR: --jobs must be >= 1", file=sys.stderr)
sys.exit(2)
url = env_url()
client = JsonRpcClient(url)
mycelium_url = env_mycelium_url()
mycelium_client = JsonRpcClient(mycelium_url) if getattr(args, "admin_secret", None) else None
actor_id = int(args.actor_id)
context_id = int(args.context_id)
runner_id = int(args.runner_id)
flow_id = int(args.flow_id)
base_job_id = int(args.base_job_id)
script_type = args.script_type
timeout = int(args.timeout_secs)
retries = int(args.retries)
topic = args.topic
# 1) Actor
print_header("actor.create (or load)")
actor = try_create_or_load(
client,
"actor.create",
{
"actor": {
"id": actor_id,
"pubkey": "demo-pubkey",
"address": ["127.0.0.1"],
}
},
"actor.load",
{"id": actor_id},
)
pretty(actor)
# 2) Context
print_header("context.create (or load)")
context = try_create_or_load(
client,
"context.create",
{
"context": {
"id": context_id,
"admins": [actor_id],
"readers": [actor_id],
"executors": [actor_id],
}
},
"context.load",
{"id": context_id},
)
pretty(context)
# 3) Runner in this context
# Router picks pubkey if non-empty, else IP address.
# However, RunnerCreate requires both fields; we fill both and control routing via pubkey empty/non-empty.
runner_pubkey = args.dst_pk if args.dst_pk else ""
runner_address = args.dst_ip if args.dst_ip else "127.0.0.1"
# Optional: pre-register a Python runner on the Supervisor over Mycelium using an admin secret
if getattr(args, "admin_secret", None):
print_header("supervisor.register_runner (pre-register via Mycelium)")
try:
mycelium_result = mycelium_register_runner(
mycelium_client,
args.dst_pk if args.dst_pk else None,
args.dst_ip if args.dst_ip else None,
topic,
args.admin_secret,
name="Python",
queue="Python",
timeout=15,
)
print("Supervisor register_runner ->", mycelium_result)
except Exception as e:
print(f"ERROR: Supervisor pre-registration failed: {e}", file=sys.stderr)
sys.exit(1)
print_header("supervisor.start_runner (start via Mycelium)")
try:
mycelium_result = mycelium_start_runner(
mycelium_client,
args.dst_pk if args.dst_pk else None,
args.dst_ip if args.dst_ip else None,
topic,
args.admin_secret,
actor_id="Python",
timeout=15,
)
print("Supervisor start_runner ->", mycelium_result)
except Exception as e:
print(f"ERROR: Supervisor start failed: {e}", file=sys.stderr)
sys.exit(1)
print_header("runner.create (or load)")
# runner.load requires both context_id and id
try:
runner_payload = {
"id": runner_id,
"pubkey": runner_pubkey,
"address": runner_address,
"topic": topic,
"script_type": script_type,
"local": False,
}
# Optional supervisor secret used by router for authenticated supervisor calls
if getattr(args, "secret", None):
runner_payload["secret"] = args.secret
runner = client.call("runner.create", {
"context_id": context_id,
"runner": runner_payload
})
except RuntimeError as e:
msg = str(e)
if "Already exists" in msg or "Storage Error" in msg or "Invalid params" in msg:
runner = client.call("runner.load", {"context_id": context_id, "id": runner_id})
else:
raise
pretty(runner)
# 4) Jobs
# Build a simple chain: J0 (root), J1 depends on J0, J2 depends on J1, ... up to N-1
job_ids: List[int] = []
for i in range(args.jobs):
jid = base_job_id + i
depends = [] if i == 0 else [base_job_id + (i - 1)]
job_payload = {
"id": jid,
"caller_id": actor_id,
"context_id": context_id,
"script": f"print('Job {i} running')",
"script_type": script_type,
"timeout": timeout,
"retries": retries,
"env_vars": {},
"prerequisites": [],
"depends": depends,
}
print_header(f"job.create - {jid} {'(root)' if not depends else f'(depends on {depends})'}")
try:
job = client.call("job.create", {
"context_id": context_id,
"job": job_payload
})
except RuntimeError as e:
msg = str(e)
if "Already exists" in msg or "Storage Error" in msg or "Invalid params" in msg:
job = client.call("job.load", {
"context_id": context_id,
"caller_id": actor_id,
"id": jid
})
else:
raise
pretty(job)
job_ids.append(jid)
# 5) Flow
print_header("flow.create (or load)")
try:
flow = client.call("flow.create", {
"context_id": context_id,
"flow": {
"id": flow_id,
"caller_id": actor_id,
"context_id": context_id,
"jobs": job_ids,
"env_vars": {}
}
})
except RuntimeError as e:
msg = str(e)
if "Already exists" in msg or "Storage Error" in msg or "Invalid params" in msg:
flow = client.call("flow.load", {"context_id": context_id, "id": flow_id})
else:
raise
pretty(flow)
# Optional: show DAG
try:
print_header("flow.dag")
dag = client.call("flow.dag", {"context_id": context_id, "id": flow_id})
pretty(dag)
except Exception as e:
print(f"WARN: flow.dag failed: {e}", file=sys.stderr)
# 6) Start flow (idempotent; returns bool whether scheduler started)
print_header("flow.start")
started = client.call("flow.start", {"context_id": context_id, "id": flow_id})
print(f"flow.start -> {started}")
# 7) Poll until Finished or Error (or timeout)
print_header("Polling flow.load until completion")
t0 = time.time()
status = None
last_status_print = 0.0
poll_count = 0
while True:
poll_count += 1
flow = client.call("flow.load", {"context_id": context_id, "id": flow_id})
status = flow.get("status")
now = time.time()
if now - last_status_print >= max(1.0, float(args.poll_interval)):
print(f"[{int(now - t0)}s] flow.status = {status}")
last_status_print = now
# Every 5th poll, print the current flow DAG
if (poll_count % 5) == 0:
try:
print_header("flow.dag (periodic)")
dag = client.call("flow.dag", {"context_id": context_id, "id": flow_id})
pretty(dag)
except Exception as e:
print(f"WARN: periodic flow.dag failed: {e}", file=sys.stderr)
if status in ("Finished", "Error"):
break
if (now - t0) > args.poll_timeout:
print(f"ERROR: Flow did not complete within {args.poll_timeout}s (status={status})", file=sys.stderr)
break
time.sleep(float(args.poll_interval))
# 8) Final summary: job statuses
print_header("Final job statuses")
for jid in job_ids:
try:
j = client.call("job.load", {
"context_id": context_id,
"caller_id": actor_id,
"id": jid
})
print(f"Job {jid}: status={j.get('status')} result={j.get('result')}")
except Exception as e:
print(f"Job {jid}: load failed: {e}", file=sys.stderr)
# Exit code
if status == "Finished":
print_header("Result")
print("Flow finished successfully.")
sys.exit(0)
else:
print_header("Result")
print(f"Flow ended with status={status}")
sys.exit(1)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("\nInterrupted.")
sys.exit(130)
except Exception as e:
print_header("Error")
print(str(e))
sys.exit(1)
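The supervisor pre-registration above uses a double envelope: the supervisor JSON-RPC request is base64-encoded and carried as the payload of a Mycelium pushMessage, with the topic also base64-encoded. A minimal sketch of just that envelope construction (the destination IP mirrors the usage example in the docstring; the admin secret is a placeholder):
import base64
import json

def build_push_message(dst_ip: str, topic: str, method: str, params) -> dict:
    # Inner supervisor JSON-RPC request, base64-encoded as the Mycelium payload.
    inner = {"jsonrpc": "2.0", "id": 1, "method": method, "params": params}
    return {
        "message": {
            "dst": {"ip": dst_ip},
            "topic": base64.b64encode(topic.encode()).decode("ascii"),
            "payload": base64.b64encode(json.dumps(inner).encode()).decode("ascii"),
        }
    }

# Same shape mycelium_register_runner sends for supervisor.register_runner.
params = build_push_message("2001:db8::1", "supervisor.rpc", "register_runner",
                            [{"secret": "admin-secret", "name": "python", "queue": "python"}])
print(json.dumps(params, indent=2))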

JSON schema (filename not shown in this view): adds an optional "secret" string field

@@ -563,6 +563,9 @@
"local": { "local": {
"type": "boolean" "type": "boolean"
}, },
"secret": {
"type": "string"
},
"created_at": { "created_at": {
"type": "integer", "type": "integer",
"format": "int64" "format": "int64"
@@ -1001,6 +1004,9 @@
}, },
"local": { "local": {
"type": "boolean" "type": "boolean"
},
"secret": {
"type": "string"
} }
} }
}, },
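The "secret" field added to the schema above is the one the demo scripts already pass when creating a runner; for reference, a runner.create params payload of that shape (values are the demo defaults from scripts/jsonrpc_demo.py):
runner_create_params = {
    "context_id": 1,
    "runner": {
        "id": 2001,
        "pubkey": "",             # empty: route by IP instead of public key
        "address": "127.0.0.1",
        "topic": "runner2001",
        "script_type": "Python",
        "local": True,
        "secret": "demo-secret",  # the newly added optional field
    },
}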

clients module (mod.rs): exports the new supervisor_hub

@@ -1,7 +1,9 @@
pub mod mycelium_client; pub mod mycelium_client;
pub mod supervisor_client; pub mod supervisor_client;
pub mod supervisor_hub;
pub mod types; pub mod types;
pub use mycelium_client::{MyceliumClient, MyceliumClientError}; pub use mycelium_client::{MyceliumClient, MyceliumClientError};
pub use supervisor_client::{SupervisorClient, SupervisorClientError}; pub use supervisor_client::{SupervisorClient, SupervisorClientError};
pub use supervisor_hub::SupervisorHub;
pub use types::Destination; pub use types::Destination;

mycelium_client.rs (clients module)

@@ -3,6 +3,8 @@ use std::sync::atomic::{AtomicU64, Ordering};
use reqwest::Client as HttpClient; use reqwest::Client as HttpClient;
use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::{Value, json}; use serde_json::{Value, json};
use thiserror::Error; use thiserror::Error;
@@ -53,6 +55,8 @@ impl MyceliumClient {
"method": method, "method": method,
"params": [ params ] "params": [ params ]
}); });
tracing::info!(%req, "jsonrpc");
let resp = self.http.post(&self.base_url).json(&req).send().await?; let resp = self.http.post(&self.base_url).json(&req).send().await?;
let status = resp.status(); let status = resp.status();
let body: Value = resp.json().await?; let body: Value = resp.json().await?;
@@ -82,13 +86,13 @@ impl MyceliumClient {
&self, &self,
id_hex: &str, id_hex: &str,
) -> Result<TransportStatus, MyceliumClientError> { ) -> Result<TransportStatus, MyceliumClientError> {
let params = json!({ "id": id_hex }); let params = json!(id_hex);
let body = self.jsonrpc("messageStatus", params).await?; let body = self.jsonrpc("getMessageInfo", params).await?;
let result = body.get("result").ok_or_else(|| { let result = body.get("result").ok_or_else(|| {
MyceliumClientError::InvalidResponse(format!("missing result in response: {body}")) MyceliumClientError::InvalidResponse(format!("missing result in response: {body}"))
})?; })?;
// Accept both { status: "..."} and bare "..." // Accept both { state: "..."} and bare "..."
let status_str = if let Some(s) = result.get("status").and_then(|v| v.as_str()) { let status_str = if let Some(s) = result.get("state").and_then(|v| v.as_str()) {
s.to_string() s.to_string()
} else if let Some(s) = result.as_str() { } else if let Some(s) = result.as_str() {
s.to_string() s.to_string()
@@ -97,18 +101,19 @@ impl MyceliumClient {
"unexpected result shape: {result}" "unexpected result shape: {result}"
))); )));
}; };
Self::map_status(&status_str).ok_or_else(|| { let status = Self::map_status(&status_str).ok_or_else(|| {
MyceliumClientError::InvalidResponse(format!("unknown status: {status_str}")) MyceliumClientError::InvalidResponse(format!("unknown status: {status_str}"))
}) });
if let Ok(s) = &status {
tracing::info!(%id_hex, status = %s, "queried message status");
}
status
} }
fn map_status(s: &str) -> Option<TransportStatus> { fn map_status(s: &str) -> Option<TransportStatus> {
match s { match s {
"queued" => Some(TransportStatus::Queued), "pending" => Some(TransportStatus::Queued),
"sent" => Some(TransportStatus::Sent), "received" => Some(TransportStatus::Delivered),
"delivered" => Some(TransportStatus::Delivered),
"read" => Some(TransportStatus::Read), "read" => Some(TransportStatus::Read),
"failed" => Some(TransportStatus::Failed), "aborted" => Some(TransportStatus::Failed),
_ => None, _ => None,
} }
} }
@@ -125,16 +130,15 @@ impl MyceliumClient {
Destination::Ip(ip) => json!({ "ip": ip.to_string() }), Destination::Ip(ip) => json!({ "ip": ip.to_string() }),
Destination::Pk(pk) => json!({ "pk": pk }), Destination::Pk(pk) => json!({ "pk": pk }),
}; };
let message = json!({ let mut message = json!({
"dst": dst_v, "dst": dst_v,
"topic": topic, "topic": topic,
"payload": payload_b64, "payload": payload_b64,
}); });
let mut params = json!({ "message": message });
if let Some(rt) = reply_timeout { if let Some(rt) = reply_timeout {
params["reply_timeout"] = json!(rt); message["reply_timeout"] = json!(rt);
} }
params message
} }
/// pushMessage: send a message with dst/topic/payload. Optional reply_timeout for sync replies. /// pushMessage: send a message with dst/topic/payload. Optional reply_timeout for sync replies.
@@ -160,6 +164,83 @@ impl MyceliumClient {
.and_then(|v| v.as_str()) .and_then(|v| v.as_str())
.map(|s| s.to_string()) .map(|s| s.to_string())
} }
/// popMessage: retrieve an inbound message if available (optionally filtered by topic).
/// - peek: if true, do not remove the message from the queue
/// - timeout_secs: seconds to wait for a message (0 returns immediately)
/// - topic_plain: optional plain-text topic which will be base64-encoded per Mycelium spec
/// Returns:
/// - Ok(Some(result_json)) on success, where result_json matches InboundMessage schema
/// - Ok(None) when there is no message ready (Mycelium returns error code 204)
pub async fn pop_message(
&self,
peek: Option<bool>,
timeout_secs: Option<u64>,
topic_plain: Option<&str>,
) -> Result<Option<Value>, MyceliumClientError> {
// Build params array
let mut params_array = vec![];
if let Some(p) = peek {
params_array.push(serde_json::Value::Bool(p));
} else {
params_array.push(serde_json::Value::Null)
}
if let Some(t) = timeout_secs {
params_array.push(serde_json::Value::Number(t.into()));
} else {
params_array.push(serde_json::Value::Null)
}
if let Some(tp) = topic_plain {
let topic_b64 = BASE64_STANDARD.encode(tp.as_bytes());
params_array.push(serde_json::Value::String(topic_b64));
} else {
params_array.push(serde_json::Value::Null)
}
let req = json!({
"jsonrpc": "2.0",
"id": self.next_id(),
"method": "popMessage",
"params": serde_json::Value::Array(params_array),
});
tracing::info!(%req, "calling popMessage");
let resp = self.http.post(&self.base_url).json(&req).send().await?;
let status = resp.status();
let body: Value = resp.json().await?;
// Handle JSON-RPC error envelope specially for code 204 (no message ready)
if let Some(err) = body.get("error") {
let code = err.get("code").and_then(|v| v.as_i64()).unwrap_or(0);
let msg = err
.get("message")
.and_then(|v| v.as_str())
.unwrap_or("unknown error");
if code == 204 {
// No message ready
return Ok(None);
}
if code == 408 {
// Align with other transport timeout mapping
return Err(MyceliumClientError::TransportTimeout);
}
return Err(MyceliumClientError::RpcError(format!(
"code={code} msg={msg}"
)));
}
if !status.is_success() {
return Err(MyceliumClientError::RpcError(format!(
"HTTP {status}, body {body}"
)));
}
let result = body.get("result").ok_or_else(|| {
MyceliumClientError::InvalidResponse(format!("missing result in response: {body}"))
})?;
Ok(Some(result.clone()))
}
} }
#[cfg(test)] #[cfg(test)]
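For reference, the popMessage parameters added above are positional — [peek, timeout_secs, topic] — with the topic base64-encoded, and a JSON-RPC error code of 204 means no message is ready. A minimal polling sketch against the Mycelium JSON-RPC API (assuming the default MYCELIUM_URL the demo scripts use):
import base64
import json
import os
from urllib import request

MYCELIUM_URL = os.getenv("MYCELIUM_URL", "http://127.0.0.1:8990")

def pop_message(timeout_secs: int = 20, topic: str = "supervisor.rpc"):
    # Positional params [peek, timeout_secs, base64(topic)], mirroring the Rust client above.
    params = [False, timeout_secs, base64.b64encode(topic.encode()).decode("ascii")]
    body = {"jsonrpc": "2.0", "id": 1, "method": "popMessage", "params": params}
    req = request.Request(MYCELIUM_URL, data=json.dumps(body).encode("utf-8"),
                          headers={"Content-Type": "application/json"})
    with request.urlopen(req) as resp:
        obj = json.loads(resp.read())
    if "error" in obj:
        if obj["error"].get("code") == 204:
            return None  # no message ready
        raise RuntimeError(obj["error"])
    return obj["result"]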

supervisor_client.rs (clients module)

@@ -1,20 +1,20 @@
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use base64::Engine; use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::{Value, json}; use serde_json::{Value, json};
use thiserror::Error; use thiserror::Error;
use tokio::time::timeout;
use crate::clients::{Destination, MyceliumClient, MyceliumClientError}; use crate::clients::{Destination, MyceliumClient, MyceliumClientError, SupervisorHub};
#[derive(Clone)] #[derive(Clone)]
pub struct SupervisorClient { pub struct SupervisorClient {
mycelium: Arc<MyceliumClient>, // Delegated Mycelium transport hub: Arc<SupervisorHub>, // Global hub with background pop loop and shared id generator
destination: Destination, // ip or pk destination: Destination, // ip or pk
topic: String, // e.g. "supervisor.rpc" secret: Option<String>, // optional, required by several supervisor methods
secret: Option<String>, // optional, required by several supervisor methods
id_counter: Arc<AtomicU64>, // JSON-RPC id generator (for inner supervisor requests)
} }
#[derive(Debug, Error)] #[derive(Debug, Error)]
@@ -46,24 +46,22 @@ impl From<MyceliumClientError> for SupervisorClientError {
} }
impl SupervisorClient { impl SupervisorClient {
/// Preferred constructor: provide a shared Mycelium client. /// Preferred constructor using a shared SupervisorHub (single global listener).
pub fn new_with_client( pub fn new_with_hub(
mycelium: Arc<MyceliumClient>, hub: Arc<SupervisorHub>,
destination: Destination, destination: Destination,
topic: impl Into<String>,
secret: Option<String>, secret: Option<String>,
) -> Self { ) -> Self {
Self { Self {
mycelium, hub,
destination, destination,
topic: topic.into(),
secret, secret,
id_counter: Arc::new(AtomicU64::new(1)),
} }
} }
/// Backward-compatible constructor that builds a Mycelium client from base_url. /// Backward-compatible constructor that builds a new Hub from base_url/topic.
/// base_url defaults to Mycelium spec "http://127.0.0.1:8990" if empty. /// NOTE: This spawns a background popMessage listener for the given topic.
/// Prefer `new_with_hub` so the process has a single global hub.
pub fn new( pub fn new(
base_url: impl Into<String>, base_url: impl Into<String>,
destination: Destination, destination: Destination,
@@ -78,8 +76,16 @@ impl SupervisorClient {
Ok(Self::new_with_client(mycelium, destination, topic, secret)) Ok(Self::new_with_client(mycelium, destination, topic, secret))
} }
fn next_id(&self) -> u64 { /// Backward-compatible constructor that reuses an existing Mycelium client.
self.id_counter.fetch_add(1, Ordering::Relaxed) /// NOTE: This creates a new hub and its own background listener. Prefer `new_with_hub`.
pub fn new_with_client(
mycelium: Arc<MyceliumClient>,
destination: Destination,
topic: impl Into<String>,
secret: Option<String>,
) -> Self {
let hub = SupervisorHub::new_with_client(mycelium, topic);
Self::new_with_hub(hub, destination, secret)
} }
/// Internal helper used by tests to inspect dst JSON shape. /// Internal helper used by tests to inspect dst JSON shape.
@@ -93,7 +99,17 @@ impl SupervisorClient {
fn build_supervisor_payload(&self, method: &str, params: Value) -> Value { fn build_supervisor_payload(&self, method: &str, params: Value) -> Value {
json!({ json!({
"jsonrpc": "2.0", "jsonrpc": "2.0",
"id": self.next_id(), "id": self.hub.next_id(),
"method": method,
"params": params,
})
}
/// Build a supervisor JSON-RPC payload but force a specific id (used for correlation).
fn build_supervisor_payload_with_id(&self, method: &str, params: Value, id: u64) -> Value {
json!({
"jsonrpc": "2.0",
"id": id,
"method": method, "method": method,
"params": params, "params": params,
}) })
@@ -104,6 +120,10 @@ impl SupervisorClient {
Ok(BASE64_STANDARD.encode(s.as_bytes())) Ok(BASE64_STANDARD.encode(s.as_bytes()))
} }
fn encode_topic(topic: &[u8]) -> String {
BASE64_STANDARD.encode(topic)
}
fn extract_message_id_from_result(result: &Value) -> Option<String> { fn extract_message_id_from_result(result: &Value) -> Option<String> {
// Two possibilities per Mycelium spec oneOf: // Two possibilities per Mycelium spec oneOf:
// - PushMessageResponseId: { "id": "0123456789abcdef" } // - PushMessageResponseId: { "id": "0123456789abcdef" }
@@ -114,91 +134,6 @@ impl SupervisorClient {
.map(|s| s.to_string()) .map(|s| s.to_string())
} }
/// Generic call: build supervisor JSON-RPC message, send via Mycelium pushMessage, return outbound message id (hex).
pub async fn call(&self, method: &str, params: Value) -> Result<String, SupervisorClientError> {
let inner = self.build_supervisor_payload(method, params);
let payload_b64 = Self::encode_payload(&inner)?;
let result = self
.mycelium
.push_message(&self.destination, &self.topic, &payload_b64, None)
.await?;
if let Some(id) = MyceliumClient::extract_message_id_from_result(&result) {
return Ok(id);
}
// Some servers might return the oneOf wrapped, handle len==1 array defensively (not in spec but resilient)
if let Some(arr) = result.as_array()
&& arr.len() == 1
&& let Some(id) = MyceliumClient::extract_message_id_from_result(&arr[0])
{
return Ok(id);
}
Err(SupervisorClientError::InvalidResponse(format!(
"result did not contain message id: {result}"
)))
}
/// Synchronous variant: wait for a JSON-RPC reply via Mycelium reply_timeout, and return the inner JSON-RPC "result".
/// If the supervisor returns an error object, map to RpcError.
pub async fn call_sync(
&self,
method: &str,
params: Value,
reply_timeout_secs: u64,
) -> Result<Value, SupervisorClientError> {
let inner = self.build_supervisor_payload(method, params);
let payload_b64 = Self::encode_payload(&inner)?;
let result = self
.mycelium
.push_message(
&self.destination,
&self.topic,
&payload_b64,
Some(reply_timeout_secs),
)
.await?;
// Expect an InboundMessage-like with a base64 payload containing the supervisor JSON-RPC response
let payload_field = if let Some(p) = result.get("payload").and_then(|v| v.as_str()) {
p.to_string()
} else if let Some(arr) = result.as_array() {
// Defensive: handle single-element array shape
if let Some(one) = arr.get(0) {
one.get("payload")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.ok_or_else(|| {
SupervisorClientError::InvalidResponse(format!(
"missing payload in result: {result}"
))
})?
} else {
return Err(SupervisorClientError::TransportTimeout);
}
} else {
// No payload => no reply received within timeout (Mycelium would have returned just an id)
return Err(SupervisorClientError::TransportTimeout);
};
let raw = BASE64_STANDARD
.decode(payload_field.as_bytes())
.map_err(|e| {
SupervisorClientError::InvalidResponse(format!("invalid base64 payload: {e}"))
})?;
let rpc_resp: Value = serde_json::from_slice(&raw)?;
if let Some(err) = rpc_resp.get("error") {
return Err(SupervisorClientError::RpcError(err.to_string()));
}
let res = rpc_resp.get("result").ok_or_else(|| {
SupervisorClientError::InvalidResponse(format!(
"missing result in supervisor reply: {rpc_resp}"
))
})?;
Ok(res.clone())
}
fn need_secret(&self) -> Result<&str, SupervisorClientError> { fn need_secret(&self) -> Result<&str, SupervisorClientError> {
self.secret self.secret
.as_deref() .as_deref()
@@ -206,13 +141,247 @@ impl SupervisorClient {
} }
// ----------------------------- // -----------------------------
// Typed wrappers for Supervisor API // Core: request-reply call via Hub with default 60s timeout
// Asynchronous-only: returns outbound message id // -----------------------------
/// Send a supervisor JSON-RPC request and await its reply via the Hub.
/// Returns (outbound_message_id, reply_envelope_json).
pub async fn call_with_reply_timeout(
&self,
method: &str,
params: Value,
timeout_secs: u64,
) -> Result<(String, Value), SupervisorClientError> {
let inner_id = self.hub.next_id();
// Register waiter before sending to avoid race
let rx = self.hub.register_waiter(inner_id).await;
let inner = self.build_supervisor_payload_with_id(method, params, inner_id);
let payload_b64 = Self::encode_payload(&inner)?;
let result = self
.hub
.mycelium()
.push_message(
&self.destination,
&Self::encode_topic(self.hub.topic().as_bytes()),
&payload_b64,
None,
)
.await?;
let out_id = if let Some(id) = MyceliumClient::extract_message_id_from_result(&result) {
id
} else if let Some(arr) = result.as_array()
&& arr.len() == 1
&& let Some(id) = MyceliumClient::extract_message_id_from_result(&arr[0])
{
id
} else {
// Clean pending entry to avoid leak
let _ = self.hub.remove_waiter(inner_id).await;
return Err(SupervisorClientError::InvalidResponse(format!(
"result did not contain message id: {result}"
)));
};
let d = Duration::from_secs(timeout_secs);
match timeout(d, rx).await {
Ok(Ok(reply)) => Ok((out_id, reply)),
Ok(Err(_canceled)) => Err(SupervisorClientError::InvalidResponse(
"oneshot canceled before receiving reply".into(),
)),
Err(_elapsed) => {
// Cleanup on timeout
let _ = self.hub.remove_waiter(inner_id).await;
Err(SupervisorClientError::TransportTimeout)
}
}
}
/// Send and await with the default 60s timeout.
pub async fn call_with_reply(
&self,
method: &str,
params: Value,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply_timeout(method, params, 60).await
}
/// Back-compat: Send and await a reply but return only the outbound id (discard reply).
/// This keeps existing call sites working while the system migrates to reply-aware paths.
pub async fn call(&self, method: &str, params: Value) -> Result<String, SupervisorClientError> {
let (out_id, _reply) = self.call_with_reply(method, params).await?;
Ok(out_id)
}
// -----------------------------
// Typed wrappers for Supervisor API (await replies)
// ----------------------------- // -----------------------------
// Runners // Runners
pub async fn list_runners_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("list_runners", json!([])).await
}
pub async fn register_runner_wait(
&self,
name: impl Into<String>,
queue: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"name": name.into(),
"queue": queue.into()
}]);
self.call_with_reply("register_runner", params).await
}
pub async fn remove_runner_wait(
&self,
actor_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("remove_runner", json!([actor_id.into()]))
.await
}
pub async fn start_runner_wait(
&self,
actor_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("start_runner", json!([actor_id.into()]))
.await
}
pub async fn stop_runner_wait(
&self,
actor_id: impl Into<String>,
force: bool,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("stop_runner", json!([actor_id.into(), force]))
.await
}
pub async fn get_runner_status_wait(
&self,
actor_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("get_runner_status", json!([actor_id.into()]))
.await
}
pub async fn get_all_runner_status_wait(
&self,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("get_all_runner_status", json!([]))
.await
}
pub async fn start_all_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("start_all", json!([])).await
}
pub async fn stop_all_wait(
&self,
force: bool,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("stop_all", json!([force])).await
}
pub async fn get_all_status_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("get_all_status", json!([])).await
}
// Jobs (await)
pub async fn jobs_create_wait(
&self,
job: Value,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job": job
}]);
self.call_with_reply("jobs.create", params).await
}
pub async fn jobs_list_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("jobs.list", json!([])).await
}
pub async fn job_run_wait(&self, job: Value) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job": job
}]);
self.call_with_reply("job.run", params).await
}
pub async fn job_start_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job_id": job_id.into()
}]);
self.call_with_reply("job.start", params).await
}
pub async fn job_status_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("job.status", json!([job_id.into()]))
.await
}
pub async fn job_result_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("job.result", json!([job_id.into()]))
.await
}
pub async fn job_stop_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job_id": job_id.into()
}]);
self.call_with_reply("job.stop", params).await
}
pub async fn job_delete_wait(
&self,
job_id: impl Into<String>,
) -> Result<(String, Value), SupervisorClientError> {
let secret = self.need_secret()?;
let params = json!([{
"secret": secret,
"job_id": job_id.into()
}]);
self.call_with_reply("job.delete", params).await
}
pub async fn rpc_discover_wait(&self) -> Result<(String, Value), SupervisorClientError> {
self.call_with_reply("rpc.discover", json!([])).await
}
// -----------------------------
// Backward-compatible variants returning only outbound id (discarding reply)
// -----------------------------
pub async fn list_runners(&self) -> Result<String, SupervisorClientError> { pub async fn list_runners(&self) -> Result<String, SupervisorClientError> {
self.call("list_runners", json!([])).await let (id, _) = self.list_runners_wait().await?;
Ok(id)
} }
pub async fn register_runner( pub async fn register_runner(
@@ -220,27 +389,24 @@ impl SupervisorClient {
name: impl Into<String>, name: impl Into<String>,
queue: impl Into<String>, queue: impl Into<String>,
) -> Result<String, SupervisorClientError> { ) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?; let (id, _) = self.register_runner_wait(name, queue).await?;
let params = json!([{ Ok(id)
"secret": secret,
"name": name.into(),
"queue": queue.into()
}]);
self.call("register_runner", params).await
} }
pub async fn remove_runner( pub async fn remove_runner(
&self, &self,
actor_id: impl Into<String>, actor_id: impl Into<String>,
) -> Result<String, SupervisorClientError> { ) -> Result<String, SupervisorClientError> {
self.call("remove_runner", json!([actor_id.into()])).await let (id, _) = self.remove_runner_wait(actor_id).await?;
Ok(id)
} }
pub async fn start_runner( pub async fn start_runner(
&self, &self,
actor_id: impl Into<String>, actor_id: impl Into<String>,
) -> Result<String, SupervisorClientError> { ) -> Result<String, SupervisorClientError> {
self.call("start_runner", json!([actor_id.into()])).await let (id, _) = self.start_runner_wait(actor_id).await?;
Ok(id)
} }
pub async fn stop_runner( pub async fn stop_runner(
@@ -248,171 +414,96 @@ impl SupervisorClient {
actor_id: impl Into<String>, actor_id: impl Into<String>,
force: bool, force: bool,
) -> Result<String, SupervisorClientError> { ) -> Result<String, SupervisorClientError> {
self.call("stop_runner", json!([actor_id.into(), force])) let (id, _) = self.stop_runner_wait(actor_id, force).await?;
.await Ok(id)
} }
pub async fn get_runner_status( pub async fn get_runner_status(
&self, &self,
actor_id: impl Into<String>, actor_id: impl Into<String>,
) -> Result<String, SupervisorClientError> { ) -> Result<String, SupervisorClientError> {
self.call("get_runner_status", json!([actor_id.into()])) let (id, _) = self.get_runner_status_wait(actor_id).await?;
.await Ok(id)
} }
pub async fn get_all_runner_status(&self) -> Result<String, SupervisorClientError> { pub async fn get_all_runner_status(&self) -> Result<String, SupervisorClientError> {
self.call("get_all_runner_status", json!([])).await let (id, _) = self.get_all_runner_status_wait().await?;
Ok(id)
} }
pub async fn start_all(&self) -> Result<String, SupervisorClientError> { pub async fn start_all(&self) -> Result<String, SupervisorClientError> {
self.call("start_all", json!([])).await let (id, _) = self.start_all_wait().await?;
Ok(id)
} }
pub async fn stop_all(&self, force: bool) -> Result<String, SupervisorClientError> { pub async fn stop_all(&self, force: bool) -> Result<String, SupervisorClientError> {
self.call("stop_all", json!([force])).await let (id, _) = self.stop_all_wait(force).await?;
Ok(id)
} }
pub async fn get_all_status(&self) -> Result<String, SupervisorClientError> { pub async fn get_all_status(&self) -> Result<String, SupervisorClientError> {
self.call("get_all_status", json!([])).await let (id, _) = self.get_all_status_wait().await?;
Ok(id)
} }
// Jobs
pub async fn jobs_create(&self, job: Value) -> Result<String, SupervisorClientError> { pub async fn jobs_create(&self, job: Value) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?; let (id, _) = self.jobs_create_wait(job).await?;
let params = json!([{ Ok(id)
"secret": secret,
"job": job
}]);
self.call("jobs.create", params).await
} }
pub async fn jobs_list(&self) -> Result<String, SupervisorClientError> { pub async fn jobs_list(&self) -> Result<String, SupervisorClientError> {
self.call("jobs.list", json!([])).await let (id, _) = self.jobs_list_wait().await?;
Ok(id)
} }
pub async fn job_run(&self, job: Value) -> Result<String, SupervisorClientError> { pub async fn job_run(&self, job: Value) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?; let (id, _) = self.job_run_wait(job).await?;
let params = json!([{ Ok(id)
"secret": secret,
"job": job
}]);
self.call("job.run", params).await
} }
pub async fn job_start( pub async fn job_start(
&self, &self,
job_id: impl Into<String>, job_id: impl Into<String>,
) -> Result<String, SupervisorClientError> { ) -> Result<String, SupervisorClientError> {
let secret = self.need_secret()?; let (id, _) = self.job_start_wait(job_id).await?;
let params = json!([{ Ok(id)
"secret": secret,
"job_id": job_id.into()
}]);
self.call("job.start", params).await
} }
pub async fn job_status( pub async fn job_status(
&self, &self,
         job_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        self.call("job.status", json!([job_id.into()])).await
-    }
-
-    /// Synchronous job.status: waits for the supervisor to reply and returns the status string.
-    /// The supervisor result may be an object with { status: "..." } or a bare string.
-    pub async fn job_status_sync(
-        &self,
-        job_id: impl Into<String>,
-        reply_timeout_secs: u64,
-    ) -> Result<String, SupervisorClientError> {
-        let res = self
-            .call_sync("job.status", json!([job_id.into()]), reply_timeout_secs)
-            .await?;
-        let status = if let Some(s) = res.get("status").and_then(|v| v.as_str()) {
-            s.to_string()
-        } else if let Some(s) = res.as_str() {
-            s.to_string()
-        } else {
-            return Err(SupervisorClientError::InvalidResponse(format!(
-                "unexpected job.status result shape: {res}"
-            )));
-        };
-        Ok(status)
+        let (id, _) = self.job_status_wait(job_id).await?;
+        Ok(id)
     }

     pub async fn job_result(
         &self,
         job_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        self.call("job.result", json!([job_id.into()])).await
-    }
-
-    /// Synchronous job.result: waits for the supervisor to reply and returns a map
-    /// containing exactly one of:
-    /// - {"success": "..."} on success
-    /// - {"error": "..."} on error reported by the runner
-    /// Some servers may return a bare string; we treat that as {"success": "<string>"}.
-    pub async fn job_result_sync(
-        &self,
-        job_id: impl Into<String>,
-        reply_timeout_secs: u64,
-    ) -> Result<std::collections::HashMap<String, String>, SupervisorClientError> {
-        let res = self
-            .call_sync("job.result", json!([job_id.into()]), reply_timeout_secs)
-            .await?;
-        use std::collections::HashMap;
-        let mut out: HashMap<String, String> = HashMap::new();
-        if let Some(obj) = res.as_object() {
-            if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
-                out.insert("success".to_string(), s.to_string());
-                return Ok(out);
-            }
-            if let Some(s) = obj.get("error").and_then(|v| v.as_str()) {
-                out.insert("error".to_string(), s.to_string());
-                return Ok(out);
-            }
-            return Err(SupervisorClientError::InvalidResponse(format!(
-                "unexpected job.result result shape: {res}"
-            )));
-        } else if let Some(s) = res.as_str() {
-            out.insert("success".to_string(), s.to_string());
-            return Ok(out);
-        }
-        Err(SupervisorClientError::InvalidResponse(format!(
-            "unexpected job.result result shape: {res}"
-        )))
+        let (id, _) = self.job_result_wait(job_id).await?;
+        Ok(id)
     }

     pub async fn job_stop(
         &self,
         job_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        let secret = self.need_secret()?;
-        let params = json!([{
-            "secret": secret,
-            "job_id": job_id.into()
-        }]);
-        self.call("job.stop", params).await
+        let (id, _) = self.job_stop_wait(job_id).await?;
+        Ok(id)
     }

     pub async fn job_delete(
         &self,
         job_id: impl Into<String>,
     ) -> Result<String, SupervisorClientError> {
-        let secret = self.need_secret()?;
-        let params = json!([{
-            "secret": secret,
-            "job_id": job_id.into()
-        }]);
-        self.call("job.delete", params).await
+        let (id, _) = self.job_delete_wait(job_id).await?;
+        Ok(id)
     }

     // Discovery
     pub async fn rpc_discover(&self) -> Result<String, SupervisorClientError> {
-        self.call("rpc.discover", json!([])).await
+        let (id, _) = self.rpc_discover_wait().await?;
+        Ok(id)
     }
 }
@@ -425,27 +516,27 @@ mod tests {
     use std::net::IpAddr;

     fn mk_client() -> SupervisorClient {
-        // Uses the legacy constructor but will not issue real network calls in these tests.
-        SupervisorClient::new(
-            "http://127.0.0.1:8990",
+        // Build a hub but it won't issue real network calls in these serializer-only tests.
+        let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap());
+        let hub = SupervisorHub::new_with_client(mycelium, "supervisor.rpc");
+        SupervisorClient::new_with_hub(
+            hub,
             Destination::Pk(
                 "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32".to_string(),
             ),
-            "supervisor.rpc",
             Some("secret".to_string()),
         )
-        .unwrap()
     }

     #[test]
     fn builds_dst_ip_and_pk() {
-        let c_ip = SupervisorClient::new(
-            "http://127.0.0.1:8990",
+        let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990").unwrap());
+        let hub_ip = SupervisorHub::new_with_client(mycelium.clone(), "supervisor.rpc");
+        let c_ip = SupervisorClient::new_with_hub(
+            hub_ip,
             Destination::Ip("2001:db8::1".parse().unwrap()),
-            "supervisor.rpc",
             None,
-        )
-        .unwrap();
+        );
         let v_ip = c_ip.build_dst();
         assert_eq!(v_ip.get("ip").unwrap().as_str().unwrap(), "2001:db8::1");
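For orientation, a minimal sketch of how a caller might wire these pieces together outside of tests. It assumes the constructors shown in this diff (MyceliumClient::new, SupervisorHub::new_with_client, SupervisorClient::new_with_hub) and that job_status now resolves to the outbound Mycelium message id; treat it as an illustration rather than code from the repository:

    use std::sync::Arc;

    // Hypothetical wiring, assuming the crate paths used elsewhere in this diff.
    use herocoordinator::clients::{Destination, MyceliumClient, SupervisorClient, SupervisorHub};

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // One Mycelium client and one hub per process; the hub owns the popMessage loop.
        let mycelium = Arc::new(MyceliumClient::new("http://127.0.0.1:8990")?);
        let hub = SupervisorHub::new_with_client(mycelium, "supervisor.rpc");

        // Clients built from the hub share its id counter and reply routing.
        let client = SupervisorClient::new_with_hub(
            hub,
            Destination::Ip("2001:db8::1".parse::<std::net::IpAddr>()?),
            None, // no secret for read-only calls
        );

        // job_status is now a thin wrapper over job_status_wait and returns the outbound id.
        let out_id = client.job_status("job-123").await?;
        println!("pushed job.status request, outbound message id: {out_id}");
        Ok(())
    }

Because every client built from the same hub shares the hub's id counter and pending-reply map, replies are routed back to the right caller regardless of which client pushed the request.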

@@ -0,0 +1,143 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use serde_json::Value;
use tokio::sync::{Mutex, oneshot};
use crate::clients::mycelium_client::MyceliumClient;
/// Global hub that:
/// - Owns a single MyceliumClient
/// - Spawns a background popMessage loop filtered by topic
/// - Correlates supervisor JSON-RPC replies by inner id to waiting callers via oneshot channels
#[derive(Clone)]
pub struct SupervisorHub {
mycelium: Arc<MyceliumClient>,
topic: String,
pending: Arc<Mutex<HashMap<u64, oneshot::Sender<Value>>>>,
id_counter: Arc<AtomicU64>,
}
impl SupervisorHub {
/// Create a new hub and start the background popMessage task.
/// - base_url: Mycelium JSON-RPC endpoint, e.g. "http://127.0.0.1:8990"
/// - topic: plain-text topic (e.g., "supervisor.rpc")
pub fn new(
base_url: impl Into<String>,
topic: impl Into<String>,
) -> Result<Arc<Self>, crate::clients::MyceliumClientError> {
let myc = Arc::new(MyceliumClient::new(base_url)?);
Ok(Self::new_with_client(myc, topic))
}
/// Variant that reuses an existing Mycelium client.
pub fn new_with_client(mycelium: Arc<MyceliumClient>, topic: impl Into<String>) -> Arc<Self> {
let hub = Arc::new(Self {
mycelium,
topic: topic.into(),
pending: Arc::new(Mutex::new(HashMap::new())),
id_counter: Arc::new(AtomicU64::new(1)),
});
Self::spawn_pop_loop(hub.clone());
hub
}
fn spawn_pop_loop(hub: Arc<Self>) {
tokio::spawn(async move {
loop {
match hub.mycelium.pop_message(Some(false), Some(20), None).await {
Ok(Some(inb)) => {
// Extract and decode payload
let Some(payload_b64) = inb.get("payload").and_then(|v| v.as_str()) else {
// Not a payload-bearing message; ignore
continue;
};
let Ok(raw) = BASE64_STANDARD.decode(payload_b64.as_bytes()) else {
tracing::warn!(target: "supervisor_hub", "Failed to decode inbound payload base64");
continue;
};
let Ok(rpc): Result<Value, _> = serde_json::from_slice(&raw) else {
tracing::warn!(target: "supervisor_hub", "Failed to parse inbound payload JSON");
continue;
};
// Extract inner JSON-RPC id
let inner_id_u64 = match rpc.get("id") {
Some(Value::Number(n)) => n.as_u64(),
Some(Value::String(s)) => s.parse::<u64>().ok(),
_ => None,
};
if let Some(inner_id) = inner_id_u64 {
// Try to deliver to a pending waiter
let sender_opt = {
let mut guard = hub.pending.lock().await;
guard.remove(&inner_id)
};
if let Some(tx) = sender_opt {
let _ = tx.send(rpc);
} else {
tracing::warn!(
target: "supervisor_hub",
inner_id,
payload = %String::from_utf8_lossy(&raw),
"Unmatched supervisor reply; no waiter registered"
);
}
} else {
tracing::warn!(target: "supervisor_hub", "Inbound supervisor reply missing id; dropping");
}
}
Ok(None) => {
// No message; continue polling
continue;
}
Err(e) => {
tracing::warn!(target: "supervisor_hub", error = %e, "popMessage error; backing off");
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
}
}
}
});
}
/// Allocate a new inner supervisor JSON-RPC id.
pub fn next_id(&self) -> u64 {
self.id_counter.fetch_add(1, Ordering::Relaxed)
}
/// Register a oneshot sender for the given inner id and return the receiver side.
pub async fn register_waiter(&self, inner_id: u64) -> oneshot::Receiver<Value> {
let (tx, rx) = oneshot::channel();
let mut guard = self.pending.lock().await;
guard.insert(inner_id, tx);
rx
}
/// Remove a pending waiter for a given id (used to cleanup on timeout).
pub async fn remove_waiter(&self, inner_id: u64) -> Option<oneshot::Sender<Value>> {
let mut guard = self.pending.lock().await;
guard.remove(&inner_id)
}
/// Access to underlying Mycelium client (for pushMessage).
pub fn mycelium(&self) -> Arc<MyceliumClient> {
self.mycelium.clone()
}
/// Access configured topic.
pub fn topic(&self) -> &str {
&self.topic
}
}
impl std::fmt::Debug for SupervisorHub {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SupervisorHub")
.field("topic", &self.topic)
.finish()
}
}
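The hub boils down to a request/reply correlation map: allocate an id, park a oneshot sender under it, and let the single popMessage loop complete whichever waiter matches the inner JSON-RPC id. A self-contained sketch of that pattern, assuming only tokio; ReplyHub and its method names are illustrative, not part of this crate:

    use std::collections::HashMap;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicU64, Ordering};
    use tokio::sync::{Mutex, oneshot};

    // Illustrative stand-in for SupervisorHub's pending-reply bookkeeping.
    struct ReplyHub {
        pending: Mutex<HashMap<u64, oneshot::Sender<String>>>,
        next: AtomicU64,
    }

    impl ReplyHub {
        fn new() -> Arc<Self> {
            Arc::new(Self { pending: Mutex::new(HashMap::new()), next: AtomicU64::new(1) })
        }

        // Mirrors next_id() + register_waiter(): reserve an id and a receiver for its reply.
        async fn register(&self) -> (u64, oneshot::Receiver<String>) {
            let id = self.next.fetch_add(1, Ordering::Relaxed);
            let (tx, rx) = oneshot::channel();
            self.pending.lock().await.insert(id, tx);
            (id, rx)
        }

        // Mirrors the pop loop: route a reply to whoever registered the id, or drop it.
        async fn deliver(&self, id: u64, reply: String) {
            if let Some(tx) = self.pending.lock().await.remove(&id) {
                let _ = tx.send(reply);
            }
        }
    }

    #[tokio::main]
    async fn main() {
        let hub = ReplyHub::new();
        let (id, rx) = hub.register().await;

        // Simulate the background popMessage loop completing the waiter.
        let hub2 = hub.clone();
        tokio::spawn(async move { hub2.deliver(id, "job_queued".to_string()).await });

        // Callers bound the wait and clean up on timeout, like remove_waiter().
        match tokio::time::timeout(std::time::Duration::from_secs(1), rx).await {
            Ok(Ok(reply)) => println!("reply for {id}: {reply}"),
            _ => println!("no reply for {id} in time"),
        }
    }

On timeout the caller removes its own entry, mirroring remove_waiter, so abandoned ids do not accumulate in the pending map.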

@@ -2,8 +2,8 @@ use clap::Parser;
 use std::net::{IpAddr, SocketAddr};
 use std::sync::Arc;
-use tracing::{error, info, warn};
-use tracing_subscriber::{EnvFilter, fmt};
+use tracing::{error, info};
+use tracing_subscriber::EnvFilter;

 #[derive(Debug, Clone, Parser)]
 #[command(
     name = "herocoordinator",
@@ -25,8 +25,8 @@ struct Cli {
         long = "mycelium-port",
         short = 'p',
         env = "MYCELIUM_PORT",
-        default_value_t = 9651u16,
-        help = "Port for Mycelium JSON-RPC (default: 9651)"
+        default_value_t = 8990u16,
+        help = "Port for Mycelium JSON-RPC (default: 8990)"
     )]
     mycelium_port: u16,
@@ -99,17 +99,24 @@ async fn main() {
     // Shared application state
     let state = Arc::new(herocoordinator::rpc::AppState::new(service));

-    // Start router workers (auto-discovered contexts)
+    // Start router workers (auto-discovered contexts) using a single global SupervisorHub (no separate inbound listener)
     {
         let base_url = format!("http://{}:{}", cli.mycelium_ip, cli.mycelium_port);
+        let hub = herocoordinator::clients::SupervisorHub::new(
+            base_url.clone(),
+            "supervisor.rpc".to_string(),
+        )
+        .expect("Failed to initialize SupervisorHub");
         let cfg = herocoordinator::router::RouterConfig {
             context_ids: Vec::new(), // ignored by start_router_auto
             concurrency: 32,
             base_url,
             topic: "supervisor.rpc".to_string(),
+            sup_hub: hub.clone(),
             transport_poll_interval_secs: 2,
             transport_poll_timeout_secs: 300,
         };
+        // Per-context outbound delivery loops (replies handled by SupervisorHub)
         let _auto_handle = herocoordinator::router::start_router_auto(service_for_router, cfg);
     }

@@ -59,6 +59,18 @@ pub enum TransportStatus {
     Failed,
 }

+impl std::fmt::Display for TransportStatus {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            TransportStatus::Queued => f.write_str("queued"),
+            TransportStatus::Sent => f.write_str("sent"),
+            TransportStatus::Delivered => f.write_str("delivered"),
+            TransportStatus::Read => f.write_str("read"),
+            TransportStatus::Failed => f.write_str("failed"),
+        }
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub enum MessageFormatType {
     Html,

@@ -18,6 +18,8 @@ pub struct Runner {
     pub script_type: ScriptType,
     /// If this is true, the runner also listens on a local redis queue
     pub local: bool,
+    /// Optional secret used for authenticated supervisor calls (if required)
+    pub secret: Option<String>,
     pub created_at: Timestamp,
     pub updated_at: Timestamp,
 }

@@ -1,26 +1,111 @@
-use std::{collections::HashSet, sync::Arc};
+use std::{
+    collections::{HashMap, HashSet},
+    sync::Arc,
+};
+
+use base64::Engine;
+use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
 use serde_json::{Value, json};
-use tokio::sync::Semaphore;
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+use tokio::sync::{Mutex, Semaphore};

 use crate::{
-    clients::{Destination, MyceliumClient, SupervisorClient},
+    clients::{Destination, MyceliumClient, SupervisorClient, SupervisorHub},
     models::{Job, JobStatus, Message, MessageStatus, ScriptType, TransportStatus},
     service::AppService,
 };
-use tracing::{error, info, warn};
+use tracing::{error, info};

 #[derive(Clone, Debug)]
 pub struct RouterConfig {
     pub context_ids: Vec<u32>,
     pub concurrency: usize,
     pub base_url: String, // e.g. http://127.0.0.1:8990
     pub topic: String,    // e.g. "supervisor.rpc"
+    pub sup_hub: Arc<SupervisorHub>, // global supervisor hub for replies
     // Transport status polling configuration
     pub transport_poll_interval_secs: u64, // e.g. 2
     pub transport_poll_timeout_secs: u64,  // e.g. 300 (5 minutes)
 }
/*
SupervisorClient reuse cache (Router-local):
Rationale:
- SupervisorClient maintains an internal JSON-RPC id_counter per instance.
- Rebuilding a client for each message resets this counter, causing inner JSON-RPC ids to restart at 1.
- We reuse one SupervisorClient per (destination, topic, secret) to preserve monotonically increasing ids.
Scope:
- Cache is per Router loop (and a separate one for the inbound listener).
- If cross-loop/process reuse becomes necessary later, promote to a process-global cache.
Keying:
- Key: destination + topic + secret-presence (secret content hashed; not stored in plaintext).
Concurrency:
- tokio::Mutex protects a HashMap<String, Arc<SupervisorClient>>.
- Values are Arc so call sites clone cheaply and share the same id_counter.
*/
#[derive(Clone)]
struct SupervisorClientCache {
map: Arc<Mutex<HashMap<String, Arc<SupervisorClient>>>>,
}
impl SupervisorClientCache {
fn new() -> Self {
Self {
map: Arc::new(Mutex::new(HashMap::new())),
}
}
fn make_key(dest: &Destination, topic: &str, secret: &Option<String>) -> String {
let dst = match dest {
Destination::Ip(ip) => format!("ip:{ip}"),
Destination::Pk(pk) => format!("pk:{pk}"),
};
// Hash the secret to avoid storing plaintext in keys while still differentiating values
let sec_hash = match secret {
Some(s) if !s.is_empty() => {
let mut hasher = DefaultHasher::new();
s.hash(&mut hasher);
format!("s:{}", hasher.finish())
}
_ => "s:none".to_string(),
};
format!("{dst}|t:{topic}|{sec_hash}")
}
async fn get_or_create(
&self,
hub: Arc<SupervisorHub>,
dest: Destination,
topic: String,
secret: Option<String>,
) -> Arc<SupervisorClient> {
let key = Self::make_key(&dest, &topic, &secret);
{
let guard = self.map.lock().await;
if let Some(existing) = guard.get(&key) {
tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup");
return existing.clone();
}
}
let mut guard = self.map.lock().await;
if let Some(existing) = guard.get(&key) {
tracing::debug!(target: "router", cache="supervisor", hit=true, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache lookup (double-checked)");
return existing.clone();
}
let client = Arc::new(SupervisorClient::new_with_hub(hub, dest, secret.clone()));
guard.insert(key, client.clone());
tracing::debug!(target: "router", cache="supervisor", hit=false, %topic, secret = %if secret.is_some() { "set" } else { "none" }, "SupervisorClient cache insert");
client
}
}
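The cache key combines destination, topic, and a hash of the secret, so two messages to the same runner reuse one client (and one id sequence) while a changed secret forces a fresh entry. A std-only sketch of that keying, with illustrative inputs; DefaultHasher is process-local, which is sufficient here since keys never leave the process:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Illustrative re-creation of the shape of SupervisorClientCache::make_key.
    fn make_key(dest: &str, topic: &str, secret: &Option<String>) -> String {
        let sec_hash = match secret {
            Some(s) if !s.is_empty() => {
                let mut h = DefaultHasher::new();
                s.hash(&mut h);
                format!("s:{}", h.finish())
            }
            _ => "s:none".to_string(),
        };
        format!("{dest}|t:{topic}|{sec_hash}")
    }

    fn main() {
        let a = make_key("pk:demo", "supervisor.rpc", &Some("secret".into()));
        let b = make_key("pk:demo", "supervisor.rpc", &Some("secret".into()));
        let c = make_key("pk:demo", "supervisor.rpc", &None);
        assert_eq!(a, b); // same destination/topic/secret -> same cached client
        assert_ne!(a, c); // secret presence changes the key
        println!("{a}\n{c}");
    }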
/// Start background router loops, one per context. /// Start background router loops, one per context.
/// Each loop: /// Each loop:
/// - BRPOP msg_out with 1s timeout /// - BRPOP msg_out with 1s timeout
@@ -36,16 +121,11 @@ pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::
         let handle = tokio::spawn(async move {
             let sem = Arc::new(Semaphore::new(cfg_cloned.concurrency));
-            // Create a shared Mycelium client for this context loop (retry until available)
-            let mycelium = loop {
-                match MyceliumClient::new(cfg_cloned.base_url.clone()) {
-                    Ok(c) => break Arc::new(c),
-                    Err(e) => {
-                        error!(context_id=ctx_id, error=%e, "MyceliumClient init error");
-                        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
-                    }
-                }
-            };
+            // Use the global SupervisorHub and its Mycelium client
+            let sup_hub = cfg_cloned.sup_hub.clone();
+            let mycelium = sup_hub.mycelium();
+            let cache = Arc::new(SupervisorClientCache::new());

             loop {
                 // Pop next message key (blocking with timeout)
@@ -67,12 +147,21 @@ pub fn start_router(service: AppService, cfg: RouterConfig) -> Vec<tokio::task::
                     let cfg_task = cfg_cloned.clone();
                     tokio::spawn({
                         let mycelium = mycelium.clone();
+                        let cache = cache.clone();
+                        let sup_hub = sup_hub.clone();
                         async move {
                             // Ensure permit is dropped at end of task
                             let _permit = permit;
-                            if let Err(e) =
-                                deliver_one(&service_task, &cfg_task, ctx_id, &key, mycelium)
-                                    .await
+                            if let Err(e) = deliver_one(
+                                &service_task,
+                                &cfg_task,
+                                ctx_id,
+                                &key,
+                                mycelium,
+                                sup_hub,
+                                cache.clone(),
+                            )
+                            .await
                             {
                                 error!(context_id=ctx_id, key=%key, error=%e, "Delivery error");
                             }
@@ -102,6 +191,8 @@ async fn deliver_one(
     context_id: u32,
     msg_key: &str,
     mycelium: Arc<MyceliumClient>,
+    sup_hub: Arc<SupervisorHub>,
+    cache: Arc<SupervisorClientCache>,
 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
     // Parse "message:{caller_id}:{id}"
     let (caller_id, id) = parse_message_key(msg_key)
@@ -140,19 +231,39 @@ async fn deliver_one(
     // Keep clones for poller usage
     let dest_for_poller = dest.clone();
     let topic_for_poller = cfg.topic.clone();
-    let client = SupervisorClient::new_with_client(
-        mycelium.clone(),
-        dest.clone(),
-        cfg.topic.clone(),
-        None, // secret
-    );
+    let secret_for_poller = runner.secret.clone();
+    let client = cache
+        .get_or_create(
+            sup_hub.clone(),
+            dest.clone(),
+            cfg.topic.clone(),
+            runner.secret.clone(),
+        )
+        .await;

     // Build supervisor method and params from Message
     let method = msg.message.clone();
     let params = build_params(&msg)?;

     // Send
-    let out_id = client.call(&method, params).await?;
+    // If this is a job.run and we have a secret configured on the client,
+    // prefer the typed wrapper that injects the secret into inner supervisor params,
+    // and await the reply to capture job_queued immediately.
+    let (out_id, reply_opt) = if method == "job.run" {
+        if let Some(j) = msg.job.first() {
+            let jv = job_to_json(j)?;
+            // Returns (outbound message id, reply envelope)
+            let (out, reply) = client.job_run_wait(jv).await?;
+            (out, Some(reply))
+        } else {
+            // Fallback: no embedded job, use the generic call (await reply, discard)
+            let out = client.call(&method, params).await?;
+            (out, None)
+        }
+    } else {
+        let out = client.call(&method, params).await?;
+        (out, None)
+    };
// Store transport id and initial Sent status
let _ = service
@@ -170,6 +281,59 @@ async fn deliver_one(
.update_message_status(context_id, caller_id, id, MessageStatus::Acknowledged)
.await?;
// If we got a job.run reply, interpret job_queued immediately
if let (Some(reply), Some(job_id)) = (reply_opt, msg.job.first().map(|j| j.id)) {
let result_opt = reply.get("result");
let error_opt = reply.get("error");
// Handle job.run success (job_queued)
let is_job_queued = result_opt
.and_then(|res| {
if res.get("job_queued").is_some() {
Some(true)
} else if let Some(s) = res.as_str() {
Some(s == "job_queued")
} else {
None
}
})
.unwrap_or(false);
if is_job_queued {
let _ = service
.update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Dispatched)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Supervisor reply for job {}: job_queued (processed synchronously)",
job_id
)],
)
.await;
} else if let Some(err_obj) = error_opt {
let _ = service
.update_job_status_unchecked(context_id, caller_id, job_id, JobStatus::Error)
.await;
let _ = service
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Supervisor error for job {}: {} (processed synchronously)",
job_id, err_obj
)],
)
.await;
}
}
// No correlation map needed; replies are handled synchronously via SupervisorHub
// Spawn transport-status poller
{
let service_poll = service.clone();
@@ -177,12 +341,6 @@ async fn deliver_one(
let poll_timeout = std::time::Duration::from_secs(cfg.transport_poll_timeout_secs);
let out_id_cloned = out_id.clone();
let mycelium = mycelium.clone();
-// Determine reply timeout for supervisor job.result: prefer message.timeout_result, fallback to router config timeout
-let job_result_reply_timeout: u64 = if msg.timeout_result > 0 {
-    msg.timeout_result as u64
-} else {
-    cfg.transport_poll_timeout_secs
-};
tokio::spawn(async move {
let start = std::time::Instant::now();
@@ -194,6 +352,8 @@ async fn deliver_one(
let job_id_opt = job_id_opt;
let mut last_status: Option<TransportStatus> = Some(TransportStatus::Sent);
// Ensure we only request supervisor job.status or job.result once per outbound message
let mut requested_job_check: bool = false;
loop {
if start.elapsed() >= poll_timeout {
@@ -226,91 +386,240 @@ async fn deliver_one(
// Stop on terminal states
if matches!(s, TransportStatus::Delivered | TransportStatus::Read) {
-// On Read, fetch supervisor job.status and update local job/message if terminal
-if matches!(s, TransportStatus::Read) {
-    if let Some(job_id) = job_id_opt {
-        let sup = SupervisorClient::new_with_client(
-            client.clone(),
-            sup_dest.clone(),
-            sup_topic.clone(),
-            None,
-        );
-        match sup.job_status_sync(job_id.to_string(), 10).await {
-            Ok(remote_status) => {
-                if let Some((mapped, terminal)) =
-                    map_supervisor_job_status(&remote_status)
-                {
-                    if terminal {
-                        let _ = service_poll
-                            .update_job_status_unchecked(
-                                context_id,
-                                caller_id,
-                                job_id,
-                                mapped.clone(),
-                            )
-                            .await;
-                        // After terminal status, fetch supervisor job.result and store into Job.result
-                        let sup = SupervisorClient::new_with_client(
-                            client.clone(),
-                            sup_dest.clone(),
-                            sup_topic.clone(),
-                            None,
-                        );
-                        match sup
-                            .job_result_sync(
-                                job_id.to_string(),
-                                job_result_reply_timeout,
-                            )
-                            .await
-                        {
-                            Ok(result_map) => {
-                                // Persist the result into the Job.result map (merge)
-                                let _ = service_poll
-                                    .update_job_result_merge_unchecked(
-                                        context_id,
-                                        caller_id,
-                                        job_id,
-                                        result_map.clone(),
-                                    )
-                                    .await;
-                                // Log which key was stored (success or error)
-                                let key = result_map
-                                    .keys()
-                                    .next()
-                                    .cloned()
-                                    .unwrap_or_else(|| {
-                                        "unknown".to_string()
-                                    });
-                                let _ = service_poll
-                                    .append_message_logs(
-                                        context_id,
-                                        caller_id,
-                                        id,
-                                        vec![format!(
-                                            "Stored supervisor job.result for job {} ({})",
-                                            job_id, key
-                                        )],
-                                    )
-                                    .await;
if let Some(job_id) = job_id_opt {
// First consult Redis for the latest job state in case we already have a terminal update
match service_poll.load_job(context_id, caller_id, job_id).await {
Ok(job) => {
match job.status() {
JobStatus::Finished | JobStatus::Error => {
// Local job is already terminal; skip supervisor job.status
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Local job {} status is terminal ({:?}); skipping supervisor job.status",
job_id,
job.status()
)],
)
.await;
// If result is still empty, immediately request supervisor job.result
if job.result.is_empty() {
let sup = cache
.get_or_create(
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
match sup
.job_result_wait(job_id.to_string())
.await
{
Ok((_out2, reply2)) => {
// Interpret reply synchronously: success/error/bare string
let res = reply2.get("result");
if let Some(obj) =
res.and_then(|v| v.as_object())
{
if let Some(s) = obj
.get("success")
.and_then(|v| v.as_str())
{
let mut patch = std::collections::HashMap::new();
patch.insert(
"success".to_string(),
s.to_string(),
);
let _ = service_poll
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
// Also mark job as Finished so the flow can progress (ignore invalid transitions)
let _ = service_poll
.update_job_status_unchecked(
context_id, caller_id, job_id, JobStatus::Finished,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Updated job {} status to Finished (sync)", job_id
)],
)
.await;
// Existing log about storing result
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} (success, sync)",
job_id
)],
)
.await;
} else if let Some(s) = obj
.get("error")
.and_then(|v| v.as_str())
{
let mut patch = std::collections::HashMap::new();
patch.insert(
"error".to_string(),
s.to_string(),
);
let _ = service_poll
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
// Also mark job as Error so the flow can handle failure (ignore invalid transitions)
let _ = service_poll
.update_job_status_unchecked(
context_id, caller_id, job_id, JobStatus::Error,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Updated job {} status to Error (sync)", job_id
)],
)
.await;
// Existing log about storing result
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} (error, sync)",
job_id
)],
)
.await;
}
} else if let Some(s) =
res.and_then(|v| v.as_str())
{
let mut patch =
std::collections::HashMap::new(
);
patch.insert(
"success".to_string(),
s.to_string(),
);
let _ = service_poll
.update_job_result_merge_unchecked(
context_id, caller_id, job_id, patch,
)
.await;
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
// Also mark job as Finished so the flow can progress (ignore invalid transitions)
let _ = service_poll
.update_job_status_unchecked(
context_id, caller_id, job_id, JobStatus::Finished,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Updated job {} status to Finished (sync)", job_id
)],
)
.await;
// Existing log about storing result
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Stored supervisor job.result for job {} (success, sync)",
job_id
)],
)
.await;
} else {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec!["Supervisor job.result reply missing recognizable fields".to_string()],
)
.await;
}
} }
Err(e) => {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
-"job.result fetch error for job {}: {}",
"job.result request error for job {}: {}",
job_id, e
)],
)
.await;
}
}
-// Mark message as processed
} else {
// Result already present; nothing to fetch
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Job {} already has result; no supervisor calls needed",
job_id
)],
)
.await;
}
// Mark processed and stop polling for this message
let _ = service_poll
.update_message_status(
context_id,
caller_id,
@@ -318,46 +627,213 @@ async fn deliver_one(
MessageStatus::Processed,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
-"Supervisor job.status for job {} -> {} (mapped to {:?})",
-"job_id, remote_status, mapped"
"Terminal job {} detected; stopping transport polling",
job_id
)],
)
.await;
break;
}
// Not terminal yet -> request supervisor job.status as before
_ => {
let sup = cache
.get_or_create(
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
match sup.job_status_wait(job_id.to_string()).await
{
Ok((_out_id, reply_status)) => {
// Interpret status reply synchronously
let result_opt = reply_status.get("result");
let error_opt = reply_status.get("error");
if let Some(err_obj) = error_opt {
let _ = service_poll
.update_job_status_unchecked(
context_id,
caller_id,
job_id,
JobStatus::Error,
)
.await;
let _ = service_poll
.append_message_logs(
context_id, caller_id, id,
vec![format!(
"Supervisor error for job {}: {} (sync)",
job_id, err_obj
)],
)
.await;
} else if let Some(res) = result_opt {
let status_candidate = res
.get("status")
.and_then(|v| v.as_str())
.or_else(|| res.as_str());
if let Some(remote_status) =
status_candidate
{
if let Some((mapped, terminal)) =
map_supervisor_job_status(
remote_status,
)
{
let _ = service_poll
.update_job_status_unchecked(
context_id, caller_id, job_id, mapped.clone(),
)
.await;
let _ = service_poll
.append_message_logs(
context_id, caller_id, id,
vec![format!(
"Supervisor job.status for job {} -> {} (mapped to {:?}, sync)",
job_id, remote_status, mapped
)],
)
.await;
// If terminal, request job.result now (handled above for local terminal case)
if terminal {
// trigger job.result only if result empty to avoid spam
if let Ok(j_after) =
service_poll
.load_job(
context_id,
caller_id,
job_id,
)
.await
{
if j_after
.result
.is_empty()
{
let sup2 = cache
.get_or_create(
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
let _ = sup2.job_result_wait(job_id.to_string()).await
.and_then(|(_oid, reply2)| {
// Minimal parse and store
let res2 = reply2.get("result");
if let Some(obj) = res2.and_then(|v| v.as_object()) {
if let Some(s) = obj.get("success").and_then(|v| v.as_str()) {
let mut patch = std::collections::HashMap::new();
patch.insert("success".to_string(), s.to_string());
tokio::spawn({
let service_poll = service_poll.clone();
async move {
let _ = service_poll.update_job_result_merge_unchecked(context_id, caller_id, job_id, patch).await;
}
});
}
}
Ok((String::new(), Value::Null))
});
}
}
// Mark processed and stop polling for this message
let _ = service_poll
.update_message_status(
context_id,
caller_id,
id,
MessageStatus::Processed,
)
.await;
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Terminal job {} detected from supervisor status; stopping transport polling",
job_id
)],
)
.await;
break;
}
}
}
}
}
Err(e) => {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"job.status request error: {}",
e
)],
)
.await;
}
} }
} else { }
}
}
// If we cannot load the job, fall back to requesting job.status
Err(_) => {
let sup = cache
.get_or_create(
sup_hub.clone(),
sup_dest.clone(),
sup_topic.clone(),
secret_for_poller.clone(),
)
.await;
match sup.job_status_wait(job_id.to_string()).await {
Ok((_out_id, _reply_status)) => {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
"Requested supervisor job.status for job {} (fallback; load_job failed, sync)",
job_id
)],
)
.await;
}
Err(e) => {
let _ = service_poll
.append_message_logs(
context_id,
caller_id,
id,
vec![format!(
-"Unknown supervisor status '{}' for job {}",
-"remote_status, job_id"
"job.status request error: {}",
e
)],
)
.await;
}
}
-Err(e) => {
-    let _ = service_poll
-        .append_message_logs(
-            context_id,
-            caller_id,
-            id,
-            vec![format!("job.status sync error: {}", e)],
-        )
-        .await;
-}
}
}
// Ensure we only do this once
requested_job_check = true;
}
-break;
// break;
}
if matches!(s, TransportStatus::Failed) {
let _ = service_poll
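The poller accepts two reply shapes for job.status: an object result carrying a status field and a bare string result. A self-contained sketch of that shape handling; status_from_reply is an illustrative helper, not one defined in the router:

    use serde_json::{Value, json};

    // Extract the status string from a supervisor job.status reply envelope,
    // accepting both an object result and a bare string result.
    fn status_from_reply(reply: &Value) -> Option<String> {
        let res = reply.get("result")?;
        res.get("status")
            .and_then(|v| v.as_str())
            .or_else(|| res.as_str())
            .map(|s| s.to_string())
    }

    fn main() {
        let object_form = json!({"jsonrpc": "2.0", "id": 7, "result": {"status": "finished"}});
        let string_form = json!({"jsonrpc": "2.0", "id": 8, "result": "finished"});
        assert_eq!(status_from_reply(&object_form).as_deref(), Some("finished"));
        assert_eq!(status_from_reply(&string_form).as_deref(), Some("finished"));
        println!("both reply shapes map to: {}", status_from_reply(&object_form).unwrap());
    }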

@@ -150,6 +150,8 @@ pub struct RunnerCreate {
     /// The script type this runner executes (used for routing)
     pub script_type: ScriptType,
     pub local: bool,
+    /// Optional secret used for authenticated supervisor calls (if required)
+    pub secret: Option<String>,
 }

 impl RunnerCreate {
     pub fn into_domain(self) -> Runner {
@@ -162,6 +164,7 @@ impl RunnerCreate {
             topic,
             script_type,
             local,
+            secret,
         } = self;

         Runner {
@@ -171,6 +174,7 @@ impl RunnerCreate {
             topic,
             script_type,
             local,
+            secret,
             created_at: ts,
             updated_at: ts,
         }

@@ -672,10 +672,10 @@ impl AppService {
         let allowed = match current {
             JobStatus::Dispatched => matches!(
                 new_status,
-                JobStatus::WaitingForPrerequisites | JobStatus::Started | JobStatus::Error
+                JobStatus::WaitingForPrerequisites | JobStatus::Started | JobStatus::Finished | JobStatus::Error
             ),
             JobStatus::WaitingForPrerequisites => {
-                matches!(new_status, JobStatus::Started | JobStatus::Error)
+                matches!(new_status, JobStatus::Started | JobStatus::Finished | JobStatus::Error)
             }
             JobStatus::Started => matches!(new_status, JobStatus::Finished | JobStatus::Error),
             JobStatus::Finished | JobStatus::Error => false,
@@ -714,10 +714,10 @@ impl AppService {
         let allowed = match current {
             JobStatus::Dispatched => matches!(
                 new_status,
-                JobStatus::WaitingForPrerequisites | JobStatus::Started | JobStatus::Error
+                JobStatus::WaitingForPrerequisites | JobStatus::Started | JobStatus::Finished | JobStatus::Error
             ),
             JobStatus::WaitingForPrerequisites => {
-                matches!(new_status, JobStatus::Started | JobStatus::Error)
+                matches!(new_status, JobStatus::Started | JobStatus::Finished | JobStatus::Error)
             }
             JobStatus::Started => matches!(new_status, JobStatus::Finished | JobStatus::Error),
             JobStatus::Finished | JobStatus::Error => false,
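The widened transition table now lets Dispatched and WaitingForPrerequisites jump straight to Finished when a terminal result arrives with the supervisor reply. A standalone sketch of the predicate; it re-declares a local JobStatus instead of importing the crate's:

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum JobStatus { Dispatched, WaitingForPrerequisites, Started, Finished, Error }

    // Mirrors the allowed-transition check after this change.
    fn allowed(current: JobStatus, new_status: JobStatus) -> bool {
        use JobStatus::*;
        match current {
            Dispatched => matches!(new_status, WaitingForPrerequisites | Started | Finished | Error),
            WaitingForPrerequisites => matches!(new_status, Started | Finished | Error),
            Started => matches!(new_status, Finished | Error),
            Finished | Error => false,
        }
    }

    fn main() {
        use JobStatus::*;
        // Newly permitted: a job that was only dispatched can be marked Finished directly
        // when the supervisor reply already reports a terminal result.
        assert!(allowed(Dispatched, Finished));
        assert!(allowed(WaitingForPrerequisites, Finished));
        // Terminal states stay terminal.
        assert!(!allowed(Finished, Started));
        println!("transition table checks passed");
    }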
@@ -1161,6 +1161,34 @@ impl AppService {
pub async fn scan_runners(&self, context_id: u32) -> Result<Vec<Runner>, BoxError> { pub async fn scan_runners(&self, context_id: u32) -> Result<Vec<Runner>, BoxError> {
self.redis.scan_runners(context_id).await self.redis.scan_runners(context_id).await
} }
/// Correlation map: store mapping from inner supervisor JSON-RPC id to context/caller/job/message.
pub async fn supcorr_set(
&self,
inner_id: u64,
context_id: u32,
caller_id: u32,
job_id: u32,
message_id: u32,
) -> Result<(), BoxError> {
self.redis
.supcorr_set(inner_id, context_id, caller_id, job_id, message_id)
.await
.map_err(Into::into)
}
/// Correlation map: load mapping by inner supervisor JSON-RPC id.
pub async fn supcorr_get(
&self,
inner_id: u64,
) -> Result<Option<(u32, u32, u32, u32)>, BoxError> {
self.redis.supcorr_get(inner_id).await.map_err(Into::into)
}
/// Correlation map: delete mapping by inner supervisor JSON-RPC id.
pub async fn supcorr_del(&self, inner_id: u64) -> Result<(), BoxError> {
self.redis.supcorr_del(inner_id).await.map_err(Into::into)
}
 }

 /// Auto-discovery helpers for contexts (wrappers over RedisDriver)

@@ -10,7 +10,7 @@ use crate::models::{
     Actor, Context, Flow, FlowStatus, Job, JobStatus, Message, MessageStatus, Runner,
     TransportStatus,
 };
-use tracing::{debug, error, info, trace, warn};
+use tracing::{error, warn};

 type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
@@ -122,7 +122,7 @@ impl RedisDriver {
warn!(db=%db, key=%key, error=%e, "DEL before HSET failed"); warn!(db=%db, key=%key, error=%e, "DEL before HSET failed");
} }
// Write all fields // Write all fields
-        let _: usize = cm.hset_multiple(key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET multiple failed"); error!(db=%db, key=%key, error=%e, "HSET multiple failed");
e e
})?; })?;
@@ -323,7 +323,7 @@ impl RedisDriver {
("status".to_string(), status_str), ("status".to_string(), status_str),
("updated_at".to_string(), ts.to_string()), ("updated_at".to_string(), ts.to_string()),
]; ];
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET update_job_status failed"); error!(db=%db, key=%key, error=%e, "HSET update_job_status failed");
e e
})?; })?;
@@ -372,7 +372,7 @@ impl RedisDriver {
("status".to_string(), status_str), ("status".to_string(), status_str),
("updated_at".to_string(), ts.to_string()), ("updated_at".to_string(), ts.to_string()),
]; ];
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET update_flow_status failed"); error!(db=%db, key=%key, error=%e, "HSET update_flow_status failed");
e e
})?; })?;
@@ -400,7 +400,7 @@ impl RedisDriver {
("status".to_string(), status_str), ("status".to_string(), status_str),
("updated_at".to_string(), ts.to_string()), ("updated_at".to_string(), ts.to_string()),
]; ];
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET update_message_status failed"); error!(db=%db, key=%key, error=%e, "HSET update_message_status failed");
e e
})?; })?;
@@ -437,7 +437,7 @@ impl RedisDriver {
let ts = crate::time::current_timestamp(); let ts = crate::time::current_timestamp();
pairs.push(("updated_at".to_string(), ts.to_string())); pairs.push(("updated_at".to_string(), ts.to_string()));
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET update_message_transport failed"); error!(db=%db, key=%key, error=%e, "HSET update_message_transport failed");
e e
})?; })?;
@@ -473,7 +473,7 @@ impl RedisDriver {
("env_vars".to_string(), env_vars_str), ("env_vars".to_string(), env_vars_str),
("updated_at".to_string(), ts.to_string()), ("updated_at".to_string(), ts.to_string()),
]; ];
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET update_flow_env_vars_merge failed"); error!(db=%db, key=%key, error=%e, "HSET update_flow_env_vars_merge failed");
e e
})?; })?;
@@ -509,7 +509,7 @@ impl RedisDriver {
("result".to_string(), result_str), ("result".to_string(), result_str),
("updated_at".to_string(), ts.to_string()), ("updated_at".to_string(), ts.to_string()),
]; ];
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET update_flow_result_merge failed"); error!(db=%db, key=%key, error=%e, "HSET update_flow_result_merge failed");
e e
})?; })?;
@@ -546,7 +546,7 @@ impl RedisDriver {
("env_vars".to_string(), env_vars_str), ("env_vars".to_string(), env_vars_str),
("updated_at".to_string(), ts.to_string()), ("updated_at".to_string(), ts.to_string()),
]; ];
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET update_job_env_vars_merge failed"); error!(db=%db, key=%key, error=%e, "HSET update_job_env_vars_merge failed");
e e
})?; })?;
@@ -583,7 +583,7 @@ impl RedisDriver {
("result".to_string(), result_str), ("result".to_string(), result_str),
("updated_at".to_string(), ts.to_string()), ("updated_at".to_string(), ts.to_string()),
]; ];
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET update_job_result_merge failed"); error!(db=%db, key=%key, error=%e, "HSET update_job_result_merge failed");
e e
})?; })?;
@@ -601,7 +601,7 @@ impl RedisDriver {
("jobs".to_string(), jobs_str), ("jobs".to_string(), jobs_str),
("updated_at".to_string(), ts.to_string()), ("updated_at".to_string(), ts.to_string()),
]; ];
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET update_flow_jobs_set failed"); error!(db=%db, key=%key, error=%e, "HSET update_flow_jobs_set failed");
e e
})?; })?;
@@ -635,7 +635,7 @@ impl RedisDriver {
("logs".to_string(), logs_str), ("logs".to_string(), logs_str),
("updated_at".to_string(), ts.to_string()), ("updated_at".to_string(), ts.to_string()),
]; ];
-        let _: usize = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
+        let _: () = cm.hset_multiple(&key, &pairs).await.map_err(|e| {
error!(db=%db, key=%key, error=%e, "HSET append_message_logs failed"); error!(db=%db, key=%key, error=%e, "HSET append_message_logs failed");
e e
})?; })?;
@@ -751,4 +751,77 @@ impl RedisDriver {
out.sort_unstable(); out.sort_unstable();
Ok(out) Ok(out)
} }
// -----------------------------
// Supervisor correlation mapping (DB 0)
// Key: "supcorr:{inner_id_decimal}"
// Value: JSON {"context_id":u32,"caller_id":u32,"job_id":u32,"message_id":u32}
// TTL: 1 hour to avoid leaks in case of crashes
pub async fn supcorr_set(
&self,
inner_id: u64,
context_id: u32,
caller_id: u32,
job_id: u32,
message_id: u32,
) -> Result<()> {
let mut cm = self.manager_for_db(0).await?;
let key = format!("supcorr:{}", inner_id);
let val = serde_json::json!({
"context_id": context_id,
"caller_id": caller_id,
"job_id": job_id,
"message_id": message_id,
})
.to_string();
// SET key val EX 3600
let _: () = redis::cmd("SET")
.arg(&key)
.arg(&val)
.arg("EX")
.arg(3600)
.query_async(&mut cm)
.await
.map_err(|e| {
error!(db=0, key=%key, error=%e, "SET supcorr_set failed");
e
})?;
Ok(())
}
pub async fn supcorr_get(&self, inner_id: u64) -> Result<Option<(u32, u32, u32, u32)>> {
let mut cm = self.manager_for_db(0).await?;
let key = format!("supcorr:{}", inner_id);
let res: Option<String> = redis::cmd("GET")
.arg(&key)
.query_async(&mut cm)
.await
.map_err(|e| {
error!(db=0, key=%key, error=%e, "GET supcorr_get failed");
e
})?;
if let Some(s) = res {
let v: Value = serde_json::from_str(&s)?;
let ctx = v.get("context_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
let caller = v.get("caller_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
let job = v.get("job_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
let msg = v.get("message_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
return Ok(Some((ctx, caller, job, msg)));
}
Ok(None)
}
pub async fn supcorr_del(&self, inner_id: u64) -> Result<()> {
let mut cm = self.manager_for_db(0).await?;
let key = format!("supcorr:{}", inner_id);
let _: i64 = redis::cmd("DEL")
.arg(&key)
.query_async(&mut cm)
.await
.map_err(|e| {
error!(db=0, key=%key, error=%e, "DEL supcorr_del failed");
e
})?;
Ok(())
}
} }
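The correlation record is a small JSON blob stored under a supcorr:{inner_id} key with a one-hour TTL. A serde_json-only sketch of building and reading that record; the printed SET line just mirrors the command the driver issues, no Redis connection is involved:

    use serde_json::{Value, json};

    // Build and parse the correlation record stored under "supcorr:{inner_id}".
    fn main() {
        let inner_id: u64 = 42;
        let key = format!("supcorr:{}", inner_id);
        let val = json!({
            "context_id": 1u32,
            "caller_id": 2u32,
            "job_id": 3u32,
            "message_id": 4u32,
        })
        .to_string();
        // The driver issues: SET <key> <val> EX 3600, and later GET/DEL on the same key.
        println!("SET {key} {val} EX 3600");

        let parsed: Value = serde_json::from_str(&val).unwrap();
        let job_id = parsed.get("job_id").and_then(|x| x.as_u64()).unwrap_or(0) as u32;
        assert_eq!(job_id, 3);
    }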