docs: Initial documentation cleanup of parallel agent MVP; also collapses nested `if`s into let-chains in TaskQueue

This commit is contained in:
2026-02-17 14:30:28 -07:00
parent cdafdff281
commit 7f267a10a1
4 changed files with 13 additions and 76 deletions
-23
View File
@@ -1,10 +1,5 @@
use serde::{Deserialize, Serialize};
/// A message envelope routed between agents.
///
/// Agents communicate by sending `Envelope`s to each other's mailboxes.
/// The sender fires and forgets; the receiver drains its inbox between
/// LLM turns via the `check_inbox` tool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Envelope {
pub from: String,
@@ -13,11 +8,6 @@ pub struct Envelope {
pub timestamp: chrono::DateTime<chrono::Utc>,
}
/// The content of an inter-agent message.
///
/// Separates the **control plane** (shutdown signals, task lifecycle events)
/// from the **data plane** (free-form text). Control-plane messages are
/// processed before data-plane messages to prevent race conditions.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum EnvelopePayload {
@@ -27,13 +17,6 @@ pub enum EnvelopePayload {
ShutdownApproved,
}
/// A per-agent inbox that collects incoming messages.
///
/// Backed by a `Vec` behind a `parking_lot::Mutex` so it can be shared
/// between the supervisor (which delivers messages) and the agent's tool
/// handler (which drains them). We use `parking_lot::Mutex` to match the
/// locking convention used elsewhere in Loki (`parking_lot::RwLock` for
/// GlobalConfig).
#[derive(Debug, Default)]
pub struct Inbox {
messages: parking_lot::Mutex<Vec<Envelope>>,
@@ -50,18 +33,12 @@ impl Inbox {
self.messages.lock().push(envelope);
}
/// Drain all pending messages, returning them sorted with control-plane
/// messages first (shutdown, task events) then data-plane (text).
/// This ordering prevents the class of bugs where a text message
/// references state that a control message was supposed to set up.
pub fn drain(&self) -> Vec<Envelope> {
let mut msgs = {
let mut guard = self.messages.lock();
std::mem::take(&mut *guard)
};
// Stable partition: control messages first, then data messages,
// preserving relative order within each group.
msgs.sort_by_key(|e| match &e.payload {
EnvelopePayload::ShutdownRequest { .. } => 0,
EnvelopePayload::ShutdownApproved => 0,
-2
View File
@@ -35,8 +35,6 @@ pub struct AgentHandle {
pub join_handle: JoinHandle<Result<AgentResult>>,
}
/// Lives as an `Arc<parking_lot::RwLock<Supervisor>>` alongside GlobalConfig,
/// NOT inside it — avoids adding lock contention to the shared Config.
pub struct Supervisor {
handles: HashMap<String, AgentHandle>,
task_queue: TaskQueue,
+10 -13
View File
@@ -122,13 +122,13 @@ impl TaskQueue {
}
pub fn claim(&mut self, task_id: &str, owner: &str) -> bool {
if let Some(task) = self.tasks.get_mut(task_id) {
if task.is_runnable() && task.owner.is_none() {
task.owner = Some(owner.to_string());
task.status = TaskStatus::InProgress;
return true;
}
if let Some(task) = self.tasks.get_mut(task_id) &&
task.is_runnable() && task.owner.is_none() {
task.owner = Some(owner.to_string());
task.status = TaskStatus::InProgress;
return true;
}
false
}
@@ -146,8 +146,6 @@ impl TaskQueue {
tasks
}
// DFS cycle detection: would adding task_id -> blocked_by create a cycle?
// A cycle exists if blocked_by can reach task_id through existing dependencies.
fn would_create_cycle(&self, task_id: &str, blocked_by: &str) -> bool {
let mut visited = HashSet::new();
let mut stack = vec![blocked_by.to_string()];
@@ -156,11 +154,10 @@ impl TaskQueue {
if current == task_id {
return true;
}
if visited.insert(current.clone()) {
if let Some(task) = self.tasks.get(&current) {
for dep in &task.blocked_by {
stack.push(dep.clone());
}
if visited.insert(current.clone()) &&
let Some(task) = self.tasks.get(&current) {
for dep in &task.blocked_by {
stack.push(dep.clone());
}
}
}