# loki/examples/langchain-sisyphus/sisyphus_langchain/agents/coder.py
"""
Coder agent node — the implementation worker.
Loki equivalent: assets/agents/coder/config.yaml + tools.sh
In Loki, the coder is the ONLY agent that modifies files. It:
- Receives a structured prompt from sisyphus with code patterns to follow
- Writes files via the write_file tool (never pastes code in chat)
- Verifies builds after every change
- Signals CODER_COMPLETE or CODER_FAILED
In LangGraph, coder is a node with write-capable tools (read_file, write_file,
search_content, execute_command, verify_build). The supervisor formats a
structured delegation prompt (Goal / Reference Files / Code Patterns /
Conventions / Constraints) and routes to this node.
Key Loki→LangGraph mapping:
- Loki's "Coder Delegation Format" → the supervisor builds this as a
HumanMessage before routing to the coder node.
- Loki's auto_continue (up to 15) → the supervisor can re-route to coder
if verification fails, up to iteration_count limits.
- Loki's todo system for multi-file changes → the coder updates
state["todos"] as it completes each file.
"""
from __future__ import annotations
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
from sisyphus_langchain.state import SisyphusState
from sisyphus_langchain.tools.filesystem import (
read_file,
search_content,
search_files,
write_file,
)
from sisyphus_langchain.tools.project import (
execute_command,
run_tests,
verify_build,
)
# ---------------------------------------------------------------------------
# System prompt — faithfully mirrors coder/config.yaml
# ---------------------------------------------------------------------------
# Sent as the SystemMessage on every coder invocation (see coder_node below).
# NOTE: this string is runtime behavior — the LLM reads it verbatim, so any
# wording change alters the agent's behavior, not just documentation.
CODER_SYSTEM_PROMPT = """\
You are a senior engineer. You write code that works on the first try.
## Your Mission
Given an implementation task:
1. Check for context provided in the conversation (patterns, conventions, reference files).
2. Fill gaps only — read files NOT already covered in context.
3. Write the code using the write_file tool (NEVER output code in chat).
4. Verify it compiles/builds using verify_build.
5. Provide a summary of what you implemented.
## Using Provided Context (IMPORTANT)
Your prompt often contains prior findings from the explore agent: file paths,
code patterns, and conventions.
**If context is provided:**
1. Use it as your primary reference. Don't re-read files already summarized.
2. Follow the code patterns shown — snippets in context ARE the style guide.
3. Read referenced files ONLY IF you need more detail (full signatures, imports).
4. If context includes a "Conventions" section, follow it exactly.
**If context is NOT provided or is too vague:**
Fall back to self-exploration: search for similar files, read 1-2 examples,
match their style.
## Writing Code
CRITICAL: Write code using the write_file tool. NEVER paste code in chat.
## Pattern Matching
Before writing ANY file:
1. Find a similar existing file.
2. Match its style: imports, naming, structure.
3. Follow the same patterns exactly.
## Verification
After writing files:
1. Run verify_build to check compilation.
2. If it fails, fix the error (minimal change).
3. Don't move on until build passes.
## Rules
1. Write code via tools — never output code to chat.
2. Follow patterns — read existing files first.
3. Verify builds — don't finish without checking.
4. Minimal fixes — if build fails, fix precisely.
5. No refactoring — only implement what's asked.
"""
# Full tool set — coder gets write access and command execution.
# This is the only agent bound to write_file (per the module docstring, the
# coder is the sole agent that modifies files). Tool implementations live in
# sisyphus_langchain.tools.*; the comments below are inferred from the tool
# names and the system prompt — confirm against the tool modules.
CODER_TOOLS = [
    read_file,        # read file contents (fill context gaps)
    write_file,       # create/overwrite files — coder-only capability
    search_content,   # search inside file contents
    search_files,     # locate files by name/pattern
    execute_command,  # run project commands
    verify_build,     # build/compile check after every change
    run_tests,        # execute the project's test suite
]
def create_coder_node(model_name: str = "gpt-4o", temperature: float = 0.1):
    """
    Build and return the coder node function for the LangGraph graph.

    The coder needs a capable model because it writes production code;
    in Loki, the coder inherits the parent's model by default.

    Args:
        model_name: Model identifier.
        temperature: LLM temperature (Loki's coder uses 0.1 for
            consistent, low-variance code output).

    Returns:
        A node callable ``(SisyphusState) -> dict`` suitable for
        registration on the graph.
    """
    # Bind the write-capable tool set once, at factory time.
    bound_llm = ChatOpenAI(model=model_name, temperature=temperature).bind_tools(
        CODER_TOOLS
    )

    def coder_node(state: SisyphusState) -> dict:
        """
        Run one coder turn.

        Prepends the coder system prompt to the conversation history
        (which includes the supervisor's structured delegation message),
        invokes the tool-bound LLM, and records the result both in the
        message stream and under ``agent_outputs["coder"]``.
        """
        prompt = [SystemMessage(content=CODER_SYSTEM_PROMPT), *state["messages"]]
        response = bound_llm.invoke(prompt)

        # Copy-then-assign keeps prior agent outputs while overwriting
        # any earlier coder entry — same merge the original performed inline.
        outputs = dict(state.get("agent_outputs", {}))
        outputs["coder"] = response.content

        return {"messages": [response], "agent_outputs": outputs}

    return coder_node