Compare commits
1 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
83581d9d18
|
@@ -1,74 +1,3 @@
|
|||||||
## v0.3.0 (2026-04-02)
|
|
||||||
|
|
||||||
### Feat
|
|
||||||
|
|
||||||
- Added `todo__clear` function to the todo system and updated REPL commands to have a .clear todo as well for significant changes in agent direction
|
|
||||||
- Added available tools to prompts for sisyphus and code-reviewer agent families
|
|
||||||
- Added available tools to coder prompt
|
|
||||||
- Improved token efficiency when delegating from sisyphus -> coder
|
|
||||||
- modified sisyphus agents to use the new ddg-search MCP server for web searches instead of built-in model searches
|
|
||||||
- Added support for specifying a custom response to multiple-choice prompts when nothing suits the user's needs
|
|
||||||
- Supported theming in the inquire prompts in the REPL
|
|
||||||
- Added the duckduckgo-search MCP server for searching the web (in addition to the built-in tools for web searches)
|
|
||||||
- Support for Gemini OAuth
|
|
||||||
- Support authenticating or refreshing OAuth for supported clients from within the REPL
|
|
||||||
- Allow first-runs to select OAuth for supported providers
|
|
||||||
- Support OAuth authentication flows for Claude
|
|
||||||
- Improved MCP server spinup and spindown when switching contexts or settings in the REPL: Modify existing config rather than stopping all servers always and re-initializing if unnecessary
|
|
||||||
- Allow the explore agent to run search queries for understanding docs or API specs
|
|
||||||
- Allow the oracle to perform web searches for deeper research
|
|
||||||
- Added web search support to the main sisyphus agent to answer user queries
|
|
||||||
- Created a CodeRabbit-style code-reviewer agent
|
|
||||||
- Added configuration option in agents to indicate the timeout for user input before proceeding (defaults to 5 minutes)
|
|
||||||
- Added support for sub-agents to escalate user interaction requests from any depth to the parent agents for user interactions
|
|
||||||
- built-in user interaction tools to remove the need for the list/confirm/etc prompts in prompt tools and to enhance user interactions in Loki
|
|
||||||
- Experimental update to sisyphus to use the new parallel agent spawning system
|
|
||||||
- Added an agent configuration property that allows auto-injecting sub-agent spawning instructions (when using the built-in sub-agent spawning system)
|
|
||||||
- Auto-dispatch support of sub-agents and support for the teammate pattern between subagents
|
|
||||||
- Full passive task queue integration for parallelization of subagents
|
|
||||||
- Implemented initial scaffolding for built-in sub-agent spawning tool call operations
|
|
||||||
- Initial models for agent parallelization
|
|
||||||
- Added interactive prompting between the LLM and the user in Sisyphus using the built-in Bash utils scripts
|
|
||||||
|
|
||||||
### Fix
|
|
||||||
|
|
||||||
- Clarified user text input interaction
|
|
||||||
- recursion bug with similarly named Bash search functions in the explore agent
|
|
||||||
- updated the error for unauthenticated oauth to include the REPL .authenticated command
|
|
||||||
- Corrected a bug in the coder agent that wasn't outputting a summary of the changes made, so the parent Sisyphus agent had no idea whether the agent worked or not
|
|
||||||
- Claude code system prompt injected into claude requests to make them valid once again
|
|
||||||
- Do not inject tools when models don't support them; detect this conflict before API calls happen
|
|
||||||
- The REPL .authenticate command works from within sessions, agents, and roles with pre-configured models
|
|
||||||
- Implemented the path normalization fix for the oracle and explore agents
|
|
||||||
- Updated the atlassian MCP server endpoint to account for future deprecation
|
|
||||||
- Fixed a bug in the coder agent that was causing the agent to create absolute paths from the current directory
|
|
||||||
- the updated regex for secrets injection broke MCP server secrets interpolation because the regex greedily matched on new lines, replacing too much content. This fix just ignores commented out lines in YAML files by skipping commented out lines.
|
|
||||||
- Don't try to inject secrets into commented-out lines in the config
|
|
||||||
- Removed top_p parameter from some agents so they can work across model providers
|
|
||||||
- Improved sub-agent stdout and stderr output for users to follow
|
|
||||||
- Inject agent variables into environment variables for global tool calls when invoked from agents to modify global tool behavior
|
|
||||||
- Removed the unnecessary execute_commands tool from the oracle agent
|
|
||||||
- Added auto_confirm to the coder agent so sub-agent spawning doesn't freeze
|
|
||||||
- Fixed a bug in the new supervisor and todo built-ins that was causing errors with OpenAI models
|
|
||||||
- Added condition to sisyphus to always output a summary to clearly indicate completion
|
|
||||||
- Updated the sisyphus prompt to explicitly tell it to delegate to the coder agent when it wants to write any code at all except for trivial changes
|
|
||||||
- Added back in the auto_confirm variable into sisyphus
|
|
||||||
- Removed the now unnecessary is_stale_response that was breaking auto-continuing with parallel agents
|
|
||||||
- Bypassed enabled_tools for user interaction tools so if function calling is enabled at all, the LLM has access to the user interaction tools when in REPL mode
|
|
||||||
- When parallel agents run, only write to stdout from the parent and only display the parent's throbber
|
|
||||||
- Forgot to implement support for failing a task and keeping all dependents blocked
|
|
||||||
- Clean up orphaned sub-agents when the parent agent exits
|
|
||||||
- Fixed the bash prompt utils so that they correctly show output when being run by a tool invocation
|
|
||||||
- Forgot to automatically add the bidirectional communication back up to parent agents from sub-agents (i.e. need to be able to check inbox and send messages)
|
|
||||||
- Agent delegation tools were not being passed into the {{__tools__}} placeholder so agents weren't delegating to subagents
|
|
||||||
|
|
||||||
### Refactor
|
|
||||||
|
|
||||||
- Made the oauth module more generic so it can support loopback OAuth (not just manual)
|
|
||||||
- Changed the default session name for Sisyphus to temp (to require users to explicitly name sessions they wish to save)
|
|
||||||
- Updated the sisyphus agent to use the built-in user interaction tools instead of custom bash-based tools
|
|
||||||
- Cleaned up some left-over implementation stubs
|
|
||||||
|
|
||||||
## v0.2.0 (2026-02-14)
|
## v0.2.0 (2026-02-14)
|
||||||
|
|
||||||
### Feat
|
### Feat
|
||||||
|
|||||||
Generated
+630
-433
File diff suppressed because it is too large
Load Diff
+4
-5
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "loki-ai"
|
name = "loki-ai"
|
||||||
version = "0.3.0"
|
version = "0.2.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
authors = ["Alex Clarke <alex.j.tusa@gmail.com>"]
|
authors = ["Alex Clarke <alex.j.tusa@gmail.com>"]
|
||||||
description = "An all-in-one, batteries included LLM CLI Tool"
|
description = "An all-in-one, batteries included LLM CLI Tool"
|
||||||
@@ -18,11 +18,10 @@ anyhow = "1.0.69"
|
|||||||
bytes = "1.4.0"
|
bytes = "1.4.0"
|
||||||
clap = { version = "4.5.40", features = ["cargo", "derive", "wrap_help"] }
|
clap = { version = "4.5.40", features = ["cargo", "derive", "wrap_help"] }
|
||||||
dirs = "6.0.0"
|
dirs = "6.0.0"
|
||||||
dunce = "1.0.5"
|
|
||||||
futures-util = "0.3.29"
|
futures-util = "0.3.29"
|
||||||
inquire = "0.9.4"
|
inquire = "0.7.0"
|
||||||
is-terminal = "0.4.9"
|
is-terminal = "0.4.9"
|
||||||
reedline = "0.46.0"
|
reedline = "0.40.0"
|
||||||
serde = { version = "1.0.152", features = ["derive"] }
|
serde = { version = "1.0.152", features = ["derive"] }
|
||||||
serde_json = { version = "1.0.93", features = ["preserve_order"] }
|
serde_json = { version = "1.0.93", features = ["preserve_order"] }
|
||||||
serde_yaml = "0.9.17"
|
serde_yaml = "0.9.17"
|
||||||
@@ -38,7 +37,7 @@ tokio-graceful = "0.2.2"
|
|||||||
tokio-stream = { version = "0.1.15", default-features = false, features = [
|
tokio-stream = { version = "0.1.15", default-features = false, features = [
|
||||||
"sync",
|
"sync",
|
||||||
] }
|
] }
|
||||||
crossterm = "0.29.0"
|
crossterm = "0.28.1"
|
||||||
chrono = "0.4.23"
|
chrono = "0.4.23"
|
||||||
bincode = { version = "2.0.0", features = [
|
bincode = { version = "2.0.0", features = [
|
||||||
"serde",
|
"serde",
|
||||||
|
|||||||
@@ -2,6 +2,68 @@
|
|||||||
# Shared Agent Utilities - Minimal, focused helper functions
|
# Shared Agent Utilities - Minimal, focused helper functions
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
|
#############################
|
||||||
|
## CONTEXT FILE MANAGEMENT ##
|
||||||
|
#############################
|
||||||
|
|
||||||
|
get_context_file() {
|
||||||
|
local project_dir="${LLM_AGENT_VAR_PROJECT_DIR:-.}"
|
||||||
|
echo "${project_dir}/.loki-context"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Initialize context file for a new task
|
||||||
|
# Usage: init_context "Task description"
|
||||||
|
init_context() {
|
||||||
|
local task="$1"
|
||||||
|
local project_dir="${LLM_AGENT_VAR_PROJECT_DIR:-.}"
|
||||||
|
local context_file
|
||||||
|
context_file=$(get_context_file)
|
||||||
|
|
||||||
|
cat > "${context_file}" <<EOF
|
||||||
|
## Project: ${project_dir}
|
||||||
|
## Task: ${task}
|
||||||
|
## Started: $(date -Iseconds)
|
||||||
|
|
||||||
|
### Prior Findings
|
||||||
|
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
# Append findings to the context file
|
||||||
|
# Usage: append_context "agent_name" "finding summary"
|
||||||
|
append_context() {
|
||||||
|
local agent="$1"
|
||||||
|
local finding="$2"
|
||||||
|
local context_file
|
||||||
|
context_file=$(get_context_file)
|
||||||
|
|
||||||
|
if [[ -f "${context_file}" ]]; then
|
||||||
|
{
|
||||||
|
echo ""
|
||||||
|
echo "[${agent}]:"
|
||||||
|
echo "${finding}"
|
||||||
|
} >> "${context_file}"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Read the current context (returns empty string if no context)
|
||||||
|
# Usage: context=$(read_context)
|
||||||
|
read_context() {
|
||||||
|
local context_file
|
||||||
|
context_file=$(get_context_file)
|
||||||
|
|
||||||
|
if [[ -f "${context_file}" ]]; then
|
||||||
|
cat "${context_file}"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Clear the context file
|
||||||
|
clear_context() {
|
||||||
|
local context_file
|
||||||
|
context_file=$(get_context_file)
|
||||||
|
rm -f "${context_file}"
|
||||||
|
}
|
||||||
|
|
||||||
#######################
|
#######################
|
||||||
## PROJECT DETECTION ##
|
## PROJECT DETECTION ##
|
||||||
#######################
|
#######################
|
||||||
@@ -286,11 +348,77 @@ detect_project() {
|
|||||||
echo '{"type":"unknown","build":"","test":"","check":""}'
|
echo '{"type":"unknown","build":"","test":"","check":""}'
|
||||||
}
|
}
|
||||||
|
|
||||||
|
######################
|
||||||
|
## AGENT INVOCATION ##
|
||||||
|
######################
|
||||||
|
|
||||||
|
# Invoke a subagent with optional context injection
|
||||||
|
# Usage: invoke_agent <agent_name> <prompt> [extra_args...]
|
||||||
|
invoke_agent() {
|
||||||
|
local agent="$1"
|
||||||
|
local prompt="$2"
|
||||||
|
shift 2
|
||||||
|
|
||||||
|
local context
|
||||||
|
context=$(read_context)
|
||||||
|
|
||||||
|
local full_prompt
|
||||||
|
if [[ -n "${context}" ]]; then
|
||||||
|
full_prompt="## Orchestrator Context
|
||||||
|
|
||||||
|
The orchestrator (sisyphus) has gathered this context from prior work:
|
||||||
|
|
||||||
|
<context>
|
||||||
|
${context}
|
||||||
|
</context>
|
||||||
|
|
||||||
|
## Your Task
|
||||||
|
|
||||||
|
${prompt}"
|
||||||
|
else
|
||||||
|
full_prompt="${prompt}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
env AUTO_CONFIRM=true loki --agent "${agent}" "$@" "${full_prompt}" 2>&1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Invoke a subagent and capture a summary of its findings
|
||||||
|
# Usage: result=$(invoke_agent_with_summary "explore" "find auth patterns")
|
||||||
|
invoke_agent_with_summary() {
|
||||||
|
local agent="$1"
|
||||||
|
local prompt="$2"
|
||||||
|
shift 2
|
||||||
|
|
||||||
|
local output
|
||||||
|
output=$(invoke_agent "${agent}" "${prompt}" "$@")
|
||||||
|
|
||||||
|
local summary=""
|
||||||
|
|
||||||
|
if echo "${output}" | grep -q "FINDINGS:"; then
|
||||||
|
summary=$(echo "${output}" | sed -n '/FINDINGS:/,/^[A-Z_]*COMPLETE/p' | grep "^- " | sed 's/^- / - /')
|
||||||
|
elif echo "${output}" | grep -q "CODER_COMPLETE:"; then
|
||||||
|
summary=$(echo "${output}" | grep "CODER_COMPLETE:" | sed 's/CODER_COMPLETE: *//')
|
||||||
|
elif echo "${output}" | grep -q "ORACLE_COMPLETE"; then
|
||||||
|
summary=$(echo "${output}" | sed -n '/^## Recommendation/,/^## /{/^## Recommendation/d;/^## /d;p}' | sed '/^$/d' | head -10)
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Failsafe: extract up to 5 meaningful lines if no markers found
|
||||||
|
if [[ -z "${summary}" ]]; then
|
||||||
|
summary=$(echo "${output}" | grep -v "^$" | grep -v "^#" | grep -v "^\-\-\-" | tail -10 | head -5)
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -n "${summary}" ]]; then
|
||||||
|
append_context "${agent}" "${summary}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "${output}"
|
||||||
|
}
|
||||||
|
|
||||||
###########################
|
###########################
|
||||||
## FILE SEARCH UTILITIES ##
|
## FILE SEARCH UTILITIES ##
|
||||||
###########################
|
###########################
|
||||||
|
|
||||||
_search_files() {
|
search_files() {
|
||||||
local pattern="$1"
|
local pattern="$1"
|
||||||
local dir="${2:-.}"
|
local dir="${2:-.}"
|
||||||
|
|
||||||
|
|||||||
@@ -122,6 +122,3 @@ instructions: |
|
|||||||
- Project: {{project_dir}}
|
- Project: {{project_dir}}
|
||||||
- CWD: {{__cwd__}}
|
- CWD: {{__cwd__}}
|
||||||
- Shell: {{__shell__}}
|
- Shell: {{__shell__}}
|
||||||
|
|
||||||
## Available Tools:
|
|
||||||
{{__tools__}}
|
|
||||||
|
|||||||
@@ -29,30 +29,11 @@ instructions: |
|
|||||||
## Your Mission
|
## Your Mission
|
||||||
|
|
||||||
Given an implementation task:
|
Given an implementation task:
|
||||||
1. Check for orchestrator context first (see below)
|
1. Understand what to build (from context provided)
|
||||||
2. Fill gaps only. Read files NOT already covered in context
|
2. Study existing patterns (read 1-2 similar files)
|
||||||
3. Write the code (using tools, NOT chat output)
|
3. Write the code (using tools, NOT chat output)
|
||||||
4. Verify it compiles/builds
|
4. Verify it compiles/builds
|
||||||
5. Signal completion with a summary
|
5. Signal completion
|
||||||
|
|
||||||
## Using Orchestrator Context (IMPORTANT)
|
|
||||||
|
|
||||||
When spawned by sisyphus, your prompt will often contain a `<context>` block
|
|
||||||
with prior findings: file paths, code patterns, and conventions discovered by
|
|
||||||
explore agents.
|
|
||||||
|
|
||||||
**If context is provided:**
|
|
||||||
1. Use it as your primary reference. Don't re-read files already summarized
|
|
||||||
2. Follow the code patterns shown. Snippets in context ARE the style guide
|
|
||||||
3. Read the referenced files ONLY IF you need more detail (e.g. full function
|
|
||||||
signature, import list, or adjacent code not included in the snippet)
|
|
||||||
4. If context includes a "Conventions" section, follow it exactly
|
|
||||||
|
|
||||||
**If context is NOT provided or is too vague to act on:**
|
|
||||||
Fall back to self-exploration: grep for similar files, read 1-2 examples,
|
|
||||||
match their style.
|
|
||||||
|
|
||||||
**Never ignore provided context.** It represents work already done upstream.
|
|
||||||
|
|
||||||
## Todo System
|
## Todo System
|
||||||
|
|
||||||
@@ -101,13 +82,12 @@ instructions: |
|
|||||||
|
|
||||||
## Completion Signal
|
## Completion Signal
|
||||||
|
|
||||||
When done, end your response with a summary so the parent agent knows what happened:
|
End with:
|
||||||
|
|
||||||
```
|
```
|
||||||
CODER_COMPLETE: [summary of what was implemented, which files were created/modified, and build status]
|
CODER_COMPLETE: [summary of what was implemented]
|
||||||
```
|
```
|
||||||
|
|
||||||
Or if something went wrong:
|
Or if failed:
|
||||||
```
|
```
|
||||||
CODER_FAILED: [what went wrong]
|
CODER_FAILED: [what went wrong]
|
||||||
```
|
```
|
||||||
@@ -125,5 +105,3 @@ instructions: |
|
|||||||
- CWD: {{__cwd__}}
|
- CWD: {{__cwd__}}
|
||||||
- Shell: {{__shell__}}
|
- Shell: {{__shell__}}
|
||||||
|
|
||||||
## Available tools:
|
|
||||||
{{__tools__}}
|
|
||||||
@@ -14,28 +14,11 @@ _project_dir() {
|
|||||||
(cd "${dir}" 2>/dev/null && pwd) || echo "${dir}"
|
(cd "${dir}" 2>/dev/null && pwd) || echo "${dir}"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Normalize a path to be relative to project root.
|
|
||||||
# Strips the project_dir prefix if the LLM passes an absolute path.
|
|
||||||
# Usage: local rel_path; rel_path=$(_normalize_path "/abs/or/rel/path")
|
|
||||||
_normalize_path() {
|
|
||||||
local input_path="$1"
|
|
||||||
local project_dir
|
|
||||||
project_dir=$(_project_dir)
|
|
||||||
|
|
||||||
if [[ "${input_path}" == /* ]]; then
|
|
||||||
input_path="${input_path#"${project_dir}"/}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
input_path="${input_path#./}"
|
|
||||||
echo "${input_path}"
|
|
||||||
}
|
|
||||||
|
|
||||||
# @cmd Read a file's contents before modifying
|
# @cmd Read a file's contents before modifying
|
||||||
# @option --path! Path to the file (relative to project root)
|
# @option --path! Path to the file (relative to project root)
|
||||||
read_file() {
|
read_file() {
|
||||||
local file_path
|
|
||||||
# shellcheck disable=SC2154
|
# shellcheck disable=SC2154
|
||||||
file_path=$(_normalize_path "${argc_path}")
|
local file_path="${argc_path}"
|
||||||
local project_dir
|
local project_dir
|
||||||
project_dir=$(_project_dir)
|
project_dir=$(_project_dir)
|
||||||
local full_path="${project_dir}/${file_path}"
|
local full_path="${project_dir}/${file_path}"
|
||||||
@@ -56,8 +39,7 @@ read_file() {
|
|||||||
# @option --path! Path for the file (relative to project root)
|
# @option --path! Path for the file (relative to project root)
|
||||||
# @option --content! Complete file contents to write
|
# @option --content! Complete file contents to write
|
||||||
write_file() {
|
write_file() {
|
||||||
local file_path
|
local file_path="${argc_path}"
|
||||||
file_path=$(_normalize_path "${argc_path}")
|
|
||||||
# shellcheck disable=SC2154
|
# shellcheck disable=SC2154
|
||||||
local content="${argc_content}"
|
local content="${argc_content}"
|
||||||
local project_dir
|
local project_dir
|
||||||
@@ -65,7 +47,7 @@ write_file() {
|
|||||||
local full_path="${project_dir}/${file_path}"
|
local full_path="${project_dir}/${file_path}"
|
||||||
|
|
||||||
mkdir -p "$(dirname "${full_path}")"
|
mkdir -p "$(dirname "${full_path}")"
|
||||||
printf '%s' "${content}" > "${full_path}"
|
echo "${content}" > "${full_path}"
|
||||||
|
|
||||||
green "Wrote: ${file_path}" >> "$LLM_OUTPUT"
|
green "Wrote: ${file_path}" >> "$LLM_OUTPUT"
|
||||||
}
|
}
|
||||||
@@ -73,8 +55,7 @@ write_file() {
|
|||||||
# @cmd Find files similar to a given path (for pattern matching)
|
# @cmd Find files similar to a given path (for pattern matching)
|
||||||
# @option --path! Path to find similar files for
|
# @option --path! Path to find similar files for
|
||||||
find_similar_files() {
|
find_similar_files() {
|
||||||
local file_path
|
local file_path="${argc_path}"
|
||||||
file_path=$(_normalize_path "${argc_path}")
|
|
||||||
local project_dir
|
local project_dir
|
||||||
project_dir=$(_project_dir)
|
project_dir=$(_project_dir)
|
||||||
|
|
||||||
@@ -90,14 +71,14 @@ find_similar_files() {
|
|||||||
! -name "$(basename "${file_path}")" \
|
! -name "$(basename "${file_path}")" \
|
||||||
! -name "*test*" \
|
! -name "*test*" \
|
||||||
! -name "*spec*" \
|
! -name "*spec*" \
|
||||||
2>/dev/null | sed "s|^${project_dir}/||" | head -3)
|
2>/dev/null | head -3)
|
||||||
|
|
||||||
if [[ -z "${results}" ]]; then
|
if [[ -z "${results}" ]]; then
|
||||||
results=$(find "${project_dir}/src" -type f -name "*.${ext}" \
|
results=$(find "${project_dir}/src" -type f -name "*.${ext}" \
|
||||||
! -name "*test*" \
|
! -name "*test*" \
|
||||||
! -name "*spec*" \
|
! -name "*spec*" \
|
||||||
-not -path '*/target/*' \
|
-not -path '*/target/*' \
|
||||||
2>/dev/null | sed "s|^${project_dir}/||" | head -3)
|
2>/dev/null | head -3)
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ -n "${results}" ]]; then
|
if [[ -n "${results}" ]]; then
|
||||||
@@ -205,7 +186,6 @@ search_code() {
|
|||||||
grep -v '/target/' | \
|
grep -v '/target/' | \
|
||||||
grep -v '/node_modules/' | \
|
grep -v '/node_modules/' | \
|
||||||
grep -v '/.git/' | \
|
grep -v '/.git/' | \
|
||||||
sed "s|^${project_dir}/||" | \
|
|
||||||
head -20) || true
|
head -20) || true
|
||||||
|
|
||||||
if [[ -n "${results}" ]]; then
|
if [[ -n "${results}" ]]; then
|
||||||
|
|||||||
@@ -8,13 +8,12 @@ variables:
|
|||||||
description: Project directory to explore
|
description: Project directory to explore
|
||||||
default: '.'
|
default: '.'
|
||||||
|
|
||||||
mcp_servers:
|
|
||||||
- ddg-search
|
|
||||||
global_tools:
|
global_tools:
|
||||||
- fs_read.sh
|
- fs_read.sh
|
||||||
- fs_grep.sh
|
- fs_grep.sh
|
||||||
- fs_glob.sh
|
- fs_glob.sh
|
||||||
- fs_ls.sh
|
- fs_ls.sh
|
||||||
|
- web_search_loki.sh
|
||||||
|
|
||||||
instructions: |
|
instructions: |
|
||||||
You are a codebase explorer. Your job: Search, find, report. Nothing else.
|
You are a codebase explorer. Your job: Search, find, report. Nothing else.
|
||||||
@@ -69,9 +68,6 @@ instructions: |
|
|||||||
- Project: {{project_dir}}
|
- Project: {{project_dir}}
|
||||||
- CWD: {{__cwd__}}
|
- CWD: {{__cwd__}}
|
||||||
|
|
||||||
## Available Tools:
|
|
||||||
{{__tools__}}
|
|
||||||
|
|
||||||
conversation_starters:
|
conversation_starters:
|
||||||
- 'Find how authentication is implemented'
|
- 'Find how authentication is implemented'
|
||||||
- 'What patterns are used for API endpoints'
|
- 'What patterns are used for API endpoints'
|
||||||
|
|||||||
@@ -14,21 +14,6 @@ _project_dir() {
|
|||||||
(cd "${dir}" 2>/dev/null && pwd) || echo "${dir}"
|
(cd "${dir}" 2>/dev/null && pwd) || echo "${dir}"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Normalize a path to be relative to project root.
|
|
||||||
# Strips the project_dir prefix if the LLM passes an absolute path.
|
|
||||||
_normalize_path() {
|
|
||||||
local input_path="$1"
|
|
||||||
local project_dir
|
|
||||||
project_dir=$(_project_dir)
|
|
||||||
|
|
||||||
if [[ "${input_path}" == /* ]]; then
|
|
||||||
input_path="${input_path#"${project_dir}"/}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
input_path="${input_path#./}"
|
|
||||||
echo "${input_path}"
|
|
||||||
}
|
|
||||||
|
|
||||||
# @cmd Get project structure and layout
|
# @cmd Get project structure and layout
|
||||||
get_structure() {
|
get_structure() {
|
||||||
local project_dir
|
local project_dir
|
||||||
@@ -60,7 +45,7 @@ search_files() {
|
|||||||
echo "" >> "$LLM_OUTPUT"
|
echo "" >> "$LLM_OUTPUT"
|
||||||
|
|
||||||
local results
|
local results
|
||||||
results=$(_search_files "${pattern}" "${project_dir}")
|
results=$(search_files "${pattern}" "${project_dir}")
|
||||||
|
|
||||||
if [[ -n "${results}" ]]; then
|
if [[ -n "${results}" ]]; then
|
||||||
echo "${results}" >> "$LLM_OUTPUT"
|
echo "${results}" >> "$LLM_OUTPUT"
|
||||||
@@ -93,7 +78,6 @@ search_content() {
|
|||||||
grep -v '/node_modules/' | \
|
grep -v '/node_modules/' | \
|
||||||
grep -v '/.git/' | \
|
grep -v '/.git/' | \
|
||||||
grep -v '/dist/' | \
|
grep -v '/dist/' | \
|
||||||
sed "s|^${project_dir}/||" | \
|
|
||||||
head -30) || true
|
head -30) || true
|
||||||
|
|
||||||
if [[ -n "${results}" ]]; then
|
if [[ -n "${results}" ]]; then
|
||||||
@@ -107,9 +91,8 @@ search_content() {
|
|||||||
# @option --path! Path to the file (relative to project root)
|
# @option --path! Path to the file (relative to project root)
|
||||||
# @option --lines Maximum lines to read (default: 200)
|
# @option --lines Maximum lines to read (default: 200)
|
||||||
read_file() {
|
read_file() {
|
||||||
local file_path
|
|
||||||
# shellcheck disable=SC2154
|
# shellcheck disable=SC2154
|
||||||
file_path=$(_normalize_path "${argc_path}")
|
local file_path="${argc_path}"
|
||||||
local max_lines="${argc_lines:-200}"
|
local max_lines="${argc_lines:-200}"
|
||||||
local project_dir
|
local project_dir
|
||||||
project_dir=$(_project_dir)
|
project_dir=$(_project_dir)
|
||||||
@@ -139,8 +122,7 @@ read_file() {
|
|||||||
# @cmd Find similar files to a given file (for pattern matching)
|
# @cmd Find similar files to a given file (for pattern matching)
|
||||||
# @option --path! Path to the reference file
|
# @option --path! Path to the reference file
|
||||||
find_similar() {
|
find_similar() {
|
||||||
local file_path
|
local file_path="${argc_path}"
|
||||||
file_path=$(_normalize_path "${argc_path}")
|
|
||||||
local project_dir
|
local project_dir
|
||||||
project_dir=$(_project_dir)
|
project_dir=$(_project_dir)
|
||||||
|
|
||||||
@@ -156,7 +138,7 @@ find_similar() {
|
|||||||
! -name "$(basename "${file_path}")" \
|
! -name "$(basename "${file_path}")" \
|
||||||
! -name "*test*" \
|
! -name "*test*" \
|
||||||
! -name "*spec*" \
|
! -name "*spec*" \
|
||||||
2>/dev/null | sed "s|^${project_dir}/||" | head -5)
|
2>/dev/null | head -5)
|
||||||
|
|
||||||
if [[ -n "${results}" ]]; then
|
if [[ -n "${results}" ]]; then
|
||||||
echo "${results}" >> "$LLM_OUTPUT"
|
echo "${results}" >> "$LLM_OUTPUT"
|
||||||
@@ -165,7 +147,7 @@ find_similar() {
|
|||||||
! -name "$(basename "${file_path}")" \
|
! -name "$(basename "${file_path}")" \
|
||||||
! -name "*test*" \
|
! -name "*test*" \
|
||||||
-not -path '*/target/*' \
|
-not -path '*/target/*' \
|
||||||
2>/dev/null | sed "s|^${project_dir}/||" | head -5)
|
2>/dev/null | head -5)
|
||||||
if [[ -n "${results}" ]]; then
|
if [[ -n "${results}" ]]; then
|
||||||
echo "${results}" >> "$LLM_OUTPUT"
|
echo "${results}" >> "$LLM_OUTPUT"
|
||||||
else
|
else
|
||||||
|
|||||||
@@ -108,6 +108,3 @@ instructions: |
|
|||||||
## Context
|
## Context
|
||||||
- Project: {{project_dir}}
|
- Project: {{project_dir}}
|
||||||
- CWD: {{__cwd__}}
|
- CWD: {{__cwd__}}
|
||||||
|
|
||||||
## Available Tools:
|
|
||||||
{{__tools__}}
|
|
||||||
|
|||||||
@@ -8,13 +8,12 @@ variables:
|
|||||||
description: Project directory for context
|
description: Project directory for context
|
||||||
default: '.'
|
default: '.'
|
||||||
|
|
||||||
mcp_servers:
|
|
||||||
- ddg-search
|
|
||||||
global_tools:
|
global_tools:
|
||||||
- fs_read.sh
|
- fs_read.sh
|
||||||
- fs_grep.sh
|
- fs_grep.sh
|
||||||
- fs_glob.sh
|
- fs_glob.sh
|
||||||
- fs_ls.sh
|
- fs_ls.sh
|
||||||
|
- web_search_loki.sh
|
||||||
|
|
||||||
instructions: |
|
instructions: |
|
||||||
You are Oracle - a senior architect and debugger consulted for complex decisions.
|
You are Oracle - a senior architect and debugger consulted for complex decisions.
|
||||||
@@ -76,9 +75,6 @@ instructions: |
|
|||||||
- Project: {{project_dir}}
|
- Project: {{project_dir}}
|
||||||
- CWD: {{__cwd__}}
|
- CWD: {{__cwd__}}
|
||||||
|
|
||||||
## Available Tools:
|
|
||||||
{{__tools__}}
|
|
||||||
|
|
||||||
conversation_starters:
|
conversation_starters:
|
||||||
- 'Review this architecture design'
|
- 'Review this architecture design'
|
||||||
- 'Help debug this complex issue'
|
- 'Help debug this complex issue'
|
||||||
|
|||||||
@@ -14,38 +14,21 @@ _project_dir() {
|
|||||||
(cd "${dir}" 2>/dev/null && pwd) || echo "${dir}"
|
(cd "${dir}" 2>/dev/null && pwd) || echo "${dir}"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Normalize a path to be relative to project root.
|
|
||||||
# Strips the project_dir prefix if the LLM passes an absolute path.
|
|
||||||
_normalize_path() {
|
|
||||||
local input_path="$1"
|
|
||||||
local project_dir
|
|
||||||
project_dir=$(_project_dir)
|
|
||||||
|
|
||||||
if [[ "${input_path}" == /* ]]; then
|
|
||||||
input_path="${input_path#"${project_dir}"/}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
input_path="${input_path#./}"
|
|
||||||
echo "${input_path}"
|
|
||||||
}
|
|
||||||
|
|
||||||
# @cmd Read a file for analysis
|
# @cmd Read a file for analysis
|
||||||
# @option --path! Path to the file (relative to project root)
|
# @option --path! Path to the file (relative to project root)
|
||||||
read_file() {
|
read_file() {
|
||||||
local project_dir
|
local project_dir
|
||||||
project_dir=$(_project_dir)
|
project_dir=$(_project_dir)
|
||||||
local file_path
|
|
||||||
# shellcheck disable=SC2154
|
# shellcheck disable=SC2154
|
||||||
file_path=$(_normalize_path "${argc_path}")
|
local full_path="${project_dir}/${argc_path}"
|
||||||
local full_path="${project_dir}/${file_path}"
|
|
||||||
|
|
||||||
if [[ ! -f "${full_path}" ]]; then
|
if [[ ! -f "${full_path}" ]]; then
|
||||||
error "File not found: ${file_path}" >> "$LLM_OUTPUT"
|
error "File not found: ${argc_path}" >> "$LLM_OUTPUT"
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
{
|
{
|
||||||
info "Reading: ${file_path}"
|
info "Reading: ${argc_path}"
|
||||||
echo ""
|
echo ""
|
||||||
cat "${full_path}"
|
cat "${full_path}"
|
||||||
} >> "$LLM_OUTPUT"
|
} >> "$LLM_OUTPUT"
|
||||||
@@ -97,7 +80,6 @@ search_code() {
|
|||||||
grep -v '/target/' | \
|
grep -v '/target/' | \
|
||||||
grep -v '/node_modules/' | \
|
grep -v '/node_modules/' | \
|
||||||
grep -v '/.git/' | \
|
grep -v '/.git/' | \
|
||||||
sed "s|^${project_dir}/||" | \
|
|
||||||
head -30) || true
|
head -30) || true
|
||||||
|
|
||||||
if [[ -n "${results}" ]]; then
|
if [[ -n "${results}" ]]; then
|
||||||
@@ -131,8 +113,7 @@ analyze_with_command() {
|
|||||||
# @cmd List directory contents
|
# @cmd List directory contents
|
||||||
# @option --path Path to list (default: project root)
|
# @option --path Path to list (default: project root)
|
||||||
list_directory() {
|
list_directory() {
|
||||||
local dir_path
|
local dir_path="${argc_path:-.}"
|
||||||
dir_path=$(_normalize_path "${argc_path:-.}")
|
|
||||||
local project_dir
|
local project_dir
|
||||||
project_dir=$(_project_dir)
|
project_dir=$(_project_dir)
|
||||||
local full_path="${project_dir}/${dir_path}"
|
local full_path="${project_dir}/${dir_path}"
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ can_spawn_agents: true
|
|||||||
max_concurrent_agents: 4
|
max_concurrent_agents: 4
|
||||||
max_agent_depth: 3
|
max_agent_depth: 3
|
||||||
inject_spawn_instructions: true
|
inject_spawn_instructions: true
|
||||||
summarization_threshold: 8000
|
summarization_threshold: 4000
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
- name: project_dir
|
- name: project_dir
|
||||||
@@ -22,13 +22,12 @@ variables:
|
|||||||
description: Auto-confirm command execution
|
description: Auto-confirm command execution
|
||||||
default: '1'
|
default: '1'
|
||||||
|
|
||||||
mcp_servers:
|
|
||||||
- ddg-search
|
|
||||||
global_tools:
|
global_tools:
|
||||||
- fs_read.sh
|
- fs_read.sh
|
||||||
- fs_grep.sh
|
- fs_grep.sh
|
||||||
- fs_glob.sh
|
- fs_glob.sh
|
||||||
- fs_ls.sh
|
- fs_ls.sh
|
||||||
|
- web_search_loki.sh
|
||||||
- execute_command.sh
|
- execute_command.sh
|
||||||
|
|
||||||
instructions: |
|
instructions: |
|
||||||
@@ -70,45 +69,6 @@ instructions: |
|
|||||||
| coder | Write/edit files, implement features | Creates/modifies files, runs builds |
|
| coder | Write/edit files, implement features | Creates/modifies files, runs builds |
|
||||||
| oracle | Architecture decisions, complex debugging | Advisory, high-quality reasoning |
|
| oracle | Architecture decisions, complex debugging | Advisory, high-quality reasoning |
|
||||||
|
|
||||||
## Coder Delegation Format (MANDATORY)
|
|
||||||
|
|
||||||
When spawning the `coder` agent, your prompt MUST include these sections.
|
|
||||||
The coder has NOT seen the codebase. Your prompt IS its entire context.
|
|
||||||
|
|
||||||
### Template:
|
|
||||||
|
|
||||||
```
|
|
||||||
## Goal
|
|
||||||
[1-2 sentences: what to build/modify and where]
|
|
||||||
|
|
||||||
## Reference Files
|
|
||||||
[Files that explore found, with what each demonstrates]
|
|
||||||
- `path/to/file.ext` - what pattern this file shows
|
|
||||||
- `path/to/other.ext` - what convention this file shows
|
|
||||||
|
|
||||||
## Code Patterns to Follow
|
|
||||||
[Paste ACTUAL code snippets from explore results, not descriptions]
|
|
||||||
<code>
|
|
||||||
// From path/to/file.ext - this is the pattern to follow:
|
|
||||||
[actual code explore found, 5-20 lines]
|
|
||||||
</code>
|
|
||||||
|
|
||||||
## Conventions
|
|
||||||
[Naming, imports, error handling, file organization]
|
|
||||||
- Convention 1
|
|
||||||
- Convention 2
|
|
||||||
|
|
||||||
## Constraints
|
|
||||||
[What NOT to do, scope boundaries]
|
|
||||||
- Do NOT modify X
|
|
||||||
- Only touch files in Y/
|
|
||||||
```
|
|
||||||
|
|
||||||
**CRITICAL**: Include actual code snippets, not just file paths.
|
|
||||||
If explore returned code patterns, paste them into the coder prompt.
|
|
||||||
Vague prompts like "follow existing patterns" waste coder's tokens on
|
|
||||||
re-exploration that you already did.
|
|
||||||
|
|
||||||
## Workflow Examples
|
## Workflow Examples
|
||||||
|
|
||||||
### Example 1: Implementation task (explore -> coder, parallel exploration)
|
### Example 1: Implementation task (explore -> coder, parallel exploration)
|
||||||
@@ -120,12 +80,12 @@ instructions: |
|
|||||||
2. todo__add --task "Explore existing API patterns"
|
2. todo__add --task "Explore existing API patterns"
|
||||||
3. todo__add --task "Implement profile endpoint"
|
3. todo__add --task "Implement profile endpoint"
|
||||||
4. todo__add --task "Verify with build/test"
|
4. todo__add --task "Verify with build/test"
|
||||||
5. agent__spawn --agent explore --prompt "Find existing API endpoint patterns, route structures, and controller conventions. Include code snippets."
|
5. agent__spawn --agent explore --prompt "Find existing API endpoint patterns, route structures, and controller conventions"
|
||||||
6. agent__spawn --agent explore --prompt "Find existing data models and database query patterns. Include code snippets."
|
6. agent__spawn --agent explore --prompt "Find existing data models and database query patterns"
|
||||||
7. agent__collect --id <id1>
|
7. agent__collect --id <id1>
|
||||||
8. agent__collect --id <id2>
|
8. agent__collect --id <id2>
|
||||||
9. todo__done --id 1
|
9. todo__done --id 1
|
||||||
10. agent__spawn --agent coder --prompt "<structured prompt using Coder Delegation Format above, including code snippets from explore results>"
|
10. agent__spawn --agent coder --prompt "Create user profiles endpoint following existing patterns. [Include context from explore results]"
|
||||||
11. agent__collect --id <coder_id>
|
11. agent__collect --id <coder_id>
|
||||||
12. todo__done --id 2
|
12. todo__done --id 2
|
||||||
13. run_build
|
13. run_build
|
||||||
@@ -174,6 +134,7 @@ instructions: |
|
|||||||
|
|
||||||
## When to Do It Yourself
|
## When to Do It Yourself
|
||||||
|
|
||||||
|
- Single-file reads/writes
|
||||||
- Simple command execution
|
- Simple command execution
|
||||||
- Trivial changes (typos, renames)
|
- Trivial changes (typos, renames)
|
||||||
- Quick file searches
|
- Quick file searches
|
||||||
|
|||||||
@@ -16,15 +16,11 @@
|
|||||||
},
|
},
|
||||||
"atlassian": {
|
"atlassian": {
|
||||||
"command": "npx",
|
"command": "npx",
|
||||||
"args": ["-y", "mcp-remote@0.1.13", "https://mcp.atlassian.com/v1/mcp"]
|
"args": ["-y", "mcp-remote@0.1.13", "https://mcp.atlassian.com/v1/sse"]
|
||||||
},
|
},
|
||||||
"docker": {
|
"docker": {
|
||||||
"command": "uvx",
|
"command": "uvx",
|
||||||
"args": ["mcp-server-docker"]
|
"args": ["mcp-server-docker"]
|
||||||
},
|
|
||||||
"ddg-search": {
|
|
||||||
"command": "uvx",
|
|
||||||
"args": ["duckduckgo-mcp-server"]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -50,13 +50,7 @@ def parse_raw_data(data):
|
|||||||
|
|
||||||
def parse_argv():
|
def parse_argv():
|
||||||
agent_func = sys.argv[1]
|
agent_func = sys.argv[1]
|
||||||
|
agent_data = sys.argv[2]
|
||||||
tool_data_file = os.environ.get("LLM_TOOL_DATA_FILE")
|
|
||||||
if tool_data_file and os.path.isfile(tool_data_file):
|
|
||||||
with open(tool_data_file, "r", encoding="utf-8") as f:
|
|
||||||
agent_data = f.read()
|
|
||||||
else:
|
|
||||||
agent_data = sys.argv[2]
|
|
||||||
|
|
||||||
if (not agent_data) or (not agent_func):
|
if (not agent_data) or (not agent_func):
|
||||||
print("Usage: ./{agent_name}.py <agent-func> <agent-data>", file=sys.stderr)
|
print("Usage: ./{agent_name}.py <agent-func> <agent-data>", file=sys.stderr)
|
||||||
|
|||||||
@@ -14,11 +14,7 @@ main() {
|
|||||||
|
|
||||||
parse_argv() {
|
parse_argv() {
|
||||||
agent_func="$1"
|
agent_func="$1"
|
||||||
if [[ -n "$LLM_TOOL_DATA_FILE" ]] && [[ -f "$LLM_TOOL_DATA_FILE" ]]; then
|
agent_data="$2"
|
||||||
agent_data="$(cat "$LLM_TOOL_DATA_FILE")"
|
|
||||||
else
|
|
||||||
agent_data="$2"
|
|
||||||
fi
|
|
||||||
if [[ -z "$agent_data" ]] || [[ -z "$agent_func" ]]; then
|
if [[ -z "$agent_data" ]] || [[ -z "$agent_func" ]]; then
|
||||||
die "usage: ./{agent_name}.sh <agent-func> <agent-data>"
|
die "usage: ./{agent_name}.sh <agent-func> <agent-data>"
|
||||||
fi
|
fi
|
||||||
@@ -61,6 +57,7 @@ run() {
|
|||||||
if [[ "$OS" == "Windows_NT" ]]; then
|
if [[ "$OS" == "Windows_NT" ]]; then
|
||||||
set -o igncr
|
set -o igncr
|
||||||
tools_path="$(cygpath -w "$tools_path")"
|
tools_path="$(cygpath -w "$tools_path")"
|
||||||
|
tool_data="$(echo "$tool_data" | sed 's/\\/\\\\/g')"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
jq_script="$(cat <<-'EOF'
|
jq_script="$(cat <<-'EOF'
|
||||||
|
|||||||
@@ -49,11 +49,6 @@ def parse_raw_data(data):
|
|||||||
|
|
||||||
|
|
||||||
def parse_argv():
|
def parse_argv():
|
||||||
tool_data_file = os.environ.get("LLM_TOOL_DATA_FILE")
|
|
||||||
if tool_data_file and os.path.isfile(tool_data_file):
|
|
||||||
with open(tool_data_file, "r", encoding="utf-8") as f:
|
|
||||||
return f.read()
|
|
||||||
|
|
||||||
argv = sys.argv[:] + [None] * max(0, 2 - len(sys.argv))
|
argv = sys.argv[:] + [None] * max(0, 2 - len(sys.argv))
|
||||||
|
|
||||||
tool_data = argv[1]
|
tool_data = argv[1]
|
||||||
|
|||||||
@@ -13,11 +13,7 @@ main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
parse_argv() {
|
parse_argv() {
|
||||||
if [[ -n "$LLM_TOOL_DATA_FILE" ]] && [[ -f "$LLM_TOOL_DATA_FILE" ]]; then
|
tool_data="$1"
|
||||||
tool_data="$(cat "$LLM_TOOL_DATA_FILE")"
|
|
||||||
else
|
|
||||||
tool_data="$1"
|
|
||||||
fi
|
|
||||||
if [[ -z "$tool_data" ]]; then
|
if [[ -z "$tool_data" ]]; then
|
||||||
die "usage: ./{function_name}.sh <tool-data>"
|
die "usage: ./{function_name}.sh <tool-data>"
|
||||||
fi
|
fi
|
||||||
@@ -58,6 +54,7 @@ run() {
|
|||||||
if [[ "$OS" == "Windows_NT" ]]; then
|
if [[ "$OS" == "Windows_NT" ]]; then
|
||||||
set -o igncr
|
set -o igncr
|
||||||
tool_path="$(cygpath -w "$tool_path")"
|
tool_path="$(cygpath -w "$tool_path")"
|
||||||
|
tool_data="$(echo "$tool_data" | sed 's/\\/\\\\/g')"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
jq_script="$(cat <<-'EOF'
|
jq_script="$(cat <<-'EOF'
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ max_concurrent_agents: 4 # Maximum number of agents that can run simulta
|
|||||||
max_agent_depth: 3 # Maximum nesting depth for sub-agents (prevents runaway spawning)
|
max_agent_depth: 3 # Maximum nesting depth for sub-agents (prevents runaway spawning)
|
||||||
inject_spawn_instructions: true # Inject the default agent spawning instructions into the agent's system prompt
|
inject_spawn_instructions: true # Inject the default agent spawning instructions into the agent's system prompt
|
||||||
summarization_model: null # Model to use for summarizing sub-agent output (e.g. 'openai:gpt-4o-mini'); defaults to current model
|
summarization_model: null # Model to use for summarizing sub-agent output (e.g. 'openai:gpt-4o-mini'); defaults to current model
|
||||||
summarization_threshold: 4000 # Character threshold above which sub-agent output is summarized before returning to parent
|
summarization_threshold: 4000 # Character threshold above which sub-agent output is summarized before returning to parent
|
||||||
escalation_timeout: 300 # Seconds a sub-agent waits for a user interaction response before timing out (default: 5 minutes)
|
escalation_timeout: 300 # Seconds a sub-agent waits for a user interaction response before timing out (default: 5 minutes)
|
||||||
mcp_servers: # Optional list of MCP servers that the agent utilizes
|
mcp_servers: # Optional list of MCP servers that the agent utilizes
|
||||||
- github # Corresponds to the name of an MCP server in the `<loki-config-dir>/functions/mcp.json` file
|
- github # Corresponds to the name of an MCP server in the `<loki-config-dir>/functions/mcp.json` file
|
||||||
|
|||||||
+1
-1
@@ -77,7 +77,7 @@ visible_tools: # Which tools are visible to be compiled (and a
|
|||||||
mcp_server_support: true # Enables or disables MCP servers (globally).
|
mcp_server_support: true # Enables or disables MCP servers (globally).
|
||||||
mapping_mcp_servers: # Alias for an MCP server or set of servers
|
mapping_mcp_servers: # Alias for an MCP server or set of servers
|
||||||
git: github,gitmcp
|
git: github,gitmcp
|
||||||
enabled_mcp_servers: null # Which MCP servers to enable by default (e.g. 'github,slack,ddg-search')
|
enabled_mcp_servers: null # Which MCP servers to enable by default (e.g. 'github,slack')
|
||||||
|
|
||||||
# ---- Session ----
|
# ---- Session ----
|
||||||
# See the [Session documentation](./docs/SESSIONS.md) for more information
|
# See the [Session documentation](./docs/SESSIONS.md) for more information
|
||||||
|
|||||||
+1
-3
@@ -467,12 +467,11 @@ inject_todo_instructions: true # Include the default todo instructions into pr
|
|||||||
|
|
||||||
### How It Works
|
### How It Works
|
||||||
|
|
||||||
1. When `inject_todo_instructions` is enabled, agents receive instructions on using five built-in tools:
|
1. When `inject_todo_instructions` is enabled, agents receive instructions on using four built-in tools:
|
||||||
- `todo__init`: Initialize a todo list with a goal
|
- `todo__init`: Initialize a todo list with a goal
|
||||||
- `todo__add`: Add a task to the list
|
- `todo__add`: Add a task to the list
|
||||||
- `todo__done`: Mark a task complete
|
- `todo__done`: Mark a task complete
|
||||||
- `todo__list`: View current todo state
|
- `todo__list`: View current todo state
|
||||||
- `todo__clear`: Clear the entire todo list and reset the goal
|
|
||||||
|
|
||||||
These instructions are a reasonable default that detail how to use Loki's To-Do System. If you wish,
|
These instructions are a reasonable default that detail how to use Loki's To-Do System. If you wish,
|
||||||
you can disable the injection of the default instructions and specify your own instructions for how
|
you can disable the injection of the default instructions and specify your own instructions for how
|
||||||
@@ -715,7 +714,6 @@ Loki comes packaged with some useful built-in agents:
|
|||||||
* `code-reviewer`: A [CodeRabbit](https://coderabbit.ai)-style code reviewer that spawns per-file reviewers using the teammate messaging pattern
|
* `code-reviewer`: A [CodeRabbit](https://coderabbit.ai)-style code reviewer that spawns per-file reviewers using the teammate messaging pattern
|
||||||
* `demo`: An example agent to use for reference when learning to create your own agents
|
* `demo`: An example agent to use for reference when learning to create your own agents
|
||||||
* `explore`: An agent designed to help you explore and understand your codebase
|
* `explore`: An agent designed to help you explore and understand your codebase
|
||||||
* `file-reviewer`: An agent designed to perform code-review on a single file (used by the `code-reviewer` agent)
|
|
||||||
* `jira-helper`: An agent that assists you with all your Jira-related tasks
|
* `jira-helper`: An agent that assists you with all your Jira-related tasks
|
||||||
* `oracle`: An agent for high-level architecture, design decisions, and complex debugging
|
* `oracle`: An agent for high-level architecture, design decisions, and complex debugging
|
||||||
* `sisyphus`: A powerhouse orchestrator agent for writing complex code and acting as a natural language interface for your codebase (similar to ClaudeCode, Gemini CLI, Codex, or OpenCode). Uses sub-agent spawning to delegate to `explore`, `coder`, and `oracle`.
|
* `sisyphus`: A powerhouse orchestrator agent for writing complex code and acting as a natural language interface for your codebase (similar to ClaudeCode, Gemini CLI, Codex, or OpenCode). Uses sub-agent spawning to delegate to `explore`, `coder`, and `oracle`.
|
||||||
|
|||||||
@@ -107,7 +107,6 @@ The following variables can be used to change the log level of Loki or the locat
|
|||||||
can also pass the `--disable-log-colors` flag as well.
|
can also pass the `--disable-log-colors` flag as well.
|
||||||
|
|
||||||
## Miscellaneous Variables
|
## Miscellaneous Variables
|
||||||
| Environment Variable | Description | Default Value |
|
| Environment Variable | Description | Default Value |
|
||||||
|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
|
|----------------------|--------------------------------------------------------------------------------------------------|---------------|
|
||||||
| `AUTO_CONFIRM` | Bypass all `guard_*` checks in the bash prompt helpers; useful for agent composition and routing | |
|
| `AUTO_CONFIRM` | Bypass all `guard_*` checks in the bash prompt helpers; useful for agent composition and routing | |
|
||||||
| `LLM_TOOL_DATA_FILE` | Set automatically by Loki on Windows. Points to a temporary file containing the JSON tool call data. <br>Tool scripts (`run-tool.sh`, `run-agent.sh`, etc.) read from this file instead of command-line args <br>to avoid JSON escaping issues when data passes through `cmd.exe` → bash. **Not intended to be set by users.** | |
|
|
||||||
+7
-8
@@ -120,14 +120,13 @@ For more information on sessions and how to use them in Loki, refer to the [sess
|
|||||||
Loki lets you build OpenAI GPT-style agents. The following commands let you interact with and manage your agents in
|
Loki lets you build OpenAI GPT-style agents. The following commands let you interact with and manage your agents in
|
||||||
Loki:
|
Loki:
|
||||||
|
|
||||||
| Command | Description |
|
| Command | Description |
|
||||||
|----------------------|-----------------------------------------------------------------------------------------------|
|
|----------------------|------------------------------------------------------------|
|
||||||
| `.agent` | Use an agent |
|
| `.agent` | Use an agent |
|
||||||
| `.starter` | Display and use conversation starters for the active agent |
|
| `.starter` | Display and use conversation starters for the active agent |
|
||||||
| `.clear todo` | Clear the todo list and stop auto-continuation (requires `auto_continue: true` on the agent) |
|
| `.edit agent-config` | Open the agent configuration in your preferred text editor |
|
||||||
| `.edit agent-config` | Open the agent configuration in your preferred text editor |
|
| `.info agent` | Display information about the active agent |
|
||||||
| `.info agent` | Display information about the active agent |
|
| `.exit agent` | Leave the active agent |
|
||||||
| `.exit agent` | Leave the active agent |
|
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|||||||
@@ -117,22 +117,6 @@ Display the current todo list with status of each item.
|
|||||||
|
|
||||||
**Returns:** The full todo list with goal, progress, and item statuses
|
**Returns:** The full todo list with goal, progress, and item statuses
|
||||||
|
|
||||||
### `todo__clear`
|
|
||||||
Clear the entire todo list and reset the goal. Use when the current task has been canceled or invalidated.
|
|
||||||
|
|
||||||
**Parameters:** None
|
|
||||||
|
|
||||||
**Returns:** Confirmation that the todo list was cleared
|
|
||||||
|
|
||||||
### REPL Command: `.clear todo`
|
|
||||||
You can also clear the todo list manually from the REPL by typing `.clear todo`. This is useful when:
|
|
||||||
- You gave a custom response that changes or cancels the current task
|
|
||||||
- The agent is stuck in auto-continuation with stale todos
|
|
||||||
- You want to start fresh without leaving and re-entering the agent
|
|
||||||
|
|
||||||
**Note:** This command is only available when an agent with `auto_continue: true` is active. If the todo
|
|
||||||
system isn't enabled for the current agent, the command will display an error message.
|
|
||||||
|
|
||||||
## Auto-Continuation
|
## Auto-Continuation
|
||||||
When `auto_continue` is enabled, Loki automatically sends a continuation prompt if:
|
When `auto_continue` is enabled, Loki automatically sends a continuation prompt if:
|
||||||
|
|
||||||
|
|||||||
@@ -142,25 +142,6 @@ temporary localhost server to capture the callback automatically (e.g. Gemini) o
|
|||||||
code back into the terminal (e.g. Claude). Loki stores the tokens in `~/.cache/loki/oauth` and automatically refreshes
|
code back into the terminal (e.g. Claude). Loki stores the tokens in `~/.cache/loki/oauth` and automatically refreshes
|
||||||
them when they expire.
|
them when they expire.
|
||||||
|
|
||||||
#### Gemini OAuth Note
|
|
||||||
Loki uses the following scopes for OAuth with Gemini:
|
|
||||||
* https://www.googleapis.com/auth/generative-language.peruserquota
|
|
||||||
* https://www.googleapis.com/auth/userinfo.email
|
|
||||||
* https://www.googleapis.com/auth/generative-language.retriever (Sensitive)
|
|
||||||
|
|
||||||
Since the `generative-language.retriever` scope is a sensitive scope, Google needs to verify Loki, which requires full
|
|
||||||
branding (logo, official website, privacy policy, terms of service, etc.). The Loki app is open-source and is designed
|
|
||||||
to be used as a simple CLI. As such, there's no terms of service or privacy policy associated with it, and thus Google
|
|
||||||
cannot verify Loki.
|
|
||||||
|
|
||||||
So, when you kick off OAuth with Gemini, you may see a page similar to the following:
|
|
||||||

|
|
||||||
|
|
||||||
Simply click the `Advanced` link and click `Go to Loki (unsafe)` to continue the OAuth flow.
|
|
||||||
|
|
||||||

|
|
||||||

|
|
||||||
|
|
||||||
**Step 3: Use normally**
|
**Step 3: Use normally**
|
||||||
|
|
||||||
Once authenticated, the client works like any other. Loki uses the stored OAuth tokens automatically:
|
Once authenticated, the client works like any other. Loki uses the stored OAuth tokens automatically:
|
||||||
|
|||||||
@@ -55,7 +55,6 @@ Loki ships with a `functions/mcp.json` file that includes some useful MCP server
|
|||||||
* [github](https://github.com/github/github-mcp-server) - Interact with GitHub repositories, issues, pull requests, and more.
|
* [github](https://github.com/github/github-mcp-server) - Interact with GitHub repositories, issues, pull requests, and more.
|
||||||
* [docker](https://github.com/ckreiling/mcp-server-docker) - Manage your local Docker containers with natural language
|
* [docker](https://github.com/ckreiling/mcp-server-docker) - Manage your local Docker containers with natural language
|
||||||
* [slack](https://github.com/korotovsky/slack-mcp-server) - Interact with Slack
|
* [slack](https://github.com/korotovsky/slack-mcp-server) - Interact with Slack
|
||||||
* [ddg-search](https://github.com/nickclyde/duckduckgo-mcp-server) - Perform web searches with the DuckDuckGo search engine
|
|
||||||
|
|
||||||
## Loki Configuration
|
## Loki Configuration
|
||||||
MCP servers, like tools, can be used in a handful of contexts:
|
MCP servers, like tools, can be used in a handful of contexts:
|
||||||
|
|||||||
@@ -3,13 +3,6 @@
|
|||||||
# - https://platform.openai.com/docs/api-reference/chat
|
# - https://platform.openai.com/docs/api-reference/chat
|
||||||
- provider: openai
|
- provider: openai
|
||||||
models:
|
models:
|
||||||
- name: gpt-5.2
|
|
||||||
max_input_tokens: 400000
|
|
||||||
max_output_tokens: 128000
|
|
||||||
input_price: 1.75
|
|
||||||
output_price: 14
|
|
||||||
supports_vision: true
|
|
||||||
supports_function_calling: true
|
|
||||||
- name: gpt-5.1
|
- name: gpt-5.1
|
||||||
max_input_tokens: 400000
|
max_input_tokens: 400000
|
||||||
max_output_tokens: 128000
|
max_output_tokens: 128000
|
||||||
|
|||||||
+1
-40
@@ -11,7 +11,6 @@ use serde::Deserialize;
|
|||||||
use serde_json::{Value, json};
|
use serde_json::{Value, json};
|
||||||
|
|
||||||
const API_BASE: &str = "https://api.anthropic.com/v1";
|
const API_BASE: &str = "https://api.anthropic.com/v1";
|
||||||
const CLAUDE_CODE_PREFIX: &str = "You are Claude Code, Anthropic's official CLI for Claude.";
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize)]
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
pub struct ClaudeConfig {
|
pub struct ClaudeConfig {
|
||||||
@@ -85,7 +84,7 @@ async fn prepare_chat_completions(
|
|||||||
let ready = oauth::prepare_oauth_access_token(client, &provider, self_.name()).await?;
|
let ready = oauth::prepare_oauth_access_token(client, &provider, self_.name()).await?;
|
||||||
if !ready {
|
if !ready {
|
||||||
bail!(
|
bail!(
|
||||||
"OAuth configured but no tokens found for '{}'. Run: 'loki --authenticate {}' or '.authenticate' in the REPL",
|
"OAuth configured but no tokens found for '{}'. Run: loki --authenticate {}",
|
||||||
self_.name(),
|
self_.name(),
|
||||||
self_.name()
|
self_.name()
|
||||||
);
|
);
|
||||||
@@ -95,7 +94,6 @@ async fn prepare_chat_completions(
|
|||||||
for (key, value) in provider.extra_request_headers() {
|
for (key, value) in provider.extra_request_headers() {
|
||||||
request_data.header(key, value);
|
request_data.header(key, value);
|
||||||
}
|
}
|
||||||
inject_oauth_system_prompt(&mut request_data.body);
|
|
||||||
} else if let Ok(api_key) = self_.get_api_key() {
|
} else if let Ok(api_key) = self_.get_api_key() {
|
||||||
request_data.header("x-api-key", api_key);
|
request_data.header("x-api-key", api_key);
|
||||||
} else {
|
} else {
|
||||||
@@ -109,43 +107,6 @@ async fn prepare_chat_completions(
|
|||||||
Ok(request_data)
|
Ok(request_data)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Anthropic requires OAuth-authenticated requests to include a Claude Code
|
|
||||||
/// system prompt prefix in order to consider a request body as "valid".
|
|
||||||
///
|
|
||||||
/// This behavior was discovered 2026-03-17.
|
|
||||||
///
|
|
||||||
/// So this function injects the Claude Code system prompt into the request
|
|
||||||
/// body to make it a valid request.
|
|
||||||
fn inject_oauth_system_prompt(body: &mut Value) {
|
|
||||||
let prefix_block = json!({
|
|
||||||
"type": "text",
|
|
||||||
"text": CLAUDE_CODE_PREFIX,
|
|
||||||
});
|
|
||||||
|
|
||||||
match body.get("system") {
|
|
||||||
Some(Value::String(existing)) => {
|
|
||||||
let existing_block = json!({
|
|
||||||
"type": "text",
|
|
||||||
"text": existing,
|
|
||||||
});
|
|
||||||
body["system"] = json!([prefix_block, existing_block]);
|
|
||||||
}
|
|
||||||
Some(Value::Array(_)) => {
|
|
||||||
if let Some(arr) = body["system"].as_array_mut() {
|
|
||||||
let already_injected = arr
|
|
||||||
.iter()
|
|
||||||
.any(|block| block["text"].as_str() == Some(CLAUDE_CODE_PREFIX));
|
|
||||||
if !already_injected {
|
|
||||||
arr.insert(0, prefix_block);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
body["system"] = json!([prefix_block]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn claude_chat_completions(
|
pub async fn claude_chat_completions(
|
||||||
builder: RequestBuilder,
|
builder: RequestBuilder,
|
||||||
_model: &Model,
|
_model: &Model,
|
||||||
|
|||||||
@@ -111,7 +111,7 @@ async fn prepare_chat_completions(
|
|||||||
let ready = oauth::prepare_oauth_access_token(client, &provider, self_.name()).await?;
|
let ready = oauth::prepare_oauth_access_token(client, &provider, self_.name()).await?;
|
||||||
if !ready {
|
if !ready {
|
||||||
bail!(
|
bail!(
|
||||||
"OAuth configured but no tokens found for '{}'. Run: 'loki --authenticate {}' or '.authenticate' in the REPL",
|
"OAuth configured but no tokens found for '{}'. Run: loki --authenticate {}",
|
||||||
self_.name(),
|
self_.name(),
|
||||||
self_.name()
|
self_.name()
|
||||||
);
|
);
|
||||||
@@ -181,7 +181,7 @@ async fn prepare_embeddings(
|
|||||||
let ready = oauth::prepare_oauth_access_token(client, &provider, self_.name()).await?;
|
let ready = oauth::prepare_oauth_access_token(client, &provider, self_.name()).await?;
|
||||||
if !ready {
|
if !ready {
|
||||||
bail!(
|
bail!(
|
||||||
"OAuth configured but no tokens found for '{}'. Run: 'loki --authenticate {}' or '.authenticate' in the REPL",
|
"OAuth configured but no tokens found for '{}'. Run: loki --authenticate {}",
|
||||||
self_.name(),
|
self_.name(),
|
||||||
self_.name()
|
self_.name()
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ use super::oauth::{OAuthProvider, TokenRequestFormat};
|
|||||||
|
|
||||||
pub struct GeminiOAuthProvider;
|
pub struct GeminiOAuthProvider;
|
||||||
|
|
||||||
|
// TODO: Replace with real credentials after registering Loki with Google Cloud Console
|
||||||
const GEMINI_CLIENT_ID: &str =
|
const GEMINI_CLIENT_ID: &str =
|
||||||
"50826443741-upqcebrs4gctqht1f08ku46qlbirkdsj.apps.googleusercontent.com";
|
"50826443741-upqcebrs4gctqht1f08ku46qlbirkdsj.apps.googleusercontent.com";
|
||||||
const GEMINI_CLIENT_SECRET: &str = "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";
|
const GEMINI_CLIENT_SECRET: &str = "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";
|
||||||
@@ -28,7 +29,7 @@ impl OAuthProvider for GeminiOAuthProvider {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn scopes(&self) -> &str {
|
fn scopes(&self) -> &str {
|
||||||
"https://www.googleapis.com/auth/generative-language.peruserquota https://www.googleapis.com/auth/generative-language.retriever https://www.googleapis.com/auth/userinfo.email"
|
"https://www.googleapis.com/auth/cloud-platform.readonly https://www.googleapis.com/auth/userinfo.email"
|
||||||
}
|
}
|
||||||
|
|
||||||
fn client_secret(&self) -> Option<&str> {
|
fn client_secret(&self) -> Option<&str> {
|
||||||
|
|||||||
@@ -177,10 +177,6 @@ impl Model {
|
|||||||
self.data.max_output_tokens
|
self.data.max_output_tokens
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn supports_function_calling(&self) -> bool {
|
|
||||||
self.data.supports_function_calling
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn no_stream(&self) -> bool {
|
pub fn no_stream(&self) -> bool {
|
||||||
self.data.no_stream
|
self.data.no_stream
|
||||||
}
|
}
|
||||||
|
|||||||
+21
-20
@@ -1,12 +1,12 @@
|
|||||||
use super::ClientConfig;
|
use super::ClientConfig;
|
||||||
use super::access_token::{is_valid_access_token, set_access_token};
|
use super::access_token::{is_valid_access_token, set_access_token};
|
||||||
use crate::config::Config;
|
use crate::config::Config;
|
||||||
use anyhow::{Result, anyhow, bail};
|
use anyhow::{Result, bail};
|
||||||
use base64::Engine;
|
use base64::Engine;
|
||||||
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
|
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use inquire::Text;
|
use inquire::Text;
|
||||||
use reqwest::{Client as ReqwestClient, RequestBuilder};
|
use reqwest::Client as ReqwestClient;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use sha2::{Digest, Sha256};
|
use sha2::{Digest, Sha256};
|
||||||
@@ -76,6 +76,7 @@ pub async fn run_oauth_flow(provider: &dyn OAuthProvider, client_name: &str) ->
|
|||||||
let listener = TcpListener::bind("127.0.0.1:0")?;
|
let listener = TcpListener::bind("127.0.0.1:0")?;
|
||||||
let port = listener.local_addr()?.port();
|
let port = listener.local_addr()?.port();
|
||||||
let uri = format!("http://127.0.0.1:{port}/callback");
|
let uri = format!("http://127.0.0.1:{port}/callback");
|
||||||
|
// Drop the listener so run_oauth_flow can re-bind below
|
||||||
drop(listener);
|
drop(listener);
|
||||||
uri
|
uri
|
||||||
} else {
|
} else {
|
||||||
@@ -148,15 +149,15 @@ pub async fn run_oauth_flow(provider: &dyn OAuthProvider, client_name: &str) ->
|
|||||||
|
|
||||||
let access_token = response["access_token"]
|
let access_token = response["access_token"]
|
||||||
.as_str()
|
.as_str()
|
||||||
.ok_or_else(|| anyhow!("Missing access_token in response: {response}"))?
|
.ok_or_else(|| anyhow::anyhow!("Missing access_token in response: {response}"))?
|
||||||
.to_string();
|
.to_string();
|
||||||
let refresh_token = response["refresh_token"]
|
let refresh_token = response["refresh_token"]
|
||||||
.as_str()
|
.as_str()
|
||||||
.ok_or_else(|| anyhow!("Missing refresh_token in response: {response}"))?
|
.ok_or_else(|| anyhow::anyhow!("Missing refresh_token in response: {response}"))?
|
||||||
.to_string();
|
.to_string();
|
||||||
let expires_in = response["expires_in"]
|
let expires_in = response["expires_in"]
|
||||||
.as_i64()
|
.as_i64()
|
||||||
.ok_or_else(|| anyhow!("Missing expires_in in response: {response}"))?;
|
.ok_or_else(|| anyhow::anyhow!("Missing expires_in in response: {response}"))?;
|
||||||
|
|
||||||
let expires_at = Utc::now().timestamp() + expires_in;
|
let expires_at = Utc::now().timestamp() + expires_in;
|
||||||
|
|
||||||
@@ -213,7 +214,7 @@ pub async fn refresh_oauth_token(
|
|||||||
|
|
||||||
let access_token = response["access_token"]
|
let access_token = response["access_token"]
|
||||||
.as_str()
|
.as_str()
|
||||||
.ok_or_else(|| anyhow!("Missing access_token in refresh response: {response}"))?
|
.ok_or_else(|| anyhow::anyhow!("Missing access_token in refresh response: {response}"))?
|
||||||
.to_string();
|
.to_string();
|
||||||
let refresh_token = response["refresh_token"]
|
let refresh_token = response["refresh_token"]
|
||||||
.as_str()
|
.as_str()
|
||||||
@@ -221,7 +222,7 @@ pub async fn refresh_oauth_token(
|
|||||||
.unwrap_or_else(|| tokens.refresh_token.clone());
|
.unwrap_or_else(|| tokens.refresh_token.clone());
|
||||||
let expires_in = response["expires_in"]
|
let expires_in = response["expires_in"]
|
||||||
.as_i64()
|
.as_i64()
|
||||||
.ok_or_else(|| anyhow!("Missing expires_in in refresh response: {response}"))?;
|
.ok_or_else(|| anyhow::anyhow!("Missing expires_in in refresh response: {response}"))?;
|
||||||
|
|
||||||
let expires_at = Utc::now().timestamp() + expires_in;
|
let expires_at = Utc::now().timestamp() + expires_in;
|
||||||
|
|
||||||
@@ -265,7 +266,7 @@ fn build_token_request(
|
|||||||
client: &ReqwestClient,
|
client: &ReqwestClient,
|
||||||
provider: &(impl OAuthProvider + ?Sized),
|
provider: &(impl OAuthProvider + ?Sized),
|
||||||
params: &[(&str, &str)],
|
params: &[(&str, &str)],
|
||||||
) -> RequestBuilder {
|
) -> reqwest::RequestBuilder {
|
||||||
let mut request = match provider.token_request_format() {
|
let mut request = match provider.token_request_format() {
|
||||||
TokenRequestFormat::Json => {
|
TokenRequestFormat::Json => {
|
||||||
let body: serde_json::Map<String, Value> = params
|
let body: serde_json::Map<String, Value> = params
|
||||||
@@ -307,7 +308,7 @@ fn listen_for_oauth_callback(redirect_uri: &str) -> Result<(String, String)> {
|
|||||||
let host = url.host_str().unwrap_or("127.0.0.1");
|
let host = url.host_str().unwrap_or("127.0.0.1");
|
||||||
let port = url
|
let port = url
|
||||||
.port()
|
.port()
|
||||||
.ok_or_else(|| anyhow!("No port in redirect URI"))?;
|
.ok_or_else(|| anyhow::anyhow!("No port in redirect URI"))?;
|
||||||
let path = url.path();
|
let path = url.path();
|
||||||
|
|
||||||
println!("Waiting for OAuth callback on {redirect_uri} ...\n");
|
println!("Waiting for OAuth callback on {redirect_uri} ...\n");
|
||||||
@@ -322,11 +323,19 @@ fn listen_for_oauth_callback(redirect_uri: &str) -> Result<(String, String)> {
|
|||||||
let request_path = request_line
|
let request_path = request_line
|
||||||
.split_whitespace()
|
.split_whitespace()
|
||||||
.nth(1)
|
.nth(1)
|
||||||
.ok_or_else(|| anyhow!("Malformed HTTP request from OAuth callback"))?;
|
.ok_or_else(|| anyhow::anyhow!("Malformed HTTP request from OAuth callback"))?;
|
||||||
|
|
||||||
let full_url = format!("http://{host}:{port}{request_path}");
|
let full_url = format!("http://{host}:{port}{request_path}");
|
||||||
let parsed: Url = full_url.parse()?;
|
let parsed: Url = full_url.parse()?;
|
||||||
|
|
||||||
|
let response_body = "<html><body><h2>Authentication successful!</h2><p>You can close this tab and return to your terminal.</p></body></html>";
|
||||||
|
let response = format!(
|
||||||
|
"HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
|
||||||
|
response_body.len(),
|
||||||
|
response_body
|
||||||
|
);
|
||||||
|
stream.write_all(response.as_bytes())?;
|
||||||
|
|
||||||
if !parsed.path().starts_with(path) {
|
if !parsed.path().starts_with(path) {
|
||||||
bail!("Unexpected callback path: {}", parsed.path());
|
bail!("Unexpected callback path: {}", parsed.path());
|
||||||
}
|
}
|
||||||
@@ -341,22 +350,14 @@ fn listen_for_oauth_callback(redirect_uri: &str) -> Result<(String, String)> {
|
|||||||
.find(|(k, _)| k == "error")
|
.find(|(k, _)| k == "error")
|
||||||
.map(|(_, v)| v.to_string())
|
.map(|(_, v)| v.to_string())
|
||||||
.unwrap_or_else(|| "unknown".to_string());
|
.unwrap_or_else(|| "unknown".to_string());
|
||||||
anyhow!("OAuth callback returned error: {error}")
|
anyhow::anyhow!("OAuth callback returned error: {error}")
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let returned_state = parsed
|
let returned_state = parsed
|
||||||
.query_pairs()
|
.query_pairs()
|
||||||
.find(|(k, _)| k == "state")
|
.find(|(k, _)| k == "state")
|
||||||
.map(|(_, v)| v.to_string())
|
.map(|(_, v)| v.to_string())
|
||||||
.ok_or_else(|| anyhow!("Missing state parameter in OAuth callback"))?;
|
.ok_or_else(|| anyhow::anyhow!("Missing state parameter in OAuth callback"))?;
|
||||||
|
|
||||||
let response_body = "<html><body><h2>Authentication successful!</h2><p>You can close this tab and return to your terminal.</p></body></html>";
|
|
||||||
let response = format!(
|
|
||||||
"HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
|
|
||||||
response_body.len(),
|
|
||||||
response_body
|
|
||||||
);
|
|
||||||
stream.write_all(response.as_bytes())?;
|
|
||||||
|
|
||||||
Ok((code, returned_state))
|
Ok((code, returned_state))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -476,11 +476,6 @@ impl Agent {
|
|||||||
self.todo_list.mark_done(id)
|
self.todo_list.mark_done(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn clear_todo_list(&mut self) {
|
|
||||||
self.todo_list.clear();
|
|
||||||
self.reset_continuation();
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn continuation_prompt(&self) -> String {
|
pub fn continuation_prompt(&self) -> String {
|
||||||
self.config.continuation_prompt.clone().unwrap_or_else(|| {
|
self.config.continuation_prompt.clone().unwrap_or_else(|| {
|
||||||
formatdoc! {"
|
formatdoc! {"
|
||||||
|
|||||||
+5
-10
@@ -239,17 +239,12 @@ impl Input {
|
|||||||
patch_messages(&mut messages, model);
|
patch_messages(&mut messages, model);
|
||||||
model.guard_max_input_tokens(&messages)?;
|
model.guard_max_input_tokens(&messages)?;
|
||||||
let (temperature, top_p) = (self.role().temperature(), self.role().top_p());
|
let (temperature, top_p) = (self.role().temperature(), self.role().top_p());
|
||||||
let functions = if model.supports_function_calling() {
|
let functions = self.config.read().select_functions(self.role());
|
||||||
let fns = self.config.read().select_functions(self.role());
|
if let Some(vec) = &functions {
|
||||||
if let Some(vec) = &fns {
|
for def in vec {
|
||||||
for def in vec {
|
debug!("Function definition: {:?}", def.name);
|
||||||
debug!("Function definition: {:?}", def.name);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
fns
|
}
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
Ok(ChatCompletionsData {
|
Ok(ChatCompletionsData {
|
||||||
messages,
|
messages,
|
||||||
temperature,
|
temperature,
|
||||||
|
|||||||
@@ -1842,12 +1842,6 @@ impl Config {
|
|||||||
bail!("Already in an agent, please run '.exit agent' first to exit the current agent.");
|
bail!("Already in an agent, please run '.exit agent' first to exit the current agent.");
|
||||||
}
|
}
|
||||||
let agent = Agent::init(config, agent_name, abort_signal.clone()).await?;
|
let agent = Agent::init(config, agent_name, abort_signal.clone()).await?;
|
||||||
if !agent.model().supports_function_calling() {
|
|
||||||
eprintln!(
|
|
||||||
"Warning: The model '{}' does not support function calling. Agent tools (including todo, spawning, and user interaction) will not be available.",
|
|
||||||
agent.model().id()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
let session = session_name.map(|v| v.to_string()).or_else(|| {
|
let session = session_name.map(|v| v.to_string()).or_else(|| {
|
||||||
if config.read().macro_flag {
|
if config.read().macro_flag {
|
||||||
None
|
None
|
||||||
|
|||||||
@@ -7,12 +7,10 @@ pub(in crate::config) const DEFAULT_TODO_INSTRUCTIONS: &str = indoc! {"
|
|||||||
- `todo__add`: Add individual tasks. Add all planned steps before starting work.
|
- `todo__add`: Add individual tasks. Add all planned steps before starting work.
|
||||||
- `todo__done`: Mark a task done by id. Call this immediately after completing each step.
|
- `todo__done`: Mark a task done by id. Call this immediately after completing each step.
|
||||||
- `todo__list`: Show the current todo list.
|
- `todo__list`: Show the current todo list.
|
||||||
- `todo__clear`: Clear the entire todo list and reset the goal. Use when the user cancels or changes direction.
|
|
||||||
|
|
||||||
RULES:
|
RULES:
|
||||||
- Always create a todo list before starting work.
|
- Always create a todo list before starting work.
|
||||||
- Mark each task done as soon as you finish it; do not batch.
|
- Mark each task done as soon as you finish it; do not batch.
|
||||||
- If the user cancels the current task or changes direction, call `todo__clear` immediately.
|
|
||||||
- If you stop with incomplete tasks, the system will automatically prompt you to continue."
|
- If you stop with incomplete tasks, the system will automatically prompt you to continue."
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -67,11 +67,6 @@ impl TodoList {
|
|||||||
self.todos.is_empty()
|
self.todos.is_empty()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn clear(&mut self) {
|
|
||||||
self.goal.clear();
|
|
||||||
self.todos.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn render_for_model(&self) -> String {
|
pub fn render_for_model(&self) -> String {
|
||||||
let mut lines = Vec::new();
|
let mut lines = Vec::new();
|
||||||
if !self.goal.is_empty() {
|
if !self.goal.is_empty() {
|
||||||
@@ -154,21 +149,6 @@ mod tests {
|
|||||||
assert!(rendered.contains("○ 2. Map"));
|
assert!(rendered.contains("○ 2. Map"));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_clear() {
|
|
||||||
let mut list = TodoList::new("Some goal");
|
|
||||||
list.add("Task 1");
|
|
||||||
list.add("Task 2");
|
|
||||||
list.mark_done(1);
|
|
||||||
assert!(!list.is_empty());
|
|
||||||
|
|
||||||
list.clear();
|
|
||||||
assert!(list.is_empty());
|
|
||||||
assert!(list.goal.is_empty());
|
|
||||||
assert_eq!(list.todos.len(), 0);
|
|
||||||
assert!(!list.has_incomplete());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_serialization_roundtrip() {
|
fn test_serialization_roundtrip() {
|
||||||
let mut list = TodoList::new("Roundtrip");
|
let mut list = TodoList::new("Roundtrip");
|
||||||
|
|||||||
+15
-30
@@ -613,7 +613,6 @@ impl Functions {
|
|||||||
)
|
)
|
||||||
})?;
|
})?;
|
||||||
let content_template = unsafe { std::str::from_utf8_unchecked(&embedded_file.data) };
|
let content_template = unsafe { std::str::from_utf8_unchecked(&embedded_file.data) };
|
||||||
let to_script_path = |p: &str| -> String { p.replace('\\', "/") };
|
|
||||||
let content = match binary_type {
|
let content = match binary_type {
|
||||||
BinaryType::Tool(None) => {
|
BinaryType::Tool(None) => {
|
||||||
let root_dir = Config::functions_dir();
|
let root_dir = Config::functions_dir();
|
||||||
@@ -623,8 +622,8 @@ impl Functions {
|
|||||||
);
|
);
|
||||||
content_template
|
content_template
|
||||||
.replace("{function_name}", binary_name)
|
.replace("{function_name}", binary_name)
|
||||||
.replace("{root_dir}", &to_script_path(&root_dir.to_string_lossy()))
|
.replace("{root_dir}", &root_dir.to_string_lossy())
|
||||||
.replace("{tool_path}", &to_script_path(&tool_path))
|
.replace("{tool_path}", &tool_path)
|
||||||
}
|
}
|
||||||
BinaryType::Tool(Some(agent_name)) => {
|
BinaryType::Tool(Some(agent_name)) => {
|
||||||
let root_dir = Config::agent_data_dir(agent_name);
|
let root_dir = Config::agent_data_dir(agent_name);
|
||||||
@@ -634,19 +633,16 @@ impl Functions {
|
|||||||
);
|
);
|
||||||
content_template
|
content_template
|
||||||
.replace("{function_name}", binary_name)
|
.replace("{function_name}", binary_name)
|
||||||
.replace("{root_dir}", &to_script_path(&root_dir.to_string_lossy()))
|
.replace("{root_dir}", &root_dir.to_string_lossy())
|
||||||
.replace("{tool_path}", &to_script_path(&tool_path))
|
.replace("{tool_path}", &tool_path)
|
||||||
}
|
}
|
||||||
BinaryType::Agent => content_template
|
BinaryType::Agent => content_template
|
||||||
.replace("{agent_name}", binary_name)
|
.replace("{agent_name}", binary_name)
|
||||||
.replace(
|
.replace("{config_dir}", &Config::config_dir().to_string_lossy()),
|
||||||
"{config_dir}",
|
|
||||||
&to_script_path(&Config::config_dir().to_string_lossy()),
|
|
||||||
),
|
|
||||||
}
|
}
|
||||||
.replace(
|
.replace(
|
||||||
"{prompt_utils_file}",
|
"{prompt_utils_file}",
|
||||||
&to_script_path(&Config::bash_prompt_utils_file().to_string_lossy()),
|
&Config::bash_prompt_utils_file().to_string_lossy(),
|
||||||
);
|
);
|
||||||
if binary_script_file.exists() {
|
if binary_script_file.exists() {
|
||||||
fs::remove_file(&binary_script_file)?;
|
fs::remove_file(&binary_script_file)?;
|
||||||
@@ -670,7 +666,7 @@ impl Functions {
|
|||||||
.join(".venv")
|
.join(".venv")
|
||||||
.join("Scripts")
|
.join("Scripts")
|
||||||
.join("activate.bat");
|
.join("activate.bat");
|
||||||
let canonicalized_path = dunce::canonicalize(&executable_path)?;
|
let canonicalized_path = fs::canonicalize(&executable_path)?;
|
||||||
format!(
|
format!(
|
||||||
"call \"{}\" && {}",
|
"call \"{}\" && {}",
|
||||||
canonicalized_path.to_string_lossy(),
|
canonicalized_path.to_string_lossy(),
|
||||||
@@ -681,16 +677,19 @@ impl Functions {
|
|||||||
let executable_path = which::which("python")
|
let executable_path = which::which("python")
|
||||||
.or_else(|_| which::which("python3"))
|
.or_else(|_| which::which("python3"))
|
||||||
.map_err(|_| anyhow!("Python executable not found in PATH"))?;
|
.map_err(|_| anyhow!("Python executable not found in PATH"))?;
|
||||||
let canonicalized_path = dunce::canonicalize(&executable_path)?;
|
let canonicalized_path = fs::canonicalize(&executable_path)?;
|
||||||
canonicalized_path.to_string_lossy().into_owned()
|
canonicalized_path.to_string_lossy().into_owned()
|
||||||
}
|
}
|
||||||
_ => bail!("Unsupported language: {}", language.as_ref()),
|
_ => bail!("Unsupported language: {}", language.as_ref()),
|
||||||
};
|
};
|
||||||
let bin_dir = binary_file
|
let bin_dir = binary_file
|
||||||
.parent()
|
.parent()
|
||||||
.expect("Failed to get parent directory of binary file");
|
.expect("Failed to get parent directory of binary file")
|
||||||
let canonical_bin_dir = dunce::canonicalize(bin_dir)?.to_string_lossy().into_owned();
|
.canonicalize()?
|
||||||
let wrapper_binary = dunce::canonicalize(&binary_script_file)?
|
.to_string_lossy()
|
||||||
|
.into_owned();
|
||||||
|
let wrapper_binary = binary_script_file
|
||||||
|
.canonicalize()?
|
||||||
.to_string_lossy()
|
.to_string_lossy()
|
||||||
.into_owned();
|
.into_owned();
|
||||||
let content = formatdoc!(
|
let content = formatdoc!(
|
||||||
@@ -698,7 +697,7 @@ impl Functions {
|
|||||||
@echo off
|
@echo off
|
||||||
setlocal
|
setlocal
|
||||||
|
|
||||||
set "bin_dir={canonical_bin_dir}"
|
set "bin_dir={bin_dir}"
|
||||||
|
|
||||||
{run} "{wrapper_binary}" %*"#,
|
{run} "{wrapper_binary}" %*"#,
|
||||||
);
|
);
|
||||||
@@ -1118,20 +1117,6 @@ pub fn run_llm_function(
|
|||||||
#[cfg(windows)]
|
#[cfg(windows)]
|
||||||
let cmd_name = polyfill_cmd_name(&cmd_name, &bin_dirs);
|
let cmd_name = polyfill_cmd_name(&cmd_name, &bin_dirs);
|
||||||
|
|
||||||
#[cfg(windows)]
|
|
||||||
let cmd_args = {
|
|
||||||
let mut args = cmd_args;
|
|
||||||
if let Some(json_data) = args.pop() {
|
|
||||||
let tool_data_file = temp_file("-tool-data-", ".json");
|
|
||||||
fs::write(&tool_data_file, &json_data)?;
|
|
||||||
envs.insert(
|
|
||||||
"LLM_TOOL_DATA_FILE".into(),
|
|
||||||
tool_data_file.display().to_string(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
args
|
|
||||||
};
|
|
||||||
|
|
||||||
envs.insert("CLICOLOR_FORCE".into(), "1".into());
|
envs.insert("CLICOLOR_FORCE".into(), "1".into());
|
||||||
envs.insert("FORCE_COLOR".into(), "1".into());
|
envs.insert("FORCE_COLOR".into(), "1".into());
|
||||||
|
|
||||||
|
|||||||
@@ -76,16 +76,6 @@ pub fn todo_function_declarations() -> Vec<FunctionDeclaration> {
|
|||||||
},
|
},
|
||||||
agent: false,
|
agent: false,
|
||||||
},
|
},
|
||||||
FunctionDeclaration {
|
|
||||||
name: format!("{TODO_FUNCTION_PREFIX}clear"),
|
|
||||||
description: "Clear the entire todo list and reset the goal. Use when the current task has been canceled or invalidated.".to_string(),
|
|
||||||
parameters: JsonSchema {
|
|
||||||
type_value: Some("object".to_string()),
|
|
||||||
properties: Some(IndexMap::new()),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
agent: false,
|
|
||||||
},
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -166,17 +156,6 @@ pub fn handle_todo_tool(config: &GlobalConfig, cmd_name: &str, args: &Value) ->
|
|||||||
None => bail!("No active agent"),
|
None => bail!("No active agent"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
"clear" => {
|
|
||||||
let mut cfg = config.write();
|
|
||||||
let agent = cfg.agent.as_mut();
|
|
||||||
match agent {
|
|
||||||
Some(agent) => {
|
|
||||||
agent.clear_todo_list();
|
|
||||||
Ok(json!({"status": "ok", "message": "Todo list cleared"}))
|
|
||||||
}
|
|
||||||
None => bail!("No active agent"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => bail!("Unknown todo action: {action}"),
|
_ => bail!("Unknown todo action: {action}"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,6 @@ use tokio::sync::oneshot;
|
|||||||
pub const USER_FUNCTION_PREFIX: &str = "user__";
|
pub const USER_FUNCTION_PREFIX: &str = "user__";
|
||||||
|
|
||||||
const DEFAULT_ESCALATION_TIMEOUT_SECS: u64 = 300;
|
const DEFAULT_ESCALATION_TIMEOUT_SECS: u64 = 300;
|
||||||
const CUSTOM_MULTI_CHOICE_ANSWER_OPTION: &str = "Other (custom)";
|
|
||||||
|
|
||||||
pub fn user_interaction_function_declarations() -> Vec<FunctionDeclaration> {
|
pub fn user_interaction_function_declarations() -> Vec<FunctionDeclaration> {
|
||||||
vec![
|
vec![
|
||||||
@@ -152,14 +151,9 @@ fn handle_direct_ask(args: &Value) -> Result<Value> {
|
|||||||
.get("question")
|
.get("question")
|
||||||
.and_then(Value::as_str)
|
.and_then(Value::as_str)
|
||||||
.ok_or_else(|| anyhow!("'question' is required"))?;
|
.ok_or_else(|| anyhow!("'question' is required"))?;
|
||||||
let mut options = parse_options(args)?;
|
let options = parse_options(args)?;
|
||||||
options.push(CUSTOM_MULTI_CHOICE_ANSWER_OPTION.to_string());
|
|
||||||
|
|
||||||
let mut answer = Select::new(question, options).prompt()?;
|
let answer = Select::new(question, options).prompt()?;
|
||||||
|
|
||||||
if answer == CUSTOM_MULTI_CHOICE_ANSWER_OPTION {
|
|
||||||
answer = Text::new("Custom response:").prompt()?
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(json!({ "answer": answer }))
|
Ok(json!({ "answer": answer }))
|
||||||
}
|
}
|
||||||
@@ -181,7 +175,7 @@ fn handle_direct_input(args: &Value) -> Result<Value> {
|
|||||||
.and_then(Value::as_str)
|
.and_then(Value::as_str)
|
||||||
.ok_or_else(|| anyhow!("'question' is required"))?;
|
.ok_or_else(|| anyhow!("'question' is required"))?;
|
||||||
|
|
||||||
let answer = Text::new(&format!("{question}\nYour answer: ")).prompt()?;
|
let answer = Text::new(question).prompt()?;
|
||||||
|
|
||||||
Ok(json!({ "answer": answer }))
|
Ok(json!({ "answer": answer }))
|
||||||
}
|
}
|
||||||
|
|||||||
+2
-10
@@ -23,7 +23,7 @@ use crate::config::{
|
|||||||
TEMP_SESSION_NAME, WorkingMode, ensure_parent_exists, list_agents, load_env_file,
|
TEMP_SESSION_NAME, WorkingMode, ensure_parent_exists, list_agents, load_env_file,
|
||||||
macro_execute,
|
macro_execute,
|
||||||
};
|
};
|
||||||
use crate::render::{prompt_theme, render_error};
|
use crate::render::render_error;
|
||||||
use crate::repl::Repl;
|
use crate::repl::Repl;
|
||||||
use crate::utils::*;
|
use crate::utils::*;
|
||||||
|
|
||||||
@@ -33,7 +33,7 @@ use anyhow::{Result, anyhow, bail};
|
|||||||
use clap::{CommandFactory, Parser};
|
use clap::{CommandFactory, Parser};
|
||||||
use clap_complete::CompleteEnv;
|
use clap_complete::CompleteEnv;
|
||||||
use client::ClientConfig;
|
use client::ClientConfig;
|
||||||
use inquire::{Select, Text, set_global_render_config};
|
use inquire::{Select, Text};
|
||||||
use log::LevelFilter;
|
use log::LevelFilter;
|
||||||
use log4rs::append::console::ConsoleAppender;
|
use log4rs::append::console::ConsoleAppender;
|
||||||
use log4rs::append::file::FileAppender;
|
use log4rs::append::file::FileAppender;
|
||||||
@@ -106,14 +106,6 @@ async fn main() -> Result<()> {
|
|||||||
)
|
)
|
||||||
.await?,
|
.await?,
|
||||||
));
|
));
|
||||||
|
|
||||||
{
|
|
||||||
let cfg = config.read();
|
|
||||||
if cfg.highlight {
|
|
||||||
set_global_render_config(prompt_theme(cfg.render_options()?)?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Err(err) = run(config, cli, text, abort_signal).await {
|
if let Err(err) = run(config, cli, text, abort_signal).await {
|
||||||
render_error(err);
|
render_error(err);
|
||||||
process::exit(1);
|
process::exit(1);
|
||||||
|
|||||||
@@ -1,53 +0,0 @@
|
|||||||
use crate::render::RenderOptions;
|
|
||||||
use anyhow::Result;
|
|
||||||
use inquire::ui::{Attributes, Color, RenderConfig, StyleSheet};
|
|
||||||
use syntect::highlighting::{Highlighter, Theme};
|
|
||||||
use syntect::parsing::Scope;
|
|
||||||
|
|
||||||
const DEFAULT_INQUIRE_PROMPT_THEME: Color = Color::DarkYellow;
|
|
||||||
|
|
||||||
pub fn prompt_theme<'a>(render_options: RenderOptions) -> Result<RenderConfig<'a>> {
|
|
||||||
let theme = render_options.theme.as_ref();
|
|
||||||
let mut render_config = RenderConfig::default();
|
|
||||||
|
|
||||||
if let Some(theme_ref) = theme {
|
|
||||||
let prompt_color = resolve_foreground(theme_ref, "markup.heading")?
|
|
||||||
.unwrap_or(DEFAULT_INQUIRE_PROMPT_THEME);
|
|
||||||
|
|
||||||
render_config.prompt = StyleSheet::new()
|
|
||||||
.with_fg(prompt_color)
|
|
||||||
.with_attr(Attributes::BOLD);
|
|
||||||
render_config.selected_option = Some(
|
|
||||||
render_config
|
|
||||||
.selected_option
|
|
||||||
.unwrap_or(render_config.option)
|
|
||||||
.with_attr(
|
|
||||||
render_config
|
|
||||||
.selected_option
|
|
||||||
.unwrap_or(render_config.option)
|
|
||||||
.att
|
|
||||||
| Attributes::BOLD,
|
|
||||||
),
|
|
||||||
);
|
|
||||||
render_config.selected_checkbox = render_config
|
|
||||||
.selected_checkbox
|
|
||||||
.with_attr(render_config.selected_checkbox.style.att | Attributes::BOLD);
|
|
||||||
render_config.option = render_config
|
|
||||||
.option
|
|
||||||
.with_attr(render_config.option.att | Attributes::BOLD);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(render_config)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn resolve_foreground(theme: &Theme, scope_str: &str) -> Result<Option<Color>> {
|
|
||||||
let scope = Scope::new(scope_str)?;
|
|
||||||
let style_mod = Highlighter::new(theme).style_mod_for_stack(&[scope]);
|
|
||||||
let fg = style_mod.foreground.or(theme.settings.foreground);
|
|
||||||
|
|
||||||
Ok(fg.map(|c| Color::Rgb {
|
|
||||||
r: c.r,
|
|
||||||
g: c.g,
|
|
||||||
b: c.b,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
@@ -1,9 +1,6 @@
|
|||||||
mod inquire;
|
|
||||||
mod markdown;
|
mod markdown;
|
||||||
mod stream;
|
mod stream;
|
||||||
|
|
||||||
pub use inquire::prompt_theme;
|
|
||||||
|
|
||||||
pub use self::markdown::{MarkdownRender, RenderOptions};
|
pub use self::markdown::{MarkdownRender, RenderOptions};
|
||||||
use self::stream::{markdown_stream, raw_stream};
|
use self::stream::{markdown_stream, raw_stream};
|
||||||
|
|
||||||
|
|||||||
@@ -111,14 +111,12 @@ fn create_suggestion(value: &str, description: &str, span: Span) -> Suggestion {
|
|||||||
Some(description.to_string())
|
Some(description.to_string())
|
||||||
};
|
};
|
||||||
Suggestion {
|
Suggestion {
|
||||||
display_override: None,
|
|
||||||
value: value.to_string(),
|
value: value.to_string(),
|
||||||
description,
|
description,
|
||||||
style: None,
|
style: None,
|
||||||
extra: None,
|
extra: None,
|
||||||
span,
|
span,
|
||||||
append_whitespace: false,
|
append_whitespace: false,
|
||||||
match_indices: None,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
+2
-27
@@ -33,7 +33,7 @@ use std::{env, mem, process};
|
|||||||
|
|
||||||
const MENU_NAME: &str = "completion_menu";
|
const MENU_NAME: &str = "completion_menu";
|
||||||
|
|
||||||
static REPL_COMMANDS: LazyLock<[ReplCommand; 39]> = LazyLock::new(|| {
|
static REPL_COMMANDS: LazyLock<[ReplCommand; 38]> = LazyLock::new(|| {
|
||||||
[
|
[
|
||||||
ReplCommand::new(".help", "Show this help guide", AssertState::pass()),
|
ReplCommand::new(".help", "Show this help guide", AssertState::pass()),
|
||||||
ReplCommand::new(".info", "Show system info", AssertState::pass()),
|
ReplCommand::new(".info", "Show system info", AssertState::pass()),
|
||||||
@@ -137,11 +137,6 @@ static REPL_COMMANDS: LazyLock<[ReplCommand; 39]> = LazyLock::new(|| {
|
|||||||
"Leave agent",
|
"Leave agent",
|
||||||
AssertState::True(StateFlags::AGENT),
|
AssertState::True(StateFlags::AGENT),
|
||||||
),
|
),
|
||||||
ReplCommand::new(
|
|
||||||
".clear todo",
|
|
||||||
"Clear the todo list and stop auto-continuation",
|
|
||||||
AssertState::True(StateFlags::AGENT),
|
|
||||||
),
|
|
||||||
ReplCommand::new(
|
ReplCommand::new(
|
||||||
".rag",
|
".rag",
|
||||||
"Initialize or access RAG",
|
"Initialize or access RAG",
|
||||||
@@ -433,8 +428,7 @@ pub async fn run_repl_command(
|
|||||||
None => println!("Usage: .model <name>"),
|
None => println!("Usage: .model <name>"),
|
||||||
},
|
},
|
||||||
".authenticate" => {
|
".authenticate" => {
|
||||||
let current_model = config.read().current_model().clone();
|
let client = init_client(config, None)?;
|
||||||
let client = init_client(config, Some(current_model))?;
|
|
||||||
if !client.supports_oauth() {
|
if !client.supports_oauth() {
|
||||||
bail!(
|
bail!(
|
||||||
"Client '{}' doesn't either support OAuth or isn't configured to use it (i.e. uses an API key instead)",
|
"Client '{}' doesn't either support OAuth or isn't configured to use it (i.e. uses an API key instead)",
|
||||||
@@ -809,25 +803,6 @@ pub async fn run_repl_command(
|
|||||||
Some("messages") => {
|
Some("messages") => {
|
||||||
bail!("Use '.empty session' instead");
|
bail!("Use '.empty session' instead");
|
||||||
}
|
}
|
||||||
Some("todo") => {
|
|
||||||
let mut cfg = config.write();
|
|
||||||
match cfg.agent.as_mut() {
|
|
||||||
Some(agent) => {
|
|
||||||
if !agent.auto_continue_enabled() {
|
|
||||||
bail!(
|
|
||||||
"The todo system is not enabled for this agent. Set 'auto_continue: true' in the agent's config.yaml to enable it."
|
|
||||||
);
|
|
||||||
}
|
|
||||||
if agent.todo_list().is_empty() {
|
|
||||||
println!("Todo list is already empty.");
|
|
||||||
} else {
|
|
||||||
agent.clear_todo_list();
|
|
||||||
println!("Todo list cleared.");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => bail!("No active agent"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => unknown_command()?,
|
_ => unknown_command()?,
|
||||||
},
|
},
|
||||||
".vault" => match split_first_arg(args) {
|
".vault" => match split_first_arg(args) {
|
||||||
|
|||||||
Reference in New Issue
Block a user