55 Commits

Author SHA1 Message Date
83581d9d18 feat: Initial scaffolding work for Gemini OAuth support (Claude generated, Alex updated) 2026-03-11 14:18:55 -06:00
eb4d1c02f4 feat: Support authenticating or refreshing OAuth for supported clients from within the REPL
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-03-11 13:07:27 -06:00
c428990900 fix: the updated regex for secrets injection broke MCP server secrets interpolation because the regex greedily matched on new lines, replacing too much content. This fix just ignores commented out lines in YAML files by skipping commented out lines. 2026-03-11 12:55:28 -06:00
03b9cc70b9 feat: Allow first-runs to select OAuth for supported providers 2026-03-11 12:01:17 -06:00
3fa0eb832c fix: Don't try to inject secrets into commented-out lines in the config 2026-03-11 11:11:09 -06:00
83f66e1061 feat: Support OAuth authentication flows for Claude 2026-03-11 11:10:48 -06:00
741b9c364c chore: Added support for Claude 4.6 gen models
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-03-10 14:55:30 -06:00
b6f6f456db fix: Removed top_p parameter from some agents so they can work across model providers
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-03-10 10:18:38 -06:00
00a6cf74d7 Merge branch 'main' of github.com:Dark-Alex-17/loki
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-03-09 14:58:23 -06:00
d35ca352ca chore: Added the new gemini-3.1-pro-preview model to gemini and vertex models
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-03-09 14:57:39 -06:00
57dc1cb252 docs: created an authorship policy and PR template that requires disclosure of AI assistance in contributions 2026-02-24 17:46:07 -07:00
101a9cdd6e style: Applied formatting to MCP module
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-20 15:28:21 -07:00
c5f52e1efb docs: Updated sisyphus README to always include the execute_command.sh tool
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-20 15:06:57 -07:00
470149b606 docs: Updated the sisyphus system docs to have a pro-tip of configuring an IDE MCP server to improve performance 2026-02-20 15:01:08 -07:00
02062c5a50 docs: Created README docs for the CodeRabbit-style Code reviewer agents 2026-02-20 15:00:32 -07:00
e6e99b6926 feat: Improved MCP server spinup and spindown when switching contexts or settings in the REPL: Modify existing config rather than stopping all servers always and re-initializing if unnecessary 2026-02-20 14:36:34 -07:00
15a293204f fix: Improved sub-agent stdout and stderr output for users to follow 2026-02-20 13:47:28 -07:00
ecf3780aed Update models.yaml with latest OpenRouter data 2026-02-20 12:08:00 -07:00
e798747135 Add script to update models.yaml from OpenRouter 2026-02-20 12:07:59 -07:00
60493728a0 fix: Inject agent variables into environment variables for global tool calls when invoked from agents to modify global tool behavior 2026-02-20 11:38:24 -07:00
25d6370b20 feat: Allow the explore agent to run search queries for understanding docs or API specs
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-19 14:29:02 -07:00
d67f845af5 feat: Allow the oracle to perform web searches for deeper research
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-19 14:26:07 -07:00
920a14cabe fix: Removed the unnecessary execute_commands tool from the oracle agent
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-19 14:18:16 -07:00
58bdd2e584 fix: Added auto_confirm to the coder agent so sub-agent spawning doesn't freeze 2026-02-19 14:15:42 -07:00
ce6f53ad05 feat: Added web search support to the main sisyphus agent to answer user queries
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-19 12:29:07 -07:00
96f8007d53 refactor: Changed the default session name for Sisyphus to temp (to require users to explicitly name sessions they wish to save)
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-19 10:26:52 -07:00
32a55652fe fix: Fixed a bug in the new supervisor and todo built-ins that was causing errors with OpenAI models
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-18 14:52:57 -07:00
2b92e6c98b fix: Added condition to sisyphus to always output a summary to clearly indicate completion 2026-02-18 13:57:51 -07:00
cfa654bcd8 fix: Updated the sisyphus prompt to explicitly tell it to delegate to the coder agent when it wants to write any code at all except for trivial changes
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-18 13:51:43 -07:00
d0f5ae39e2 fix: Added back in the auto_confirm variable into sisyphus
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-18 13:42:39 -07:00
2bb8cf5f73 fix: Removed the now unnecessary is_stale_response that was breaking auto-continuing with parallel agents
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-18 13:36:25 -07:00
fbac446859 style: Applied formatting to the function module
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-18 13:20:18 -07:00
f91cf2e346 build: Upgraded to the most recent version of rmcp
CI / All (macos-latest) (push) Has been cancelled
CI / All (ubuntu-latest) (push) Has been cancelled
CI / All (windows-latest) (push) Has been cancelled
2026-02-18 12:28:52 -07:00
b6b33ab7e3 refactor: Updated the sisyphus agent to use the built-in user interaction tools instead of custom bash-based tools 2026-02-18 12:17:35 -07:00
c1902a69d1 feat: Created a CodeRabbit-style code-reviewer agent 2026-02-18 12:16:59 -07:00
812a8e101c docs: Updated the docs to include details on the new agent spawning system and built-in user interaction tools 2026-02-18 12:16:29 -07:00
655ee2a599 fix: Bypassed enabled_tools for user interaction tools so if function calling is enabled at all, the LLM has access to the user interaction tools when in REPL mode 2026-02-18 11:25:25 -07:00
128a8f9a9c feat: Added configuration option in agents to indicate the timeout for user input before proceeding (defaults to 5 minutes) 2026-02-18 11:24:47 -07:00
b1be9443e7 feat: Added support for sub-agents to escalate user interaction requests from any depth to the parent agents for user interactions 2026-02-18 11:06:15 -07:00
7b12c69ebf feat: built-in user interaction tools to remove the need for the list/confirm/etc prompts in prompt tools and to enhance user interactions in Loki 2026-02-18 11:05:43 -07:00
69ad584137 fix: When parallel agents run, only write to stdout from the parent and only display the parent's throbber 2026-02-18 09:59:24 -07:00
313058e70a refactor: Cleaned up some left-over implementation stubs 2026-02-18 09:13:39 -07:00
ea96d9ba3d fix: Forgot to implement support for failing a task and keep all dependents blocked 2026-02-18 09:13:11 -07:00
7884adc7c1 fix: Clean up orphaned sub-agents when the parent agent 2026-02-18 09:12:32 -07:00
948466d771 fix: Fixed the bash prompt utils so that they correctly show output when being run by a tool invocation 2026-02-17 17:19:42 -07:00
3894c98b5b feat: Experimental update to sisyphus to use the new parallel agent spawning system 2026-02-17 16:33:08 -07:00
5e9c31595e fix: Forgot to automatically add the bidirectional communication back up to parent agents from sub-agents (i.e. need to be able to check inbox and send messages) 2026-02-17 16:11:35 -07:00
39d9b25e47 feat: Added an agent configuration property that allows auto-injecting sub-agent spawning instructions (when using the built-in sub-agent spawning system) 2026-02-17 15:49:40 -07:00
b86f76ddb9 feat: Auto-dispatch support of sub-agents and support for the teammate pattern between subagents 2026-02-17 15:18:27 -07:00
7f267a10a1 docs: Initial documentation cleanup of parallel agent MVP 2026-02-17 14:30:28 -07:00
cdafdff281 fix: Agent delegation tools were not being passed into the {{__tools__}} placeholder so agents weren't delegating to subagents 2026-02-17 14:19:22 -07:00
60ad83d6d9 feat: Full passive task queue integration for parallelization of subagents 2026-02-17 13:42:53 -07:00
44c03ccf4f feat: Implemented initial scaffolding for built-in sub-agent spawning tool call operations 2026-02-17 11:48:31 -07:00
af933bbb29 feat: Initial models for agent parallelization 2026-02-17 11:27:55 -07:00
1f127ee990 docs: Fixed typos in the Sisyphus documentation 2026-02-16 14:05:51 -07:00
60 changed files with 5236 additions and 790 deletions
@@ -0,0 +1,11 @@
### AI assistance (if any):
- List tools here and files touched by them
### Authorship & Understanding
- [ ] I wrote or heavily modified this code myself
- [ ] I understand how it works end-to-end
- [ ] I can maintain this code in the future
- [ ] No undisclosed AI-generated code was used
- [ ] If AI assistance was used, it is documented above
+7
View File
@@ -76,6 +76,13 @@ Then, you can run workflows locally without having to commit and see if the GitH
act -W .github/workflows/release.yml --input_type bump=minor
```
## Authorship Policy
All code in this repository is written and reviewed by humans. AI-generated code (e.g., Copilot, ChatGPT,
Claude) is not permitted unless explicitly disclosed and approved.
Submissions must certify that the contributor understands and can maintain the code they submit.
## Questions? Reach out to me!
If you encounter any questions while developing Loki, please don't hesitate to reach out to me at
alex.j.tusa@gmail.com. I'm happy to help contributors in any way I can, regardless of whether they're new or experienced!
Generated
+158 -16
View File
@@ -1459,6 +1459,16 @@ dependencies = [
"darling_macro 0.21.3",
]
[[package]]
name = "darling"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d"
dependencies = [
"darling_core 0.23.0",
"darling_macro 0.23.0",
]
[[package]]
name = "darling_core"
version = "0.20.11"
@@ -1487,6 +1497,19 @@ dependencies = [
"syn",
]
[[package]]
name = "darling_core"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0"
dependencies = [
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn",
]
[[package]]
name = "darling_macro"
version = "0.20.11"
@@ -1509,6 +1532,17 @@ dependencies = [
"syn",
]
[[package]]
name = "darling_macro"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d"
dependencies = [
"darling_core 0.23.0",
"quote",
"syn",
]
[[package]]
name = "deranged"
version = "0.5.6"
@@ -2835,6 +2869,15 @@ dependencies = [
"serde",
]
[[package]]
name = "is-docker"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "928bae27f42bc99b60d9ac7334e3a21d10ad8f1835a4e12ec3ec0464765ed1b3"
dependencies = [
"once_cell",
]
[[package]]
name = "is-macro"
version = "0.3.7"
@@ -2858,6 +2901,16 @@ dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "is-wsl"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "173609498df190136aa7dea1a91db051746d339e18476eed5ca40521f02d7aa5"
dependencies = [
"is-docker",
"once_cell",
]
[[package]]
name = "is_executable"
version = "1.0.5"
@@ -3008,9 +3061,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
[[package]]
name = "libc"
version = "0.2.182"
version = "0.2.180"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112"
checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"
[[package]]
name = "libloading"
@@ -3159,6 +3212,7 @@ dependencies = [
"log4rs",
"nu-ansi-term",
"num_cpus",
"open",
"os_info",
"parking_lot",
"path-absolutize",
@@ -3189,6 +3243,7 @@ dependencies = [
"tokio-stream",
"unicode-segmentation",
"unicode-width 0.2.2",
"url",
"urlencoding",
"uuid",
"which",
@@ -3512,6 +3567,18 @@ dependencies = [
"libc",
]
[[package]]
name = "nix"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "225e7cfe711e0ba79a68baeddb2982723e4235247aefce1482f2f16c27865b66"
dependencies = [
"bitflags 2.11.0",
"cfg-if",
"cfg_aliases",
"libc",
]
[[package]]
name = "nom"
version = "7.1.3"
@@ -3823,6 +3890,17 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"
[[package]]
name = "open"
version = "5.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43bb73a7fa3799b198970490a51174027ba0d4ec504b03cd08caf513d40024bc"
dependencies = [
"is-wsl",
"libc",
"pathdiff",
]
[[package]]
name = "openssl"
version = "0.10.75"
@@ -3969,6 +4047,12 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "pastey"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b867cad97c0791bbd3aaa6472142568c6c9e8f71937e98379f584cfb0cf35bec"
[[package]]
name = "path-absolutize"
version = "3.1.1"
@@ -3987,6 +4071,12 @@ dependencies = [
"once_cell",
]
[[package]]
name = "pathdiff"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3"
[[package]]
name = "pem"
version = "3.0.6"
@@ -4226,16 +4316,16 @@ dependencies = [
[[package]]
name = "process-wrap"
version = "8.2.1"
version = "9.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3ef4f2f0422f23a82ec9f628ea2acd12871c81a9362b02c43c1aa86acfc3ba1"
checksum = "ccd9713fe2c91c3c85ac388b31b89de339365d2c995146e630b5e0da9d06526a"
dependencies = [
"futures",
"indexmap 2.12.1",
"nix 0.30.1",
"nix 0.31.1",
"tokio",
"tracing",
"windows 0.61.3",
"windows 0.62.2",
]
[[package]]
@@ -4610,14 +4700,15 @@ dependencies = [
[[package]]
name = "rmcp"
version = "0.6.4"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41ab0892f4938752b34ae47cb53910b1b0921e55e77ddb6e44df666cab17939f"
checksum = "cc4c9c94680f75470ee8083a0667988b5d7b5beb70b9f998a8e51de7c682ce60"
dependencies = [
"async-trait",
"base64",
"chrono",
"futures",
"paste",
"pastey",
"pin-project-lite",
"process-wrap",
"rmcp-macros",
@@ -4633,11 +4724,11 @@ dependencies = [
[[package]]
name = "rmcp-macros"
version = "0.6.4"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1827cd98dab34cade0513243c6fe0351f0f0b2c9d6825460bcf45b42804bdda0"
checksum = "90c23c8f26cae4da838fbc3eadfaecf2d549d97c04b558e7bd90526a9c28b42a"
dependencies = [
"darling 0.21.3",
"darling 0.23.0",
"proc-macro2",
"quote",
"serde_json",
@@ -6700,11 +6791,23 @@ version = "0.61.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893"
dependencies = [
"windows-collections",
"windows-collections 0.2.0",
"windows-core 0.61.2",
"windows-future",
"windows-future 0.2.1",
"windows-link 0.1.3",
"windows-numerics",
"windows-numerics 0.2.0",
]
[[package]]
name = "windows"
version = "0.62.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580"
dependencies = [
"windows-collections 0.3.2",
"windows-core 0.62.2",
"windows-future 0.3.2",
"windows-numerics 0.3.1",
]
[[package]]
@@ -6716,6 +6819,15 @@ dependencies = [
"windows-core 0.61.2",
]
[[package]]
name = "windows-collections"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610"
dependencies = [
"windows-core 0.62.2",
]
[[package]]
name = "windows-core"
version = "0.61.2"
@@ -6750,7 +6862,18 @@ checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e"
dependencies = [
"windows-core 0.61.2",
"windows-link 0.1.3",
"windows-threading",
"windows-threading 0.1.0",
]
[[package]]
name = "windows-future"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb"
dependencies = [
"windows-core 0.62.2",
"windows-link 0.2.1",
"windows-threading 0.2.1",
]
[[package]]
@@ -6797,6 +6920,16 @@ dependencies = [
"windows-link 0.1.3",
]
[[package]]
name = "windows-numerics"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26"
dependencies = [
"windows-core 0.62.2",
"windows-link 0.2.1",
]
[[package]]
name = "windows-result"
version = "0.3.4"
@@ -6935,6 +7068,15 @@ dependencies = [
"windows-link 0.1.3",
]
[[package]]
name = "windows-threading"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37"
dependencies = [
"windows-link 0.2.1",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"
+4 -2
View File
@@ -88,7 +88,7 @@ duct = "1.0.0"
argc = "1.23.0"
strum_macros = "0.27.2"
indoc = "2.0.6"
rmcp = { version = "0.6.1", features = ["client", "transport-child-process"] }
rmcp = { version = "0.16.0", features = ["client", "transport-child-process"] }
num_cpus = "1.17.0"
rustpython-parser = "0.4.0"
rustpython-ast = "0.4.0"
@@ -96,6 +96,9 @@ colored = "3.0.0"
clap_complete = { version = "4.5.58", features = ["unstable-dynamic"] }
gman = "0.3.0"
clap_complete_nushell = "4.5.9"
open = "5"
rand = "0.9.0"
url = "2.5.8"
[dependencies.reqwest]
version = "0.12.0"
@@ -126,7 +129,6 @@ arboard = { version = "3.3.0", default-features = false }
[dev-dependencies]
pretty_assertions = "1.4.0"
rand = "0.9.0"
[[bin]]
name = "loki"
+22 -1
View File
@@ -35,10 +35,11 @@ Coming from [AIChat](https://github.com/sigoden/aichat)? Follow the [migration g
* [RAG](./docs/RAG.md): Retrieval-Augmented Generation for enhanced information retrieval and generation.
* [Sessions](/docs/SESSIONS.md): Manage and persist conversational contexts and settings across multiple interactions.
* [Roles](./docs/ROLES.md): Customize model behavior for specific tasks or domains.
* [Agents](/docs/AGENTS.md): Leverage AI agents to perform complex tasks and workflows.
* [Agents](/docs/AGENTS.md): Leverage AI agents to perform complex tasks and workflows, including sub-agent spawning, teammate messaging, and user interaction tools.
* [Todo System](./docs/TODO-SYSTEM.md): Built-in task tracking for improved agent reliability with smaller models.
* [Environment Variables](./docs/ENVIRONMENT-VARIABLES.md): Override and customize your Loki configuration at runtime with environment variables.
* [Client Configurations](./docs/clients/CLIENTS.md): Configuration instructions for various LLM providers.
* [Authentication (API Key & OAuth)](./docs/clients/CLIENTS.md#authentication): Authenticate with API keys or OAuth for subscription-based access.
* [Patching API Requests](./docs/clients/PATCHES.md): Learn how to patch API requests for advanced customization.
* [Custom Themes](./docs/THEMES.md): Change the look and feel of Loki to your preferences with custom themes.
* [History](#history): A history of how Loki came to be.
@@ -150,6 +151,26 @@ guide you through the process when you first attempt to access the vault. So, to
loki --list-secrets
```
### Authentication
Each client in your configuration needs authentication (with a few exceptions; e.g. ollama). Most clients use an API key
(set via `api_key` in the config or through the [vault](./docs/VAULT.md)). For providers that support OAuth (e.g. Claude Pro/Max
subscribers, Google Gemini), you can authenticate with your existing subscription instead:
```yaml
# In your config.yaml
clients:
- type: claude
name: my-claude-oauth
auth: oauth # Indicate you want to authenticate with OAuth instead of an API key
```
```sh
loki --authenticate my-claude-oauth
# Or via the REPL: .authenticate
```
For full details, see the [authentication documentation](./docs/clients/CLIENTS.md#authentication).
### Tab-Completions
You can also enable tab completions to make using Loki easier. To do so, add the following to your shell profile:
```shell
+36
View File
@@ -0,0 +1,36 @@
# Code Reviewer
A CodeRabbit-style code review orchestrator that coordinates per-file reviews and synthesizes findings into a unified
report.
This agent acts as the manager for the review process, delegating actual file analysis to **[File Reviewer](../file-reviewer/README.md)**
agents while handling coordination and final reporting.
## Features
- 🤖 **Orchestration**: Spawns parallel reviewers for each changed file.
- 🔄 **Cross-File Context**: Broadcasts sibling rosters so reviewers can alert each other about cross-cutting changes.
- 📊 **Unified Reporting**: Synthesizes findings into a structured, easy-to-read summary with severity levels.
- **Parallel Execution**: Runs reviews concurrently for maximum speed.
## Pro-Tip: Use an IDE MCP Server for Improved Performance
Many modern IDEs now include MCP servers that let LLMs perform operations within the IDE itself and use IDE tools. Using
an IDE's MCP server dramatically improves the performance of coding agents. So if you have an IDE, try adding that MCP
server to your config (see the [MCP Server docs](../../../docs/function-calling/MCP-SERVERS.md) to see how to configure
them), and modify the agent definition to look like this:
```yaml
# ...
mcp_servers:
- jetbrains # The name of your configured IDE MCP server
global_tools:
- fs_read.sh
- fs_grep.sh
- fs_glob.sh
# - execute_command.sh
# ...
```
+124
View File
@@ -0,0 +1,124 @@
name: code-reviewer
description: CodeRabbit-style code reviewer - spawns per-file reviewers, synthesizes findings
version: 1.0.0
temperature: 0.1
auto_continue: true
max_auto_continues: 20
inject_todo_instructions: true
can_spawn_agents: true
max_concurrent_agents: 10
max_agent_depth: 2
variables:
- name: project_dir
description: Project directory to review
default: '.'
global_tools:
- fs_read.sh
- fs_grep.sh
- fs_glob.sh
- execute_command.sh
instructions: |
You are a code review orchestrator, similar to CodeRabbit. You coordinate per-file reviews and produce a unified report.
## Workflow
1. **Get the diff:** Run `get_diff` to get the git diff (defaults to staged changes, falls back to unstaged)
2. **Parse changed files:** Extract the list of files from the diff
3. **Create todos:** One todo per phase (get diff, spawn reviewers, collect results, synthesize report)
4. **Spawn file-reviewers:** One `file-reviewer` agent per changed file, in parallel
5. **Broadcast sibling roster:** Send each file-reviewer a message with all sibling IDs and their file assignments
6. **Collect all results:** Wait for each file-reviewer to complete
7. **Synthesize:** Combine all findings into a CodeRabbit-style report
## Spawning File Reviewers
For each changed file, spawn a file-reviewer with a prompt containing:
- The file path
- The relevant diff hunk(s) for that file
- Instructions to review it
```
agent__spawn --agent file-reviewer --prompt "Review the following diff for <file_path>:
<diff content for this file>
Focus on bugs, security issues, logic errors, and style. Use the severity format (🔴🟡🟢💡).
End with REVIEW_COMPLETE."
```
## Sibling Roster Broadcast
After spawning ALL file-reviewers (collecting their IDs), send each one a message with the roster:
```
agent__send_message --to <agent_id> --message "SIBLING_ROSTER:
- <agent_id_1>: reviewing <file_1>
- <agent_id_2>: reviewing <file_2>
...
Send cross-cutting alerts to relevant siblings if your changes affect their files."
```
## Diff Parsing
Split the diff by file. Each file's diff starts with `diff --git a/<path> b/<path>`. Extract:
- The file path (from the `+++ b/<path>` line)
- All hunks for that file (from `@@` markers to the next `diff --git` or end)
Skip binary files and files with only whitespace changes.
## Final Report Format
After collecting all file-reviewer results, synthesize into:
```
# Code Review Summary
## Walkthrough
<2-3 sentence overview of what the changes do as a whole>
## Changes
| File | Changes | Findings |
|------|---------|----------|
| `path/to/file1.rs` | <brief description> | 🔴 1 🟡 2 🟢 1 |
| `path/to/file2.rs` | <brief description> | 🟢 2 💡 1 |
## Detailed Findings
### `path/to/file1.rs`
<paste file-reviewer's findings here, cleaned up>
### `path/to/file2.rs`
<paste file-reviewer's findings here, cleaned up>
## Cross-File Concerns
<any cross-cutting issues identified by the teammate pattern>
---
*Reviewed N files, found X critical, Y warnings, Z suggestions, W nitpicks*
```
## Edge Cases
- **Single file changed:** Still spawn one file-reviewer (for consistency), skip roster broadcast
- **Too many files (>10):** Group small files (< 20 lines changed) and review them together
- **No changes found:** Report "No changes to review" and exit
- **Binary files:** Skip with a note in the summary
## Rules
1. **Always use `get_diff` first:** Don't assume what changed
2. **Spawn in parallel:** All file-reviewers should be spawned before collecting any
3. **Don't review code yourself:** Delegate ALL review work to file-reviewers
4. **Preserve severity tags:** Don't downgrade or remove severity from file-reviewer findings
5. **Include ALL findings:** Don't summarize away specific issues
## Context
- Project: {{project_dir}}
- CWD: {{__cwd__}}
- Shell: {{__shell__}}
+478
View File
@@ -0,0 +1,478 @@
#!/usr/bin/env bash
set -eo pipefail
# shellcheck disable=SC1090
source "$LLM_PROMPT_UTILS_FILE"
source "$LLM_ROOT_DIR/agents/.shared/utils.sh"
# @env LLM_OUTPUT=/dev/stdout
# @env LLM_AGENT_VAR_PROJECT_DIR=.
# @describe Code review orchestrator tools
# Resolve the project directory from LLM_AGENT_VAR_PROJECT_DIR (default ".").
# Prints the absolute path when the directory exists; otherwise falls back to
# echoing the raw configured value unchanged.
_project_dir() {
  local target="${LLM_AGENT_VAR_PROJECT_DIR:-.}"
  if ! (cd "${target}" 2>/dev/null && pwd); then
    echo "${target}"
  fi
}
# @cmd Get git diff for code review. Returns staged changes, or unstaged if nothing is staged, or HEAD~1 diff if working tree is clean.
# @option --base Optional base ref to diff against (e.g., "main", "HEAD~3", a commit SHA)
get_diff() {
  local project_dir
  project_dir=$(_project_dir)
  # argc_base is injected by the generated argc parser below.
  # shellcheck disable=SC2154
  local base="${argc_base:-}"
  local diff_output=""
  if [[ -n "${base}" ]]; then
    # Explicit base ref given: diff the working tree against it.
    # NOTE(review): 2>&1 folds git's stderr into the captured output, so a bad
    # ref or non-repo directory is reported as if it were diff content —
    # presumably intentional so the agent sees the error text; confirm.
    diff_output=$(cd "${project_dir}" && git diff "${base}" 2>&1) || true
  else
    # Fallback chain: staged changes -> unstaged changes -> last commit.
    diff_output=$(cd "${project_dir}" && git diff --cached 2>&1) || true
    if [[ -z "${diff_output}" ]]; then
      diff_output=$(cd "${project_dir}" && git diff 2>&1) || true
    fi
    if [[ -z "${diff_output}" ]]; then
      diff_output=$(cd "${project_dir}" && git diff HEAD~1 2>&1) || true
    fi
  fi
  if [[ -z "${diff_output}" ]]; then
    warn "No changes found to review" >> "$LLM_OUTPUT"
    return 0
  fi
  # Count per-file headers to summarize how many files changed; `|| true`
  # keeps grep's non-zero exit (no matches) from tripping `set -e`.
  local file_count
  file_count=$(echo "${diff_output}" | grep -c '^diff --git' || true)
  {
    info "Diff contains changes to ${file_count} file(s)"
    echo ""
    echo "${diff_output}"
  } >> "$LLM_OUTPUT"
}
# @cmd Get list of changed files with stats
# @option --base Optional base ref to diff against
get_changed_files() {
  local repo_dir
  repo_dir=$(_project_dir)
  # argc_base is injected by the generated argc parser below.
  # shellcheck disable=SC2154
  local base_ref="${argc_base:-}"
  local stats=""
  if [[ -n "${base_ref}" ]]; then
    stats=$(cd "${repo_dir}" && git diff --stat "${base_ref}" 2>&1) || true
  else
    # Try staged changes first, then unstaged, then the last commit.
    stats=$(cd "${repo_dir}" && git diff --cached --stat 2>&1) || true
    [[ -n "${stats}" ]] || stats=$(cd "${repo_dir}" && git diff --stat 2>&1) || true
    [[ -n "${stats}" ]] || stats=$(cd "${repo_dir}" && git diff --stat HEAD~1 2>&1) || true
  fi
  if [[ -z "${stats}" ]]; then
    warn "No changes found" >> "$LLM_OUTPUT"
    return 0
  fi
  {
    info "Changed files:"
    echo ""
    echo "${stats}"
  } >> "$LLM_OUTPUT"
}
# @cmd Get project structure and type information
get_project_info() {
  local root
  root=$(_project_dir)
  # detect_project / get_tree come from the shared agent utils sourced above.
  local detected
  detected=$(detect_project "${root}")
  {
    info "Project: ${root}"
    echo "Type: $(jq -r '.type' <<<"${detected}")"
    echo ""
    get_tree "${root}" 2
  } >> "$LLM_OUTPUT"
}
# ARGC-BUILD {
# This block was generated by argc (https://github.com/sigoden/argc).
# Modifying it manually is not recommended
_argc_run() {
if [[ "${1:-}" == "___internal___" ]]; then
_argc_die "error: unsupported ___internal___ command"
fi
if [[ "${OS:-}" == "Windows_NT" ]] && [[ -n "${MSYSTEM:-}" ]]; then
set -o igncr
fi
argc__args=("$(basename "$0" .sh)" "$@")
argc__positionals=()
_argc_index=1
_argc_len="${#argc__args[@]}"
_argc_tools=()
_argc_parse
if [ -n "${argc__fn:-}" ]; then
$argc__fn "${argc__positionals[@]}"
fi
}
_argc_usage() {
cat <<-'EOF'
Code review orchestrator tools
USAGE: <COMMAND>
COMMANDS:
get_diff Get git diff for code review. Returns staged changes, or unstaged if nothing is staged, or HEAD~1 diff if working tree is clean. [aliases: get-diff]
get_changed_files Get list of changed files with stats [aliases: get-changed-files]
get_project_info Get project structure and type information [aliases: get-project-info]
ENVIRONMENTS:
LLM_OUTPUT [default: /dev/stdout]
LLM_AGENT_VAR_PROJECT_DIR [default: .]
EOF
exit
}
_argc_version() {
echo 0.0.0
exit
}
_argc_parse() {
local _argc_key _argc_action
local _argc_subcmds="get_diff, get-diff, get_changed_files, get-changed-files, get_project_info, get-project-info"
while [[ $_argc_index -lt $_argc_len ]]; do
_argc_item="${argc__args[_argc_index]}"
_argc_key="${_argc_item%%=*}"
case "$_argc_key" in
--help | -help | -h)
_argc_usage
;;
--version | -version | -V)
_argc_version
;;
--)
_argc_dash="${#argc__positionals[@]}"
argc__positionals+=("${argc__args[@]:$((_argc_index + 1))}")
_argc_index=$_argc_len
break
;;
get_diff | get-diff)
_argc_index=$((_argc_index + 1))
_argc_action=_argc_parse_get_diff
break
;;
get_changed_files | get-changed-files)
_argc_index=$((_argc_index + 1))
_argc_action=_argc_parse_get_changed_files
break
;;
get_project_info | get-project-info)
_argc_index=$((_argc_index + 1))
_argc_action=_argc_parse_get_project_info
break
;;
help)
local help_arg="${argc__args[$((_argc_index + 1))]:-}"
case "$help_arg" in
get_diff | get-diff)
_argc_usage_get_diff
;;
get_changed_files | get-changed-files)
_argc_usage_get_changed_files
;;
get_project_info | get-project-info)
_argc_usage_get_project_info
;;
"")
_argc_usage
;;
*)
_argc_die "error: invalid value \`$help_arg\` for \`<command>\`"$'\n'" [possible values: $_argc_subcmds]"
;;
esac
;;
*)
_argc_die "error: \`\` requires a subcommand but one was not provided"$'\n'" [subcommands: $_argc_subcmds]"
;;
esac
done
if [[ -n "${_argc_action:-}" ]]; then
$_argc_action
else
_argc_usage
fi
}
_argc_usage_get_diff() {
cat <<-'EOF'
Get git diff for code review. Returns staged changes, or unstaged if nothing is staged, or HEAD~1 diff if working tree is clean.
USAGE: get_diff [OPTIONS]
OPTIONS:
--base <BASE> Optional base ref to diff against (e.g., "main", "HEAD~3", a commit SHA)
-h, --help Print help
ENVIRONMENTS:
LLM_OUTPUT [default: /dev/stdout]
LLM_AGENT_VAR_PROJECT_DIR [default: .]
EOF
exit
}
# Parse argv for the `get_diff` subcommand.
# Consumes tokens from the global argv copy (argc__args) starting at the
# shared cursor _argc_index. Recognizes --help, the `--` separator, and the
# --base option; anything else becomes a positional argument. On completion
# it records get_diff as the function to dispatch and applies the default
# values for the LLM_OUTPUT / LLM_AGENT_VAR_PROJECT_DIR env vars.
_argc_parse_get_diff() {
    local _argc_key _argc_action
    local _argc_subcmds=""
    while [[ $_argc_index -lt $_argc_len ]]; do
        _argc_item="${argc__args[_argc_index]}"
        _argc_key="${_argc_item%%=*}" # option name: text before the first `=`
        case "$_argc_key" in
        --help | -help | -h)
            _argc_usage_get_diff
            ;;
        --)
            # Everything after a literal `--` is treated as positional.
            _argc_dash="${#argc__positionals[@]}"
            argc__positionals+=("${argc__args[@]:$((_argc_index + 1))}")
            _argc_index=$_argc_len
            break
            ;;
        --base)
            _argc_take_args "--base <BASE>" 1 1 "-" ""
            # Advance past the option token plus however many value tokens
            # _argc_take_args consumed from argv.
            _argc_index=$((_argc_index + _argc_take_args_len + 1))
            if [[ -z "${argc_base:-}" ]]; then
                argc_base="${_argc_take_args_values[0]:-}"
            else
                # --base may only appear once.
                _argc_die "error: the argument \`--base\` cannot be used multiple times"
            fi
            ;;
        *)
            # Unknown option-looking tokens are an error; plain tokens are
            # collected as positionals.
            if _argc_maybe_flag_option "-" "$_argc_item"; then
                _argc_die "error: unexpected argument \`$_argc_key\` found"
            fi
            argc__positionals+=("$_argc_item")
            _argc_index=$((_argc_index + 1))
            ;;
        esac
    done
    # NOTE(review): _argc_action is declared local and never assigned in this
    # function, so the else branch below always runs (generated-code pattern).
    if [[ -n "${_argc_action:-}" ]]; then
        $_argc_action
    else
        argc__fn=get_diff
        # A lone `help` positional is equivalent to --help.
        if [[ "${argc__positionals[0]:-}" == "help" ]] && [[ "${#argc__positionals[@]}" -eq 1 ]]; then
            _argc_usage_get_diff
        fi
        if [[ -z "${LLM_OUTPUT:-}" ]]; then
            export LLM_OUTPUT=/dev/stdout
        fi
        if [[ -z "${LLM_AGENT_VAR_PROJECT_DIR:-}" ]]; then
            export LLM_AGENT_VAR_PROJECT_DIR=.
        fi
    fi
}
_argc_usage_get_changed_files() {
    # Print the help text for the `get_changed_files` subcommand, then exit 0.
    # Quoted heredoc delimiter: body is emitted verbatim, no expansion.
    cat <<-'EOF'
Get list of changed files with stats
USAGE: get_changed_files [OPTIONS]
OPTIONS:
    --base <BASE> Optional base ref to diff against
    -h, --help Print help
ENVIRONMENTS:
    LLM_OUTPUT [default: /dev/stdout]
    LLM_AGENT_VAR_PROJECT_DIR [default: .]
EOF
    exit
}
# Parse argv for the `get_changed_files` subcommand.
# Mirrors _argc_parse_get_diff: walks the global argv copy (argc__args) via
# the shared cursor _argc_index, handling --help, `--`, and --base, and
# collecting positionals. Sets argc__fn for dispatch and fills in default
# environment variables when they are unset.
_argc_parse_get_changed_files() {
    local _argc_key _argc_action
    local _argc_subcmds=""
    while [[ $_argc_index -lt $_argc_len ]]; do
        _argc_item="${argc__args[_argc_index]}"
        _argc_key="${_argc_item%%=*}" # option name: text before the first `=`
        case "$_argc_key" in
        --help | -help | -h)
            _argc_usage_get_changed_files
            ;;
        --)
            # Everything after a literal `--` is treated as positional.
            _argc_dash="${#argc__positionals[@]}"
            argc__positionals+=("${argc__args[@]:$((_argc_index + 1))}")
            _argc_index=$_argc_len
            break
            ;;
        --base)
            _argc_take_args "--base <BASE>" 1 1 "-" ""
            # Skip the option token plus the value tokens it consumed.
            _argc_index=$((_argc_index + _argc_take_args_len + 1))
            if [[ -z "${argc_base:-}" ]]; then
                argc_base="${_argc_take_args_values[0]:-}"
            else
                # --base may only appear once.
                _argc_die "error: the argument \`--base\` cannot be used multiple times"
            fi
            ;;
        *)
            # Reject unknown option-looking tokens; keep plain ones.
            if _argc_maybe_flag_option "-" "$_argc_item"; then
                _argc_die "error: unexpected argument \`$_argc_key\` found"
            fi
            argc__positionals+=("$_argc_item")
            _argc_index=$((_argc_index + 1))
            ;;
        esac
    done
    # NOTE(review): _argc_action is never assigned here, so the else branch
    # always executes (generated-code pattern).
    if [[ -n "${_argc_action:-}" ]]; then
        $_argc_action
    else
        argc__fn=get_changed_files
        # A lone `help` positional is equivalent to --help.
        if [[ "${argc__positionals[0]:-}" == "help" ]] && [[ "${#argc__positionals[@]}" -eq 1 ]]; then
            _argc_usage_get_changed_files
        fi
        if [[ -z "${LLM_OUTPUT:-}" ]]; then
            export LLM_OUTPUT=/dev/stdout
        fi
        if [[ -z "${LLM_AGENT_VAR_PROJECT_DIR:-}" ]]; then
            export LLM_AGENT_VAR_PROJECT_DIR=.
        fi
    fi
}
_argc_usage_get_project_info() {
    # Print the help text for the `get_project_info` subcommand, then exit 0.
    # Quoted heredoc delimiter: body is emitted verbatim, no expansion.
    cat <<-'EOF'
Get project structure and type information
USAGE: get_project_info
ENVIRONMENTS:
    LLM_OUTPUT [default: /dev/stdout]
    LLM_AGENT_VAR_PROJECT_DIR [default: .]
EOF
    exit
}
# Parse argv for the `get_project_info` subcommand.
# Simpler sibling of the other _argc_parse_* functions: it takes no options,
# so besides --help and the `--` separator every token is collected as a
# positional. Sets argc__fn for dispatch and applies env-var defaults.
_argc_parse_get_project_info() {
    local _argc_key _argc_action
    local _argc_subcmds=""
    while [[ $_argc_index -lt $_argc_len ]]; do
        _argc_item="${argc__args[_argc_index]}"
        _argc_key="${_argc_item%%=*}" # option name: text before the first `=`
        case "$_argc_key" in
        --help | -help | -h)
            _argc_usage_get_project_info
            ;;
        --)
            # Everything after a literal `--` is treated as positional.
            _argc_dash="${#argc__positionals[@]}"
            argc__positionals+=("${argc__args[@]:$((_argc_index + 1))}")
            _argc_index=$_argc_len
            break
            ;;
        *)
            # No options to reject here: every remaining token is positional.
            argc__positionals+=("$_argc_item")
            _argc_index=$((_argc_index + 1))
            ;;
        esac
    done
    # NOTE(review): _argc_action is never assigned here, so the else branch
    # always executes (generated-code pattern).
    if [[ -n "${_argc_action:-}" ]]; then
        $_argc_action
    else
        argc__fn=get_project_info
        # A lone `help` positional is equivalent to --help.
        if [[ "${argc__positionals[0]:-}" == "help" ]] && [[ "${#argc__positionals[@]}" -eq 1 ]]; then
            _argc_usage_get_project_info
        fi
        if [[ -z "${LLM_OUTPUT:-}" ]]; then
            export LLM_OUTPUT=/dev/stdout
        fi
        if [[ -z "${LLM_AGENT_VAR_PROJECT_DIR:-}" ]]; then
            export LLM_AGENT_VAR_PROJECT_DIR=.
        fi
    fi
}
# Collect between $2 (min) and $3 (max) values for option $1 from the global
# argv copy (argc__args), starting after the token at the shared cursor
# _argc_index. Results are published through two globals:
#   _argc_take_args_values  array of collected values
#   _argc_take_args_len     number of separate argv tokens consumed
#                           (0 for the inline `--opt=value` form)
# $4 (signs) tells the flag heuristic which prefixes denote options; $5
# (delimiter), when non-empty, further splits each collected value.
# Dies via _argc_die when fewer than min values are found.
_argc_take_args() {
    _argc_take_args_values=()
    _argc_take_args_len=0
    local param="$1" min="$2" max="$3" signs="$4" delimiter="$5"
    if [[ "$min" -eq 0 ]] && [[ "$max" -eq 0 ]]; then
        return
    fi
    local _argc_take_index=$((_argc_index + 1)) _argc_take_value
    if [[ "$_argc_item" == *=* ]]; then
        # Inline form `--opt=value`: keep everything after the FIRST `=` so
        # values that themselves contain `=` (e.g. `--base=a=b`) survive
        # intact. (Was `##*=`, which stripped up to the LAST `=` and
        # truncated such values; `#*=` mirrors the `%%=*` key extraction.)
        _argc_take_args_values=("${_argc_item#*=}")
    else
        while [[ $_argc_take_index -lt $_argc_len ]]; do
            _argc_take_value="${argc__args[_argc_take_index]}"
            if _argc_maybe_flag_option "$signs" "$_argc_take_value"; then
                # Stop at the next option-looking token; a lone `-` (length
                # 1, conventionally "stdin") is still accepted as a value.
                if [[ "${#_argc_take_value}" -gt 1 ]]; then
                    break
                fi
            fi
            _argc_take_args_values+=("$_argc_take_value")
            _argc_take_args_len=$((_argc_take_args_len + 1))
            if [[ "$_argc_take_args_len" -ge "$max" ]]; then
                break
            fi
            _argc_take_index=$((_argc_take_index + 1))
        done
    fi
    if [[ "${#_argc_take_args_values[@]}" -lt "$min" ]]; then
        _argc_die "error: incorrect number of values for \`$param\`"
    fi
    if [[ -n "$delimiter" ]] && [[ "${#_argc_take_args_values[@]}" -gt 0 ]]; then
        # Split each collected value on the delimiter and flatten the result.
        local item values arr=()
        for item in "${_argc_take_args_values[@]}"; do
            IFS="$delimiter" read -r -a values <<<"$item"
            arr+=("${values[@]}")
        done
        _argc_take_args_values=("${arr[@]}")
    fi
}
# Heuristic predicate: does token $2 look like a flag/option for the sign
# set $1? Returns 0 (yes) only when the token starts with a recognized sign,
# is not a `---…` token, and its name part (before any `=`) contains no
# whitespace. An empty sign set means nothing can be a flag.
_argc_maybe_flag_option() {
    local signs="$1" arg="$2"
    if [[ -z "$signs" ]]; then
        return 1
    fi
    local looks_like_flag=false
    if [[ "$signs" == *"+"* ]]; then
        # `+`-style flags: a bare `+` or a `++…` token does not qualify.
        if [[ "$arg" =~ ^\+[^+].* ]]; then
            looks_like_flag=true
        fi
    elif [[ "$arg" == -* ]]; then
        # `-`-style flags: short tokens (`-`, `-x`) always qualify; longer
        # ones must not begin with three dashes.
        if (( ${#arg} < 3 )) || [[ ! "$arg" =~ ^---.* ]]; then
            looks_like_flag=true
        fi
    fi
    # Accept only when it looked like a flag AND the name part is clean.
    if [[ "$looks_like_flag" == "true" ]] && [[ ! "${arg%%=*}" =~ [[:space:]] ]]; then
        return 0
    fi
    return 1
}
# Abort the script with exit status 1. With arguments, they are joined with
# spaces and printed to stderr; with no arguments, the error text is read
# from stdin (so callers can pipe a multi-line message in).
_argc_die() {
    if (( $# > 0 )); then
        echo "$*" >&2
    else
        cat
    fi
    exit 1
}
_argc_run "$@"
# ARGC-BUILD }
+26 -2
View File
@@ -2,7 +2,7 @@
An AI agent that assists you with your coding tasks.
This agent is designed to be delegated to by the **[Sisyphus](../sisyphus/README.md)** agent to implement code specifications. Sisyphus
This agent is designed to be delegated to by the **[Sisyphus](../sisyphus/README.md)** agent to implement code specifications. Sisyphus
acts as the coordinator/architect, while Coder handles the implementation details.
## Features
@@ -13,4 +13,28 @@ acts as the coordinator/architect, while Coder handles the implementation detail
- 🧐 Advanced code analysis and improvement suggestions
- 📊 Precise diff-based file editing for controlled code modifications
It can also be used as a standalone tool for direct coding assistance.
It can also be used as a standalone tool for direct coding assistance.
## Pro-Tip: Use an IDE MCP Server for Improved Performance
Many modern IDEs now include MCP servers that let LLMs perform operations within the IDE itself and use IDE tools. Using
an IDE's MCP server dramatically improves the performance of coding agents. So if you have an IDE, try adding that MCP
server to your config (see the [MCP Server docs](../../../docs/function-calling/MCP-SERVERS.md) to see how to configure
them), and modify the agent definition to look like this:
```yaml
# ...
mcp_servers:
- jetbrains # The name of your configured IDE MCP server
global_tools:
# Keep useful read-only tools for reading files in other non-project directories
- fs_read.sh
- fs_grep.sh
- fs_glob.sh
# - fs_write.sh
# - fs_patch.sh
- execute_command.sh
# ...
```
+3 -1
View File
@@ -2,7 +2,6 @@ name: coder
description: Implementation agent - writes code, follows patterns, verifies with builds
version: 1.0.0
temperature: 0.1
top_p: 0.95
auto_continue: true
max_auto_continues: 15
@@ -12,6 +11,9 @@ variables:
- name: project_dir
description: Project directory to work in
default: '.'
- name: auto_confirm
description: Auto-confirm command execution
default: '1'
global_tools:
- fs_read.sh
+23 -1
View File
@@ -2,7 +2,7 @@
An AI agent specialized in exploring codebases, finding patterns, and understanding project structures.
This agent is designed to be delegated to by the **[Sisyphus](../sisyphus/README.md)** agent to gather information and context. Sisyphus
This agent is designed to be delegated to by the **[Sisyphus](../sisyphus/README.md)** agent to gather information and context. Sisyphus
acts as the coordinator/architect, while Explore handles the research and discovery phase.
It can also be used as a standalone tool for understanding codebases and finding specific information.
@@ -13,3 +13,25 @@ It can also be used as a standalone tool for understanding codebases and finding
- 📂 File system navigation and content analysis
- 🧠 Context gathering for complex tasks
- 🛡️ Read-only operations for safe investigation
## Pro-Tip: Use an IDE MCP Server for Improved Performance
Many modern IDEs now include MCP servers that let LLMs perform operations within the IDE itself and use IDE tools. Using
an IDE's MCP server dramatically improves the performance of coding agents. So if you have an IDE, try adding that MCP
server to your config (see the [MCP Server docs](../../../docs/function-calling/MCP-SERVERS.md) to see how to configure
them), and modify the agent definition to look like this:
```yaml
# ...
mcp_servers:
- jetbrains # The name of your configured IDE MCP server
global_tools:
- fs_read.sh
- fs_grep.sh
- fs_glob.sh
- fs_ls.sh
- web_search_loki.sh
# ...
```
+1 -1
View File
@@ -2,7 +2,6 @@ name: explore
description: Fast codebase exploration agent - finds patterns, structures, and relevant files
version: 1.0.0
temperature: 0.1
top_p: 0.95
variables:
- name: project_dir
@@ -14,6 +13,7 @@ global_tools:
- fs_grep.sh
- fs_glob.sh
- fs_ls.sh
- web_search_loki.sh
instructions: |
You are a codebase explorer. Your job: Search, find, report. Nothing else.
+35
View File
@@ -0,0 +1,35 @@
# File Reviewer
A specialized worker agent that reviews a single file's diff for bugs, style issues, and cross-cutting concerns.
This agent is designed to be spawned by the **[Code Reviewer](../code-reviewer/README.md)** agent. It focuses deeply on
one file while communicating with sibling agents to catch issues that span multiple files.
## Features
- 🔍 **Deep Analysis**: Focuses on bugs, logic errors, security issues, and style problems in a single file.
- 🗣️ **Teammate Communication**: Sends and receives alerts to/from sibling reviewers about interface or dependency
changes.
- 🎯 **Targeted Reading**: Reads only relevant context around changed lines to stay efficient.
- 🏷️ **Structured Findings**: Categorizes issues by severity (🔴 Critical, 🟡 Warning, 🟢 Suggestion, 💡 Nitpick).
## Pro-Tip: Use an IDE MCP Server for Improved Performance
Many modern IDEs now include MCP servers that let LLMs perform operations within the IDE itself and use IDE tools. Using
an IDE's MCP server dramatically improves the performance of coding agents. So if you have an IDE, try adding that MCP
server to your config (see the [MCP Server docs](../../../docs/function-calling/MCP-SERVERS.md) to see how to configure
them), and modify the agent definition to look like this:
```yaml
# ...
mcp_servers:
- jetbrains # The name of your configured IDE MCP server
global_tools:
- fs_read.sh
- fs_grep.sh
- fs_glob.sh
# ...
```
+110
View File
@@ -0,0 +1,110 @@
name: file-reviewer
description: Reviews a single file's diff for bugs, style issues, and cross-cutting concerns
version: 1.0.0
temperature: 0.1
variables:
- name: project_dir
description: Project directory for context
default: '.'
global_tools:
- fs_read.sh
- fs_grep.sh
- fs_glob.sh
instructions: |
You are a precise code reviewer. You review ONE file's diff and produce structured findings.
## Your Mission
You receive a git diff for a single file. Your job:
1. Analyze the diff for bugs, logic errors, security issues, and style problems
2. Read surrounding code for context (use `fs_read` with targeted offsets)
3. Check your inbox for cross-cutting alerts from sibling reviewers
4. Send alerts to siblings if you spot cross-file issues
5. Return structured findings
## Input
You receive:
- The file path being reviewed
- The git diff for that file
- A sibling roster (other file-reviewers and which files they're reviewing)
## Cross-Cutting Alerts (Teammate Pattern)
After analyzing your file, check if changes might affect sibling files:
- **Interface changes**: If a function signature changed, alert siblings reviewing callers
- **Type changes**: If a type/struct changed, alert siblings reviewing files that use it
- **Import changes**: If exports changed, alert siblings reviewing importers
- **Config changes**: Alert all siblings if config format changed
To alert a sibling:
```
agent__send_message --to <sibling_agent_id> --message "ALERT: <description of cross-file concern>"
```
Check your inbox periodically for alerts from siblings:
```
agent__check_inbox
```
If you receive an alert, incorporate it into your findings under a "Cross-File Concerns" section.
## File Reading Strategy
1. **Read changed lines' context:** Use `fs_read --path "file" --offset <start> --limit 50` to see surrounding code
2. **Grep for usage:** `fs_grep --pattern "function_name" --include "*.rs"` to find callers
3. **Never read entire large files:** Target the changed regions only
4. **Max 5 file reads:** Be efficient
## Output Format
Structure your response EXACTLY as:
```
## File: <file_path>
### Summary
<1-2 sentence summary of the changes>
### Findings
#### <finding_title>
- **Severity**: 🔴 CRITICAL | 🟡 WARNING | 🟢 SUGGESTION | 💡 NITPICK
- **Lines**: <start_line>-<end_line>
- **Description**: <clear explanation of the issue>
- **Suggestion**: <how to fix it>
#### <next_finding_title>
...
### Cross-File Concerns
<any issues received from siblings or that you alerted siblings about>
<"None" if no cross-file concerns>
REVIEW_COMPLETE
```
## Severity Guide
| Severity | When to use |
|----------|------------|
| 🔴 CRITICAL | Bugs, security vulnerabilities, data loss risks, crashes |
| 🟡 WARNING | Logic errors, performance issues, missing error handling, race conditions |
| 🟢 SUGGESTION | Better patterns, improved readability, missing docs for public APIs |
| 💡 NITPICK | Style preferences, minor naming issues, formatting |
## Rules
1. **Be specific:** Reference exact line numbers and code
2. **Be actionable:** Every finding must have a suggestion
3. **Don't nitpick formatting:** If a formatter/linter exists (check for .rustfmt.toml, .prettierrc, etc.)
4. **Focus on the diff:** Don't review unchanged code unless it's directly affected
5. **Never modify files:** You are read-only
6. **Always end with REVIEW_COMPLETE**
## Context
- Project: {{project_dir}}
- CWD: {{__cwd__}}
+33
View File
@@ -0,0 +1,33 @@
#!/usr/bin/env bash
set -eo pipefail
# shellcheck disable=SC1090
source "$LLM_PROMPT_UTILS_FILE"
source "$LLM_ROOT_DIR/agents/.shared/utils.sh"
# @env LLM_OUTPUT=/dev/stdout
# @env LLM_AGENT_VAR_PROJECT_DIR=.
# @describe File reviewer tools for single-file code review
_project_dir() {
local dir="${LLM_AGENT_VAR_PROJECT_DIR:-.}"
(cd "${dir}" 2>/dev/null && pwd) || echo "${dir}"
}
# @cmd Get project structure to understand codebase layout
get_structure() {
local project_dir
project_dir=$(_project_dir)
info "Project structure:" >> "$LLM_OUTPUT"
echo "" >> "$LLM_OUTPUT"
local project_info
project_info=$(detect_project "${project_dir}")
{
echo "Type: $(echo "${project_info}" | jq -r '.type')"
echo ""
get_tree "${project_dir}" 2
} >> "$LLM_OUTPUT"
}
+23 -1
View File
@@ -2,7 +2,7 @@
An AI agent specialized in high-level architecture, complex debugging, and design decisions.
This agent is designed to be delegated to by the **[Sisyphus](../sisyphus/README.md)** agent when deep reasoning, architectural advice,
This agent is designed to be delegated to by the **[Sisyphus](../sisyphus/README.md)** agent when deep reasoning, architectural advice,
or complex problem-solving is required. Sisyphus acts as the coordinator, while Oracle provides the expert analysis and
recommendations.
@@ -15,3 +15,25 @@ It can also be used as a standalone tool for design reviews and solving difficul
- ⚖️ Tradeoff analysis and technology selection
- 📝 Code review and best practices advice
- 🧠 Deep reasoning for ambiguous problems
## Pro-Tip: Use an IDE MCP Server for Improved Performance
Many modern IDEs now include MCP servers that let LLMs perform operations within the IDE itself and use IDE tools. Using
an IDE's MCP server dramatically improves the performance of coding agents. So if you have an IDE, try adding that MCP
server to your config (see the [MCP Server docs](../../../docs/function-calling/MCP-SERVERS.md) to see how to configure
them), and modify the agent definition to look like this:
```yaml
# ...
mcp_servers:
- jetbrains # The name of your configured IDE MCP server
global_tools:
- fs_read.sh
- fs_grep.sh
- fs_glob.sh
- fs_ls.sh
- web_search_loki.sh
# ...
```
+1 -2
View File
@@ -2,7 +2,6 @@ name: oracle
description: High-IQ advisor for architecture, debugging, and complex decisions
version: 1.0.0
temperature: 0.2
top_p: 0.95
variables:
- name: project_dir
@@ -14,7 +13,7 @@ global_tools:
- fs_grep.sh
- fs_glob.sh
- fs_ls.sh
- execute_command.sh
- web_search_loki.sh
instructions: |
You are Oracle - a senior architect and debugger consulted for complex decisions.
+24 -1
View File
@@ -1,6 +1,6 @@
# Sisyphus
The main coordinator agent for the Loki coding ecosystem, providing a powerful CLI interface for code generation and
The main coordinator agent for the Loki coding ecosystem, providing a powerful CLI interface for code generation and
project management similar to OpenCode, ClaudeCode, Codex, or Gemini CLI.
_Inspired by the Sisyphus and Oracle agents of OpenCode._
@@ -16,3 +16,26 @@ Sisyphus acts as the primary entry point, capable of handling complex tasks by c
- 💻 **CLI Coding**: Provides a natural language interface for writing and editing code.
- 🔄 **Task Management**: Tracks progress and context across complex operations.
- 🛠️ **Tool Integration**: Seamlessly uses system tools for building, testing, and file manipulation.
## Pro-Tip: Use an IDE MCP Server for Improved Performance
Many modern IDEs now include MCP servers that let LLMs perform operations within the IDE itself and use IDE tools. Using
an IDE's MCP server dramatically improves the performance of coding agents. So if you have an IDE, try adding that MCP
server to your config (see the [MCP Server docs](../../../docs/function-calling/MCP-SERVERS.md) to see how to configure
them), and modify the agent definition to look like this:
```yaml
# ...
mcp_servers:
- jetbrains
global_tools:
- fs_read.sh
- fs_grep.sh
- fs_glob.sh
- fs_ls.sh
- web_search_loki.sh
- execute_command.sh
# ...
```
+72 -154
View File
@@ -1,14 +1,19 @@
name: sisyphus
description: OpenCode-style orchestrator - classifies intent, delegates to specialists, tracks progress with todos
version: 1.0.0
version: 2.0.0
temperature: 0.1
top_p: 0.95
agent_session: sisyphus
agent_session: temp
auto_continue: true
max_auto_continues: 25
inject_todo_instructions: true
can_spawn_agents: true
max_concurrent_agents: 4
max_agent_depth: 3
inject_spawn_instructions: true
summarization_threshold: 4000
variables:
- name: project_dir
description: Project directory to work in
@@ -22,6 +27,7 @@ global_tools:
- fs_grep.sh
- fs_glob.sh
- fs_ls.sh
- web_search_loki.sh
- execute_command.sh
instructions: |
@@ -34,14 +40,14 @@ instructions: |
| Type | Signal | Action |
|------|--------|--------|
| Trivial | Single file, known location, typo fix | Do it yourself with tools |
| Exploration | "Find X", "Where is Y", "List all Z" | Delegate to `explore` agent |
| Implementation | "Add feature", "Fix bug", "Write code" | Delegate to `coder` agent |
| Architecture/Design | See oracle triggers below | Delegate to `oracle` agent |
| Ambiguous | Unclear scope, multiple interpretations | ASK the user via `ask_user` or `ask_user_input` |
| Exploration | "Find X", "Where is Y", "List all Z" | Spawn `explore` agent |
| Implementation | "Add feature", "Fix bug", "Write code" | Spawn `coder` agent |
| Architecture/Design | See oracle triggers below | Spawn `oracle` agent |
| Ambiguous | Unclear scope, multiple interpretations | ASK the user via `user__ask` or `user__input` |
### Oracle Triggers (MUST delegate to oracle when you see these)
### Oracle Triggers (MUST spawn oracle when you see these)
Delegate to `oracle` ANY time the user asks about:
Spawn `oracle` ANY time the user asks about:
- **"How should I..."** / **"What's the best way to..."** -- design/approach questions
- **"Why does X keep..."** / **"What's wrong with..."** -- complex debugging (not simple errors)
- **"Should I use X or Y?"** -- technology or pattern choices
@@ -55,54 +61,7 @@ instructions: |
Even if you think you know the answer, oracle provides deeper, more thorough analysis.
The only exception is truly trivial questions about a single file you've already read.
## Context System (CRITICAL for multi-step tasks)
Context is shared between you and your subagents. This lets subagents know what you've learned.
**At the START of a multi-step task:**
```
start_task --goal "Description of overall task"
```
**During work** (automatically captured from delegations, or manually):
```
record_finding --source "manual" --finding "Important discovery"
```
**To see accumulated context:**
```
show_context
```
**When task is COMPLETE:**
```
end_task
```
When you delegate, subagents automatically receive all accumulated context.
## Todo System (MANDATORY for multi-step tasks)
For ANY task with 2+ steps:
1. Call `start_task` with the goal (initializes context)
2. Call `todo__init` with the goal
3. Call `todo__add` for each step BEFORE starting
4. Work through steps, calling `todo__done` IMMEDIATELY after each
5. The system auto-continues until all todos are done
6. Call `end_task` when complete (clears context)
## Delegation Pattern
When delegating, use `delegate_to_agent` with:
- agent: explore | coder | oracle
- task: Specific, atomic goal
- context: Additional context beyond what's in the shared context file
The shared context (from `start_task` and prior delegations) is automatically injected.
**CRITICAL**: After delegation, VERIFY the result before marking the todo done.
## Agent Specializations
### Agent Specializations
| Agent | Use For | Characteristics |
|-------|---------|-----------------|
@@ -112,40 +71,42 @@ instructions: |
## Workflow Examples
### Example 1: Implementation task (explore -> coder)
### Example 1: Implementation task (explore -> coder, parallel exploration)
User: "Add a new API endpoint for user profiles"
```
1. start_task --goal "Add user profiles API endpoint"
2. todo__init --goal "Add user profiles API endpoint"
3. todo__add --task "Explore existing API patterns"
4. todo__add --task "Implement profile endpoint"
5. todo__add --task "Verify with build/test"
6. delegate_to_agent --agent explore --task "Find existing API endpoint patterns and structures"
7. todo__done --id 1
8. delegate_to_agent --agent coder --task "Create user profiles endpoint following existing patterns"
9. todo__done --id 2
10. run_build
11. run_tests
12. todo__done --id 3
13. end_task
1. todo__init --goal "Add user profiles API endpoint"
2. todo__add --task "Explore existing API patterns"
3. todo__add --task "Implement profile endpoint"
4. todo__add --task "Verify with build/test"
5. agent__spawn --agent explore --prompt "Find existing API endpoint patterns, route structures, and controller conventions"
6. agent__spawn --agent explore --prompt "Find existing data models and database query patterns"
7. agent__collect --id <id1>
8. agent__collect --id <id2>
9. todo__done --id 1
10. agent__spawn --agent coder --prompt "Create user profiles endpoint following existing patterns. [Include context from explore results]"
11. agent__collect --id <coder_id>
12. todo__done --id 2
13. run_build
14. run_tests
15. todo__done --id 3
```
### Example 2: Architecture/design question (explore -> oracle)
### Example 2: Architecture/design question (explore + oracle in parallel)
User: "How should I structure the authentication for this app?"
```
1. start_task --goal "Get architecture advice for authentication"
2. todo__init --goal "Get architecture advice for authentication"
3. todo__add --task "Explore current auth-related code"
4. todo__add --task "Consult oracle for architecture recommendation"
5. delegate_to_agent --agent explore --task "Find any existing auth code, middleware, user models, and session handling"
6. todo__done --id 1
7. delegate_to_agent --agent oracle --task "Recommend authentication architecture" --context "User wants auth advice. Explore found: [summarize findings]. Evaluate approaches and recommend the best one with justification."
8. todo__done --id 2
9. end_task
1. todo__init --goal "Get architecture advice for authentication"
2. todo__add --task "Explore current auth-related code"
3. todo__add --task "Consult oracle for architecture recommendation"
4. agent__spawn --agent explore --prompt "Find any existing auth code, middleware, user models, and session handling"
5. agent__spawn --agent oracle --prompt "Recommend authentication architecture for this project. Consider: JWT vs sessions, middleware patterns, security best practices."
6. agent__collect --id <explore_id>
7. todo__done --id 1
8. agent__collect --id <oracle_id>
9. todo__done --id 2
```
### Example 3: Vague/open-ended question (oracle directly)
@@ -153,22 +114,23 @@ instructions: |
User: "What do you think of this codebase structure?"
```
1. delegate_to_agent --agent oracle --task "Review the project structure and provide recommendations for improvement"
# Oracle will read files and analyze on its own
agent__spawn --agent oracle --prompt "Review the project structure and provide recommendations for improvement"
agent__collect --id <oracle_id>
```
## Rules
1. **Always start_task first** - Initialize context before multi-step work
2. **Always classify before acting** - Don't jump into implementation
3. **Create todos for multi-step tasks** - Track your progress
4. **Delegate specialized work** - You're a coordinator, not an implementer
5. **Verify after delegation** - Don't trust blindly
1. **Always classify before acting** - Don't jump into implementation
2. **Create todos for multi-step tasks** - Track your progress
3. **Spawn agents for specialized work** - You're a coordinator, not an implementer
4. **Spawn in parallel when possible** - Independent tasks should run concurrently
5. **Verify after collecting agent results** - Don't trust blindly
6. **Mark todos done immediately** - Don't batch completions
7. **Ask when ambiguous** - Use `ask_user` or `ask_user_input` to clarify with the user interactively
8. **Get buy-in for design decisions** - Use `ask_user` to present options before implementing major changes
9. **Confirm destructive actions** - Use `ask_user_confirm` before large refactors or deletions
10. **Always end_task** - Clean up context when done
7. **Ask when ambiguous** - Use `user__ask` or `user__input` to clarify with the user interactively
8. **Get buy-in for design decisions** - Use `user__ask` to present options before implementing major changes
9. **Confirm destructive actions** - Use `user__confirm` before large refactors or deletions
10. **Delegate to the coder agent to write code** - IMPORTANT: Use the `coder` agent to write code. Do not try to write code yourself except for trivial changes
11. **Always output a summary of changes when finished** - Make it clear to users that you've completed your tasks
## When to Do It Yourself
@@ -187,58 +149,18 @@ instructions: |
## User Interaction (CRITICAL - get buy-in before major decisions)
You have tools to prompt the user for input. Use them to get user buy-in before making design decisions, and to clarify ambiguities interactively. **Do NOT guess when you can ask.**
You have built-in tools to prompt the user for input. Use them to get user buy-in before making design decisions, and
to clarify ambiguities interactively. **Do NOT guess when you can ask.**
### When to Prompt the User
| Situation | Tool | Example |
|-----------|------|---------|
| Multiple valid design approaches | `ask_user` | "How should we structure this?" with options |
| Confirming a destructive or major action | `ask_user_confirm` | "This will refactor 12 files. Proceed?" |
| User should pick which features/items to include | `ask_user_checkbox` | "Which endpoints should we add?" |
| Need specific input (names, paths, values) | `ask_user_input` | "What should the new module be called?" |
| Ambiguous request with different effort levels | `ask_user` | Present interpretation options |
### How to Use `ask_user` (single-select list)
Present your **recommended option first** with `(Recommended)` appended to its label:
```
ask_user --question "Which authentication strategy should we use?" \
--options "JWT with httpOnly cookies (Recommended)" \
--options "Session-based auth with Redis" \
--options "OAuth2 with third-party provider"
```
The tool renders an interactive list on the user's terminal. They navigate with arrow keys and press Enter. The selected label is returned to you.
### How to Use `ask_user_confirm` (yes/no)
```
ask_user_confirm --question "This will delete the legacy API module. Continue?"
```
Returns "User confirmed: yes" or "User confirmed: no". **Respect the answer** — if the user says no, do NOT proceed with that action.
### How to Use `ask_user_checkbox` (multi-select)
```
ask_user_checkbox --question "Which optional features should be included?" \
--options "Rate limiting" \
--options "Request logging" \
--options "CORS support" \
--options "Health check endpoint"
```
Returns a list of all selected labels. The user selects items with Space and confirms with Enter.
### How to Use `ask_user_input` (free-text)
```
ask_user_input --question "What should the database table be named?"
```
Returns whatever text the user typed.
| Multiple valid design approaches | `user__ask` | "How should we structure this?" with options |
| Confirming a destructive or major action | `user__confirm` | "This will refactor 12 files. Proceed?" |
| User should pick which features/items to include | `user__checkbox` | "Which endpoints should we add?" |
| Need specific input (names, paths, values) | `user__input` | "What should the new module be called?" |
| Ambiguous request with different effort levels | `user__ask` | Present interpretation options |
### Design Review Pattern
@@ -246,27 +168,23 @@ instructions: |
1. **Explore** the codebase to understand existing patterns
2. **Formulate** 2-3 design options based on findings
3. **Present options** to the user via `ask_user` with your recommendation marked `(Recommended)`
3. **Present options** to the user via `user__ask` with your recommendation marked `(Recommended)`
4. **Confirm** the chosen approach before delegating to `coder`
5. Proceed with implementation
Example flow:
```
1. delegate_to_agent --agent explore --task "Find existing API patterns"
2. ask_user --question "I found two patterns in the codebase. Which should we follow?" \
--options "REST with controller pattern in src/api/ (Recommended)" \
--options "GraphQL resolver pattern in src/graphql/"
3. ask_user_confirm --question "I'll create a new controller at src/api/profiles.rs following the REST pattern. Proceed?"
4. delegate_to_agent --agent coder --task "Create profiles controller following REST pattern"
```
### Rules for User Prompts
1. **Always include (Recommended)** on the option you think is best in `ask_user`
2. **Respect user choices** never override or ignore a selection
3. **Don't over-prompt** trivial decisions (variable names in small functions, formatting) don't need prompts
1. **Always include (Recommended)** on the option you think is best in `user__ask`
2. **Respect user choices** - never override or ignore a selection
3. **Don't over-prompt** - trivial decisions (variable names in small functions, formatting) don't need prompts
4. **DO prompt for**: architecture choices, file/module naming, which of multiple valid approaches to take, destructive operations, anything you're genuinely unsure about
5. **Confirm before large changes** if a task will touch 5+ files, confirm the plan first
5. **Confirm before large changes** - if a task will touch 5+ files, confirm the plan first
## Escalation Handling
If you see `pending_escalations` in your tool results, a child agent needs user input and is blocked.
Reply promptly via `agent__reply_escalation` to unblock it. You can answer from context or prompt the user
yourself first, then relay the answer.
## Available Tools
{{__tools__}}
+8 -264
View File
@@ -7,126 +7,18 @@ export AUTO_CONFIRM=true
# @env LLM_OUTPUT=/dev/stdout
# @env LLM_AGENT_VAR_PROJECT_DIR=.
# @describe Sisyphus orchestrator tools for delegating to specialized agents
# @describe Sisyphus orchestrator tools (project info, build, test)
_project_dir() {
local dir="${LLM_AGENT_VAR_PROJECT_DIR:-.}"
(cd "${dir}" 2>/dev/null && pwd) || echo "${dir}"
}
# @cmd Initialize context for a new task (call at the start of multi-step work)
# @option --goal! Description of the overall task/goal
start_task() {
local project_dir
project_dir=$(_project_dir)
export LLM_AGENT_VAR_PROJECT_DIR="${project_dir}"
# shellcheck disable=SC2154
init_context "${argc_goal}"
cat <<-EOF >> "$LLM_OUTPUT"
$(green "Context initialized for task: ${argc_goal}")
Context file: ${project_dir}/.loki-context
EOF
}
# @cmd Add a finding to the shared context (useful for recording discoveries)
# @option --source! Source of the finding (e.g., "manual", "explore", "coder")
# @option --finding! The finding to record
record_finding() {
  # Append a single discovery to the shared context under its source tag and
  # acknowledge it to the model via $LLM_OUTPUT.
  local root
  root=$(_project_dir)
  export LLM_AGENT_VAR_PROJECT_DIR="${root}"
  # ${argc_source} / ${argc_finding} are injected by argc from the options above.
  # shellcheck disable=SC2154
  append_context "${argc_source}" "${argc_finding}"
  green "Recorded finding from ${argc_source}" >> "$LLM_OUTPUT"
}
# @cmd Show current accumulated context
show_context() {
# Prints the accumulated shared context to $LLM_OUTPUT, or a warning when no
# context has been initialized yet.
local project_dir
project_dir=$(_project_dir)
# Persist the resolved path so helpers and child agents agree on the project root.
export LLM_AGENT_VAR_PROJECT_DIR="${project_dir}"
local context
# NOTE(review): read_context is not defined in this file; presumably it returns
# the stored context text (empty when absent) -- confirm in the shared library.
context=$(read_context)
if [[ -n "${context}" ]]; then
cat <<-EOF >> "$LLM_OUTPUT"
$(info "Current Context:")
${context}
EOF
else
warn "No context file found. Use start_task to initialize." >> "$LLM_OUTPUT"
fi
}
# @cmd Clear the context file (call when task is complete)
end_task() {
  # Wipe the shared context now that the task is finished and confirm via $LLM_OUTPUT.
  local root
  root=$(_project_dir)
  export LLM_AGENT_VAR_PROJECT_DIR="${root}"
  clear_context
  green "Context cleared. Task complete." >> "$LLM_OUTPUT"
}
# @cmd Delegate a task to a specialized agent
# @option --agent! Agent to delegate to: explore, coder, or oracle
# @option --task! Specific task description for the agent
# @option --context Additional context (file paths, patterns, constraints)
delegate_to_agent() {
# Synchronously runs one of the allowed child agents with the given task and
# streams a framed transcript of the delegation into $LLM_OUTPUT.
local extra_context="${argc_context:-}"
local project_dir
project_dir=$(_project_dir)
# ${argc_agent} is injected by argc from the required --agent option above.
# shellcheck disable=SC2154
if [[ ! "${argc_agent}" =~ ^(explore|coder|oracle)$ ]]; then
error "Invalid agent: ${argc_agent}. Must be explore, coder, or oracle" >> "$LLM_OUTPUT"
return 1
fi
# Share the resolved project root with the child agent.
export LLM_AGENT_VAR_PROJECT_DIR="${project_dir}"
info "Delegating to ${argc_agent} agent..." >> "$LLM_OUTPUT"
echo "" >> "$LLM_OUTPUT"
# shellcheck disable=SC2154
local prompt="${argc_task}"
# Append any optional free-form context beneath the task description.
if [[ -n "${extra_context}" ]]; then
prompt="$(printf "%s\n\nAdditional Context:\n%s\n" "${argc_task}" "${extra_context}")"
fi
cat <<-EOF >> "$LLM_OUTPUT"
$(cyan "------------------------------------------")
DELEGATING TO: ${argc_agent}
TASK: ${argc_task}
$(cyan "------------------------------------------")
EOF
local output
# NOTE(review): invoke_agent_with_summary is not defined in this file; it appears
# to run the child agent and possibly summarize long output -- confirm in the
# shared library. '|| true' keeps a failing delegation from aborting this tool;
# the child's stderr is still captured via 2>&1 and relayed below.
output=$(invoke_agent_with_summary "${argc_agent}" "${prompt}" \
--agent-variable project_dir "${project_dir}" 2>&1) || true
cat <<-EOF >> "$LLM_OUTPUT"
${output}
$(cyan "------------------------------------------")
DELEGATION COMPLETE: ${argc_agent}
$(cyan "------------------------------------------")
EOF
}
# @cmd Get project information and structure
get_project_info() {
local project_dir
project_dir=$(_project_dir)
info "Project: ${project_dir}" >> "$LLM_OUTPUT"
echo "" >> "$LLM_OUTPUT"
@@ -147,17 +39,17 @@ get_project_info() {
run_build() {
local project_dir
project_dir=$(_project_dir)
local project_info
project_info=$(detect_project "${project_dir}")
local build_cmd
build_cmd=$(echo "${project_info}" | jq -r '.build')
if [[ -z "${build_cmd}" ]] || [[ "${build_cmd}" == "null" ]]; then
warn "No build command detected for this project" >> "$LLM_OUTPUT"
return 0
fi
info "Running: ${build_cmd}" >> "$LLM_OUTPUT"
echo "" >> "$LLM_OUTPUT"
@@ -177,17 +69,17 @@ run_build() {
run_tests() {
local project_dir
project_dir=$(_project_dir)
local project_info
project_info=$(detect_project "${project_dir}")
local test_cmd
test_cmd=$(echo "${project_info}" | jq -r '.test')
if [[ -z "${test_cmd}" ]] || [[ "${test_cmd}" == "null" ]]; then
warn "No test command detected for this project" >> "$LLM_OUTPUT"
return 0
fi
info "Running: ${test_cmd}" >> "$LLM_OUTPUT"
echo "" >> "$LLM_OUTPUT"
@@ -203,151 +95,3 @@ run_tests() {
fi
}
# @cmd Quick file search in the project
# @option --pattern! File name pattern to search for (e.g., "*.rs", "config*")
search_files() {
  # Searches the project tree for file names matching the glob pattern and
  # writes the matches (or a warning) to $LLM_OUTPUT.
  # ${argc_pattern} is injected by argc from the required --pattern option.
  # shellcheck disable=SC2154
  local pattern="${argc_pattern}"
  local project_dir
  project_dir=$(_project_dir)
  info "Searching for: ${pattern}" >> "$LLM_OUTPUT"
  echo "" >> "$LLM_OUTPUT"
  local results
  # BUG FIX: the previous body invoked `search_files` from inside this function.
  # Bash resolves function names before PATH lookup, so that call recursed into
  # this very function without end. Search with find(1) directly instead,
  # pruning build/vendor/VCS directories and capping output at 30 entries to
  # mirror search_content below.
  results=$(find "${project_dir}" \
    \( -name target -o -name node_modules -o -name .git \) -prune -o \
    -name "${pattern}" -print 2>/dev/null | head -30) || true
  if [[ -n "${results}" ]]; then
    echo "${results}" >> "$LLM_OUTPUT"
  else
    warn "No files found matching: ${pattern}" >> "$LLM_OUTPUT"
  fi
}
# @cmd Search for content in files
# @option --pattern! Text pattern to search for
# @option --file-type File extension to search in (e.g., "rs", "py")
search_content() {
  # Greps the project tree for the text pattern (optionally restricted to one
  # file extension) and writes up to 30 matches to $LLM_OUTPUT.
  # ${argc_pattern} / ${argc_file_type} are injected by argc from the options above.
  # shellcheck disable=SC2154
  local pattern="${argc_pattern}"
  local file_type="${argc_file_type:-}"
  local project_dir
  project_dir=$(_project_dir)
  info "Searching for: ${pattern}" >> "$LLM_OUTPUT"
  echo "" >> "$LLM_OUTPUT"
  # FIX: build grep arguments as an array. The previous string + unquoted
  # expansion (the suppressed SC2086) left '--include=*.<ext>' subject to
  # pathname expansion and word splitting; an array passes each flag verbatim.
  local grep_args=(-rn)
  if [[ -n "${file_type}" ]]; then
    grep_args+=("--include=*.${file_type}")
  fi
  local results
  # Filter out build/vendor/VCS noise and cap the result set; '|| true' keeps
  # "no matches" (grep exit status 1) from failing the tool.
  results=$(grep "${grep_args[@]}" "${pattern}" "${project_dir}" 2>/dev/null | \
    grep -v '/target/' | grep -v '/node_modules/' | grep -v '/.git/' | \
    head -30) || true
  if [[ -n "${results}" ]]; then
    echo "${results}" >> "$LLM_OUTPUT"
  else
    warn "No matches found for: ${pattern}" >> "$LLM_OUTPUT"
  fi
}
# @cmd Ask the user to select ONE option from a list. The first option should be your recommended choice — append '(Recommended)' to its label. Returns the selected option's label text.
# @option --question! The question to present to the user
# @option --options+ The list of options to present (first option = recommended, append '(Recommended)' to its label)
ask_user() {
  # Presents a single-select prompt and reports the chosen label via $LLM_OUTPUT.
  # ${argc_question} / ${argc_options[@]} are injected by argc from the options above.
  # shellcheck disable=SC2154
  local question="${argc_question}"
  # shellcheck disable=SC2154
  local opts=("${argc_options[@]}")
  local opts_count="${#opts[@]}"
  if [[ "${opts_count}" -eq 0 ]]; then
    error "No options provided for ask_user" >> "$LLM_OUTPUT"
    return 1
  fi
  info "Asking user: ${question}" >> "$LLM_OUTPUT"
  local selected_index
  # `list` renders the interactive prompt and prints the chosen 0-based index.
  selected_index=$(list "${question}" "${opts[@]}")
  # FIX: validate the returned index before using it. An empty or non-numeric
  # value (e.g. an aborted prompt) previously produced a bad array subscript or
  # an empty label; this mirrors the index validation in ask_user_checkbox.
  if [[ ! "${selected_index}" =~ ^[0-9]+$ ]] || (( selected_index >= opts_count )); then
    error "No valid selection was made for ask_user" >> "$LLM_OUTPUT"
    return 1
  fi
  local selected_label="${opts[$selected_index]}"
  echo "User selected: ${selected_label}" >> "$LLM_OUTPUT"
}
# @cmd Ask the user a yes/no confirmation question. Returns 'yes' or 'no'.
# @option --question! The yes/no question to present to the user
ask_user_confirm() {
  # Presents a yes/no prompt and reports the answer via $LLM_OUTPUT.
  # ${argc_question} is injected by argc from the required --question option.
  # shellcheck disable=SC2154
  local question="${argc_question}"
  info "Asking user: ${question}" >> "$LLM_OUTPUT"
  local result
  result=$(confirm "${question}")
  # `confirm` output of "1" is treated as yes; anything else as no.
  local answer="no"
  if [[ "${result}" == "1" ]]; then
    answer="yes"
  fi
  echo "User confirmed: ${answer}" >> "$LLM_OUTPUT"
}
# @cmd Ask the user to select MULTIPLE options from a list (checkbox). Returns the labels of all selected items.
# @option --question! The question to present to the user
# @option --options+ The list of options the user can select from (multiple selections allowed)
ask_user_checkbox() {
  # Presents a multi-select checkbox prompt and reports every chosen label via $LLM_OUTPUT.
  # ${argc_question} / ${argc_options[@]} are injected by argc from the options above.
  # shellcheck disable=SC2154
  local question="${argc_question}"
  # shellcheck disable=SC2154
  local opts=("${argc_options[@]}")
  if [[ "${#opts[@]}" -eq 0 ]]; then
    error "No options provided for ask_user_checkbox" >> "$LLM_OUTPUT"
    return 1
  fi
  info "Asking user (select multiple): ${question}" >> "$LLM_OUTPUT"
  local checked_indices
  checked_indices=$(checkbox "${question}" "${opts[@]}")
  # Map the whitespace-separated indices back onto their labels, skipping
  # anything that is not a plain non-negative integer.
  local chosen=()
  local idx
  for idx in ${checked_indices}; do
    if [[ -n "${idx}" ]] && [[ "${idx}" =~ ^[0-9]+$ ]]; then
      chosen+=("${opts[$idx]}")
    fi
  done
  if [[ "${#chosen[@]}" -eq 0 ]]; then
    echo "User selected: (none)" >> "$LLM_OUTPUT"
  else
    echo "User selected:" >> "$LLM_OUTPUT"
    local label
    for label in "${chosen[@]}"; do
      echo " - ${label}" >> "$LLM_OUTPUT"
    done
  fi
}
# @cmd Ask the user for free-text input. Returns whatever the user typed.
# @option --question! The prompt/question to present to the user
ask_user_input() {
# Prompts for free-form text and relays the answer via $LLM_OUTPUT.
# ${argc_question} is injected by argc from the required --question option.
# shellcheck disable=SC2154
local question="${argc_question}"
info "Asking user: ${question}" >> "$LLM_OUTPUT"
local user_text
# NOTE(review): `input` is not defined in this file; presumably it renders the
# interactive text prompt -- confirm in the shared prompt library.
user_text=$(input "${question}")
cat <<-EOF >> "$LLM_OUTPUT"
User input: ${user_text}
EOF
}
+2 -2
View File
@@ -121,7 +121,7 @@ _cursor_blink_off() {
}
_cursor_to() {
echo -en "\033[$1;$2H" >&2
echo -en "\033[$1;${2:-1}H" >&2
}
# shellcheck disable=SC2154
@@ -133,7 +133,7 @@ _key_input() {
_read_stdin -rsn2 b
fi
declare input="${a}${b}"
declare input="${a}${b:-}"
case "$input" in
"${ESC}[A" | "k") echo up ;;
"${ESC}[B" | "j") echo down ;;
+10
View File
@@ -24,6 +24,16 @@ auto_continue: false # Enable automatic continuation when incomplete
max_auto_continues: 10 # Maximum number of automatic continuations before stopping
inject_todo_instructions: true # Inject the default todo tool usage instructions into the agent's system prompt
continuation_prompt: null # Custom prompt used when auto-continuing (optional; uses default if null)
# Sub-Agent Spawning System
# Enable this agent to spawn and manage child agents in parallel.
# See docs/AGENTS.md for detailed documentation.
can_spawn_agents: false # Enable the agent to spawn child agents
max_concurrent_agents: 4 # Maximum number of agents that can run simultaneously
max_agent_depth: 3 # Maximum nesting depth for sub-agents (prevents runaway spawning)
inject_spawn_instructions: true # Inject the default agent spawning instructions into the agent's system prompt
summarization_model: null # Model to use for summarizing sub-agent output (e.g. 'openai:gpt-4o-mini'); defaults to current model
summarization_threshold: 4000 # Character threshold above which sub-agent output is summarized before returning to parent
escalation_timeout: 300 # Seconds a sub-agent waits for a user interaction response before timing out (default: 5 minutes)
mcp_servers: # Optional list of MCP servers that the agent utilizes
- github # Corresponds to the name of an MCP server in the `<loki-config-dir>/functions/mcp.json` file
global_tools: # Optional list of additional global tools to enable for the agent; i.e. not tools specific to the agent
+4
View File
@@ -192,6 +192,8 @@ clients:
- type: gemini
api_base: https://generativelanguage.googleapis.com/v1beta
api_key: '{{GEMINI_API_KEY}}' # You can either hard-code or inject secrets from the Loki vault
auth: null # When set to 'oauth', Loki will use OAuth instead of an API key
# Authenticate with `loki --authenticate` or `.authenticate` in the REPL
patch:
chat_completions:
'.*':
@@ -210,6 +212,8 @@ clients:
- type: claude
api_base: https://api.anthropic.com/v1 # Optional
api_key: '{{ANTHROPIC_API_KEY}}' # You can either hard-code or inject secrets from the Loki vault
auth: null # When set to 'oauth', Loki will use OAuth instead of an API key
# Authenticate with `loki --authenticate` or `.authenticate` in the REPL
# See https://docs.mistral.ai/
- type: openai-compatible
+238 -1
View File
@@ -35,6 +35,18 @@ If you're looking for more example agents, refer to the [built-in agents](../ass
- [Bash-Based Agent Tools](#bash-based-agent-tools)
- [5. Conversation Starters](#5-conversation-starters)
- [6. Todo System & Auto-Continuation](#6-todo-system--auto-continuation)
- [7. Sub-Agent Spawning System](#7-sub-agent-spawning-system)
- [Configuration](#spawning-configuration)
- [Spawning & Collecting Agents](#spawning--collecting-agents)
- [Task Queue with Dependencies](#task-queue-with-dependencies)
- [Active Task Dispatch](#active-task-dispatch)
- [Output Summarization](#output-summarization)
- [Teammate Messaging](#teammate-messaging)
- [Runaway Safeguards](#runaway-safeguards)
- [8. User Interaction Tools](#8-user-interaction-tools)
- [Available Tools](#user-interaction-available-tools)
- [Escalation (Sub-Agent to User)](#escalation-sub-agent-to-user)
- [9. Auto-Injected Prompts](#9-auto-injected-prompts)
- [Built-In Agents](#built-in-agents)
<!--toc:end-->
@@ -87,6 +99,14 @@ auto_continue: false # Enable automatic continuation when incomp
max_auto_continues: 10 # Maximum continuation attempts before stopping
inject_todo_instructions: true # Inject todo tool instructions into system prompt
continuation_prompt: null # Custom prompt for continuations (optional)
# Sub-Agent Spawning (see "Sub-Agent Spawning System" section below)
can_spawn_agents: false # Enable spawning child agents
max_concurrent_agents: 4 # Max simultaneous child agents
max_agent_depth: 3 # Max nesting depth (prevents runaway)
inject_spawn_instructions: true # Inject spawning instructions into system prompt
summarization_model: null # Model for summarizing sub-agent output (e.g. 'openai:gpt-4o-mini')
summarization_threshold: 4000 # Char count above which sub-agent output is summarized
escalation_timeout: 300 # Seconds sub-agents wait for escalated user input (default: 5 min)
```
As mentioned previously: Agents utilize function calling to extend a model's capabilities. However, agents operate in
@@ -471,13 +491,230 @@ inject_todo_instructions: true # Include the default todo instructions into pr
For complete documentation including all configuration options, tool details, and best practices, see the
[Todo System Guide](./TODO-SYSTEM.md).
## 7. Sub-Agent Spawning System
Loki agents can spawn and manage child agents that run **in parallel** as background tasks inside the same process.
This enables orchestrator-style agents that delegate specialized work to other agents, similar to how tools like
Claude Code or OpenCode handle complex multi-step tasks.
For a working example of an orchestrator agent that uses sub-agent spawning, see the built-in
[sisyphus](../assets/agents/sisyphus) agent. For an example of the teammate messaging pattern with parallel sub-agents,
see the [code-reviewer](../assets/agents/code-reviewer) agent.
### Spawning Configuration
| Setting | Type | Default | Description |
|-----------------------------|---------|---------------|--------------------------------------------------------------------------------|
| `can_spawn_agents` | boolean | `false` | Enable this agent to spawn child agents |
| `max_concurrent_agents` | integer | `4` | Maximum number of child agents that can run simultaneously |
| `max_agent_depth` | integer | `3` | Maximum nesting depth for sub-agents (prevents runaway spawning chains) |
| `inject_spawn_instructions` | boolean | `true` | Inject the default spawning instructions into the agent's system prompt |
| `summarization_model` | string | current model | Model to use for summarizing long sub-agent output (e.g. `openai:gpt-4o-mini`) |
| `summarization_threshold` | integer | `4000` | Character count above which sub-agent output is summarized before returning |
| `escalation_timeout` | integer | `300` | Seconds a sub-agent waits for an escalated user interaction response |
**Example configuration:**
```yaml
# agents/my-orchestrator/config.yaml
can_spawn_agents: true
max_concurrent_agents: 6
max_agent_depth: 2
inject_spawn_instructions: true
summarization_model: openai:gpt-4o-mini
summarization_threshold: 3000
escalation_timeout: 600
```
### Spawning & Collecting Agents
When `can_spawn_agents` is enabled, the agent receives tools for spawning and managing child agents:
| Tool | Description |
|------------------|-------------------------------------------------------------------------|
| `agent__spawn` | Spawn a child agent in the background. Returns an agent ID immediately. |
| `agent__check` | Non-blocking check: is the agent done? Returns `PENDING` or the result. |
| `agent__collect` | Blocking wait: wait for an agent to finish, return its output. |
| `agent__list` | List all spawned agents and their status. |
| `agent__cancel` | Cancel a running agent by ID. |
The core pattern is **Spawn -> Continue -> Collect**:
```
# 1. Spawn agents in parallel (returns IDs immediately)
agent__spawn --agent explore --prompt "Find auth middleware patterns in src/"
agent__spawn --agent explore --prompt "Find error handling patterns in src/"
# 2. Continue your own work while they run
# 3. Check if done (non-blocking)
agent__check --id agent_explore_a1b2c3d4
# 4. Collect results when ready (blocking)
agent__collect --id agent_explore_a1b2c3d4
agent__collect --id agent_explore_e5f6g7h8
```
Any agent defined in your `<loki-config-dir>/agents/` directory can be spawned as a child. Child agents:
- Run in a fully isolated environment (separate session, config, and tools)
- Have their output suppressed from the terminal (no spinner, no tool call logging)
- Return their accumulated output to the parent when collected
### Task Queue with Dependencies
For complex workflows where tasks have ordering requirements, the spawning system includes a dependency-aware
task queue:
| Tool | Description |
|------------------------|-----------------------------------------------------------------------------|
| `agent__task_create` | Create a task with optional dependencies and auto-dispatch agent. |
| `agent__task_list` | List all tasks with their status, dependencies, and assignments. |
| `agent__task_complete` | Mark a task done. Returns newly unblocked tasks and auto-dispatches agents. |
| `agent__task_fail` | Mark a task as failed. Dependents remain blocked. |
```
# Create tasks with dependency ordering
agent__task_create --subject "Explore existing patterns"
agent__task_create --subject "Implement feature" --blocked_by ["task_1"]
agent__task_create --subject "Write tests" --blocked_by ["task_2"]
# Mark tasks complete to unblock dependents
agent__task_complete --task_id task_1
```
### Active Task Dispatch
Tasks can optionally specify an agent to auto-spawn when the task becomes runnable:
```
agent__task_create \
--subject "Implement the auth module" \
--blocked_by ["task_1"] \
--agent coder \
--prompt "Implement auth module based on patterns found in task_1"
```
When `task_1` completes and the dependent task becomes unblocked, an agent is automatically spawned with the
specified prompt. No manual intervention needed. This enables fully automated multi-step pipelines.
### Output Summarization
When a child agent produces long output, it can be automatically summarized before returning to the parent.
This keeps parent context windows manageable.
- If the output exceeds `summarization_threshold` characters (default: 4000), it is sent through an LLM
summarization pass
- The `summarization_model` setting lets you use a cheaper/faster model for summarization (e.g. `gpt-4o-mini`)
- If `summarization_model` is not set, the parent's current model is used
- The summarization preserves all actionable information: code snippets, file paths, error messages, and
concrete recommendations
### Teammate Messaging
All agents (including children) automatically receive tools for **direct sibling-to-sibling messaging**:
| Tool | Description |
|-----------------------|-----------------------------------------------------|
| `agent__send_message` | Send a text message to another agent's inbox by ID. |
| `agent__check_inbox` | Drain all pending messages from your inbox. |
This enables coordination patterns where child agents share cross-cutting findings:
```
# Agent A discovers something relevant to Agent B
agent__send_message --id agent_reviewer_b1c2d3e4 --message "Found a security issue in auth.rs line 42"
# Agent B checks inbox before finalizing
agent__check_inbox
```
Messages are routed through the parent's supervisor. A parent can message its children, and children can message
their siblings. For a working example of the teammate pattern, see the built-in
[code-reviewer](../assets/agents/code-reviewer) agent, which spawns file-specific reviewers that share
cross-cutting findings with each other.
### Runaway Safeguards
The spawning system includes built-in safeguards to prevent runaway agent chains:
- **`max_concurrent_agents`:** Caps how many agents can run at once (default: 4). Spawn attempts beyond this
limit return an error asking the agent to wait or cancel existing agents.
- **`max_agent_depth`:** Caps nesting depth (default: 3). A child agent spawning its own child increments the
depth counter. Attempts beyond the limit are rejected.
- **`can_spawn_agents`:** Only agents with this flag set to `true` can spawn children. By default, spawning is
disabled. This means child agents cannot spawn their own children unless you explicitly create them with
`can_spawn_agents: true` in their config.
## 8. User Interaction Tools
Loki includes built-in tools for agents (and the REPL) to interactively prompt the user for input. These tools
are **always available**. No configuration needed. They are automatically injected into every agent and into
REPL mode when function calling is enabled.
### User Interaction Available Tools
| Tool | Description | Returns |
|------------------|-----------------------------------------|----------------------------------|
| `user__ask` | Present a single-select list of options | The selected option string |
| `user__confirm` | Ask a yes/no question | `"yes"` or `"no"` |
| `user__input` | Request free-form text input | The text entered by the user |
| `user__checkbox` | Present a multi-select checkbox list | Array of selected option strings |
**Parameters:**
- `user__ask`: `--question "..." --options ["Option A", "Option B", "Option C"]`
- `user__confirm`: `--question "..."`
- `user__input`: `--question "..."`
- `user__checkbox`: `--question "..." --options ["Option A", "Option B", "Option C"]`
At the top level (depth 0), these tools render interactive terminal prompts directly using arrow-key navigation,
checkboxes, and text input fields.
### Escalation (Sub-Agent to User)
When a **child agent** (depth > 0) calls a `user__*` tool, it cannot prompt the terminal directly. Instead,
the request is **automatically escalated** to the root agent:
1. The child agent calls `user__ask(...)` and **blocks**, waiting for a reply
2. The root agent sees a `pending_escalations` notification in its next tool results
3. The root agent either answers from context or prompts the user itself, then calls
`agent__reply_escalation` to unblock the child
4. The child receives the reply and continues
The escalation timeout is configurable via `escalation_timeout` in the agent's `config.yaml` (default: 300
seconds / 5 minutes). If the timeout expires, the child receives a fallback message asking it to use its
best judgment.
| Tool | Description |
|---------------------------|--------------------------------------------------------------------------|
| `agent__reply_escalation` | Reply to a pending child escalation, unblocking the waiting child agent. |
This tool is automatically available to any agent with `can_spawn_agents: true`.
## 9. Auto-Injected Prompts
Loki automatically appends usage instructions to your agent's system prompt for each enabled built-in system.
These instructions are injected into both **static and dynamic instructions** after your own instructions,
ensuring agents always know how to use their available tools.
| System | Injected When | Toggle |
|--------------------|----------------------------------------------------------------|-----------------------------|
| Todo tools | `auto_continue: true` AND `inject_todo_instructions: true` | `inject_todo_instructions` |
| Spawning tools | `can_spawn_agents: true` AND `inject_spawn_instructions: true` | `inject_spawn_instructions` |
| Teammate messaging | Always (all agents) | None (always injected) |
| User interaction | Always (all agents) | None (always injected) |
If you prefer to write your own instructions for a system, set the corresponding `inject_*` flag to `false`
and include your custom instructions in the agent's `instructions` field. The built-in tools will still be
available; only the auto-injected prompt text is suppressed.
## Built-In Agents
Loki comes packaged with some useful built-in agents:
* `coder`: An agent to assist you with all your coding tasks
* `code-reviewer`: A [CodeRabbit](https://coderabbit.ai)-style code reviewer that spawns per-file reviewers using the teammate messaging pattern
* `demo`: An example agent to use for reference when learning to create your own agents
* `explore`: An agent designed to help you explore and understand your codebase
* `jira-helper`: An agent that assists you with all your Jira-related tasks
* `oracle`: An agent for high-level architecture, design decisions, and complex debugging
* `sisyphus`: A powerhouse agent for writing complex code and acting as a natural language interface for your codebase (similar to ClaudeCode, Gemini CLI, Codex, or OpenCode)
* `sisyphus`: A powerhouse orchestrator agent for writing complex code and acting as a natural language interface for your codebase (similar to ClaudeCode, Gemini CLI, Codex, or OpenCode). Uses sub-agent spawning to delegate to `explore`, `coder`, and `oracle`.
* `sql`: A universal SQL agent that enables you to talk to any relational database in natural language
+10 -1
View File
@@ -23,6 +23,7 @@ You can enter the REPL by simply typing `loki` without any follow-up flags or ar
- [`.edit` - Modify configuration files](#edit---modify-configuration-files)
- [`.delete` - Delete configurations from Loki](#delete---delete-configurations-from-loki)
- [`.info` - Display information about the current mode](#info---display-information-about-the-current-mode)
- [`.authenticate` - Authenticate the current model client via OAuth](#authenticate---authenticate-the-current-model-client-via-oauth)
- [`.exit` - Exit an agent/role/session/rag or the Loki REPL itself](#exit---exit-an-agentrolesessionrag-or-the-loki-repl-itself)
- [`.help` - Show the help guide](#help---show-the-help-guide)
<!--toc:end-->
@@ -50,6 +51,9 @@ things like
* **Configurable Keybindings:** You can switch between `emacs` style keybindings or `vi` style keybindings
* [**Custom REPL Prompt:**](./REPL-PROMPT.md) You can even customize the REPL prompt to display information about the
current context in the prompt
* **Built-in user interaction tools:** When function calling is enabled in the REPL, the `user__ask`, `user__confirm`,
`user__input`, and `user__checkbox` tools are always available for interactive prompts. These are not injected in the
one-shot CLI mode.
---
@@ -234,6 +238,11 @@ The following entities are supported:
| `.info agent` | Display information about the active agent |
| `.info rag` | Display information about the active RAG |
### `.authenticate` - Authenticate the current model client via OAuth
The `.authenticate` command will start the OAuth flow for the current model client if
* The client supports OAuth (See the [clients documentation](./clients/CLIENTS.md#providers-that-support-oauth) for supported clients)
* The client is configured in your Loki configuration to use OAuth via the `auth: oauth` property
### `.exit` - Exit an agent/role/session/rag or the Loki REPL itself
The `.exit` command is used to move between modes in the Loki REPL.
@@ -247,4 +256,4 @@ The `.exit` command is used to move between modes in the Loki REPL.
### `.help` - Show the help guide
Just like with any shell or REPL, you sometimes need a little help and want to know what commands are available to you.
That's when you use the `.help` command.
That's when you use the `.help` command.
+80 -6
View File
@@ -14,6 +14,7 @@ loki --info | grep 'config_file' | awk '{print $2}'
<!--toc:start-->
- [Supported Clients](#supported-clients)
- [Client Configuration](#client-configuration)
- [Authentication](#authentication)
- [Extra Settings](#extra-settings)
<!--toc:end-->
@@ -51,12 +52,13 @@ clients:
The client metadata uniquely identifies the client in Loki so you can reference it across your configurations. The
available settings are listed below:
| Setting | Description |
|----------|-----------------------------------------------------------------------------------------------|
| `name` | The name of the client (e.g. `openai`, `gemini`, etc.) |
| `models` | See the [model settings](#model-settings) documentation below |
| `patch` | See the [client patch configuration](./PATCHES.md#client-configuration-patches) documentation |
| `extra` | See the [extra settings](#extra-settings) documentation below |
| Setting | Description |
|----------|------------------------------------------------------------------------------------------------------------|
| `name` | The name of the client (e.g. `openai`, `gemini`, etc.) |
| `auth` | Authentication method: `oauth` for OAuth, or omit to use `api_key` (see [Authentication](#authentication)) |
| `models` | See the [model settings](#model-settings) documentation below |
| `patch` | See the [client patch configuration](./PATCHES.md#client-configuration-patches) documentation |
| `extra` | See the [extra settings](#extra-settings) documentation below |
Be sure to also check provider-specific configurations for any extra fields that are added for authentication purposes.
@@ -83,6 +85,78 @@ The `models` array lists the available models from the model client. Each one ha
| `default_chunk_size` | | `embedding` | The default chunk size to use with the given model |
| `max_batch_size` | | `embedding` | The maximum batch size that the given embedding model supports |
## Authentication
Loki clients support two authentication methods: **API keys** and **OAuth**. Each client entry in your configuration
must use one or the other.
### API Key Authentication
Most clients authenticate using an API key. Simply set the `api_key` field directly or inject it from the
[Loki vault](../VAULT.md):
```yaml
clients:
- type: claude
api_key: '{{ANTHROPIC_API_KEY}}'
```
API keys can also be provided via environment variables named `{CLIENT_NAME}_API_KEY` (e.g. `OPENAI_API_KEY`,
`GEMINI_API_KEY`). See the [environment variables documentation](../ENVIRONMENT-VARIABLES.md#client-related-variables)
for details.
### OAuth Authentication
For [providers that support OAuth](#providers-that-support-oauth), you can authenticate using your existing subscription instead of an API key. This uses
the OAuth 2.0 PKCE flow.
**Step 1: Configure the client**
Add a client entry with `auth: oauth` and no `api_key`:
```yaml
clients:
- type: claude
name: my-claude-oauth
auth: oauth
```
**Step 2: Authenticate**
Run the `--authenticate` flag with the client name:
```sh
loki --authenticate my-claude-oauth
```
Or if you have only one OAuth-configured client, you can omit the name:
```sh
loki --authenticate
```
Alternatively, you can use the REPL command `.authenticate`.
This opens your browser for the OAuth authorization flow. Depending on the provider, Loki will either start a
temporary localhost server to capture the callback automatically (e.g. Gemini) or ask you to paste the authorization
code back into the terminal (e.g. Claude). Loki stores the tokens in `~/.cache/loki/oauth` and automatically refreshes
them when they expire.
**Step 3: Use normally**
Once authenticated, the client works like any other. Loki uses the stored OAuth tokens automatically:
```sh
loki -m my-claude-oauth:claude-sonnet-4-20250514 "Hello!"
```
> **Note:** You can have multiple clients for the same provider. For example: you can have one with an API key and
> another with OAuth. Use the `name` field to distinguish them.
### Providers That Support OAuth
* Claude
* Gemini
## Extra Settings
Loki also lets you customize some extra settings for interacting with APIs:
+11 -11
View File
@@ -66,12 +66,12 @@ Prompt for text input
**Example With Validation:**
```bash
text=$(with_validate 'input "Please enter something:"' validate_present)
text=$(with_validate 'input "Please enter something:"' validate_present 2>/dev/tty)
```
**Example Without Validation:**
```bash
text=$(input "Please enter something:")
text=$(input "Please enter something:" 2>/dev/tty)
```
### confirm
@@ -81,7 +81,7 @@ Show a confirm dialog with options for yes/no
**Example:**
```bash
confirmed=$(confirm "Do the thing?")
confirmed=$(confirm "Do the thing?" 2>/dev/tty)
if [[ $confirmed == "0" ]]; then echo "No"; else echo "Yes"; fi
```
@@ -94,7 +94,7 @@ keys that then returns the chosen option.
**Example:**
```bash
options=("one" "two" "three" "four")
choice=$(list "Select an item" "${options[@]}")
choice=$(list "Select an item" "${options[@]}" 2>/dev/tty)
echo "Your choice: ${options[$choice]}"
```
@@ -107,7 +107,7 @@ and enter keys that then returns the chosen options.
**Example:**
```bash
options=("one" "two" "three" "four")
checked=$(checkbox "Select one or more items" "${options[@]}")
checked=$(checkbox "Select one or more items" "${options[@]}" 2>/dev/tty)
echo "Your choices: ${checked}"
```
@@ -124,12 +124,12 @@ validate_password() {
exit 1
fi
}
pass=$(with_validate 'password "Enter your password"' validate_password)
pass=$(with_validate 'password "Enter your password"' validate_password 2>/dev/tty)
```
**Example Without Validation:**
```bash
pass="$(password "Enter your password:")"
pass="$(password "Enter your password:" 2>/dev/tty)"
```
### editor
@@ -137,7 +137,7 @@ Open the default editor (`$EDITOR`); if none is set, default back to `vi`
**Example:**
```bash
text=$(editor "Please enter something in the editor")
text=$(editor "Please enter something in the editor" 2>/dev/tty)
echo -e "You wrote:\n${text}"
```
@@ -150,7 +150,7 @@ validation functions returns 0.
**Example:**
```bash
# Using the built-in 'validate_present' validator
text=$(with_validate 'input "Please enter something and confirm with enter"' validate_present)
text=$(with_validate 'input "Please enter something and confirm with enter"' validate_present 2>/dev/tty)
# Using a custom validator; e.g. for password
validate_password() {
@@ -159,7 +159,7 @@ validate_password() {
exit 1
fi
}
pass=$(with_validate 'password "Enter random password"' validate_password)
pass=$(with_validate 'password "Enter random password"' validate_password 2>/dev/tty)
```
### validate_present
@@ -169,7 +169,7 @@ Validate that the prompt returned a value.
**Example:**
```bash
text=$(with_validate 'input "Please enter something and confirm with enter"' validate_present)
text=$(with_validate 'input "Please enter something and confirm with enter"' validate_present 2>/dev/tty)
```
### detect_os
+253 -137
View File
@@ -81,6 +81,7 @@
supports_vision: true
supports_function_calling: true
- name: o4-mini
max_output_tokens: 100000
max_input_tokens: 200000
input_price: 1.1
output_price: 4.4
@@ -93,6 +94,7 @@
temperature: null
top_p: null
- name: o4-mini-high
max_output_tokens: 100000
real_name: o4-mini
max_input_tokens: 200000
input_price: 1.1
@@ -107,6 +109,7 @@
temperature: null
top_p: null
- name: o3
max_output_tokens: 100000
max_input_tokens: 200000
input_price: 2
output_price: 8
@@ -133,6 +136,7 @@
temperature: null
top_p: null
- name: o3-mini
max_output_tokens: 100000
max_input_tokens: 200000
input_price: 1.1
output_price: 4.4
@@ -145,6 +149,7 @@
temperature: null
top_p: null
- name: o3-mini-high
max_output_tokens: 100000
real_name: o3-mini
max_input_tokens: 200000
input_price: 1.1
@@ -190,25 +195,32 @@
# - https://ai.google.dev/api/rest/v1beta/models/streamGenerateContent
- provider: gemini
models:
- name: gemini-3.1-pro-preview
max_input_tokens: 1048576
max_output_tokens: 65535
input_price: 0.3
output_price: 2.5
supports_vision: true
supports_function_calling: true
- name: gemini-2.5-flash
max_input_tokens: 1048576
max_output_tokens: 65536
input_price: 0
output_price: 0
max_output_tokens: 65535
input_price: 0.3
output_price: 2.5
supports_vision: true
supports_function_calling: true
- name: gemini-2.5-pro
max_input_tokens: 1048576
max_output_tokens: 65536
input_price: 0
output_price: 0
input_price: 1.25
output_price: 10
supports_vision: true
supports_function_calling: true
- name: gemini-2.5-flash-lite
max_input_tokens: 1000000
max_output_tokens: 64000
input_price: 0
output_price: 0
max_input_tokens: 1048576
max_output_tokens: 65535
input_price: 0.1
output_price: 0.4
supports_vision: true
supports_function_calling: true
- name: gemini-2.0-flash
@@ -226,10 +238,11 @@
supports_vision: true
supports_function_calling: true
- name: gemma-3-27b-it
max_input_tokens: 131072
max_output_tokens: 8192
input_price: 0
output_price: 0
supports_vision: true
max_input_tokens: 128000
max_output_tokens: 65536
input_price: 0.04
output_price: 0.15
- name: text-embedding-004
type: embedding
input_price: 0
@@ -242,6 +255,54 @@
# - https://docs.anthropic.com/en/api/messages
- provider: claude
models:
- name: claude-opus-4-6
max_input_tokens: 200000
max_output_tokens: 8192
require_max_tokens: true
input_price: 5
output_price: 25
supports_vision: true
supports_function_calling: true
- name: claude-opus-4-6:thinking
real_name: claude-opus-4-6
max_input_tokens: 200000
max_output_tokens: 24000
require_max_tokens: true
input_price: 5
output_price: 25
supports_vision: true
supports_function_calling: true
patch:
body:
temperature: null
top_p: null
thinking:
type: enabled
budget_tokens: 16000
- name: claude-sonnet-4-6
max_input_tokens: 200000
max_output_tokens: 8192
require_max_tokens: true
input_price: 3
output_price: 15
supports_vision: true
supports_function_calling: true
- name: claude-sonnet-4-6:thinking
real_name: claude-sonnet-4-6
max_input_tokens: 200000
max_output_tokens: 24000
require_max_tokens: true
input_price: 3
output_price: 15
supports_vision: true
supports_function_calling: true
patch:
body:
temperature: null
top_p: null
thinking:
type: enabled
budget_tokens: 16000
- name: claude-sonnet-4-5-20250929
max_input_tokens: 200000
max_output_tokens: 8192
@@ -509,8 +570,8 @@
output_price: 10
supports_vision: true
- name: command-r7b-12-2024
max_input_tokens: 131072
max_output_tokens: 4096
max_input_tokens: 128000
max_output_tokens: 4000
input_price: 0.0375
output_price: 0.15
- name: embed-v4.0
@@ -547,6 +608,7 @@
- provider: xai
models:
- name: grok-4
supports_vision: true
max_input_tokens: 256000
input_price: 3
output_price: 15
@@ -583,14 +645,18 @@
- provider: perplexity
models:
- name: sonar-pro
max_output_tokens: 8000
supports_vision: true
max_input_tokens: 200000
input_price: 3
output_price: 15
- name: sonar
max_input_tokens: 128000
supports_vision: true
max_input_tokens: 127072
input_price: 1
output_price: 1
- name: sonar-reasoning-pro
supports_vision: true
max_input_tokens: 128000
input_price: 2
output_price: 8
@@ -659,17 +725,16 @@
# - https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/gemini
- provider: vertexai
models:
- name: gemini-3-pro-preview
hipaa_safe: true
- name: gemini-3.1-pro-preview
max_input_tokens: 1048576
max_output_tokens: 65536
input_price: 0
output_price: 0
input_price: 2
output_price: 12
supports_vision: true
supports_function_calling: true
- name: gemini-2.5-flash
max_input_tokens: 1048576
max_output_tokens: 65536
max_output_tokens: 65535
input_price: 0.3
output_price: 2.5
supports_vision: true
@@ -683,16 +748,16 @@
supports_function_calling: true
- name: gemini-2.5-flash-lite
max_input_tokens: 1048576
max_output_tokens: 65536
input_price: 0.3
max_output_tokens: 65535
input_price: 0.1
output_price: 0.4
supports_vision: true
supports_function_calling: true
- name: gemini-2.0-flash-001
max_input_tokens: 1048576
max_output_tokens: 8192
input_price: 0.15
output_price: 0.6
input_price: 0.1
output_price: 0.4
supports_vision: true
supports_function_calling: true
- name: gemini-2.0-flash-lite-001
@@ -1187,17 +1252,22 @@
max_input_tokens: 1024
input_price: 0.07
# Links:
# - https://help.aliyun.com/zh/model-studio/getting-started/models
# - https://help.aliyun.com/zh/model-studio/developer-reference/use-qwen-by-calling-api
- provider: qianwen
models:
- name: qwen3-max
input_price: 1.2
output_price: 6
max_output_tokens: 32768
max_input_tokens: 262144
supports_function_calling: true
- name: qwen-plus
max_input_tokens: 131072
input_price: 0.4
output_price: 1.2
max_output_tokens: 32768
max_input_tokens: 1000000
supports_function_calling: true
- name: qwen-flash
max_input_tokens: 1000000
@@ -1213,14 +1283,14 @@
- name: qwen-coder-flash
max_input_tokens: 1000000
- name: qwen3-next-80b-a3b-instruct
max_input_tokens: 131072
input_price: 0.14
output_price: 0.56
max_input_tokens: 262144
input_price: 0.09
output_price: 1.1
supports_function_calling: true
- name: qwen3-next-80b-a3b-thinking
max_input_tokens: 131072
input_price: 0.14
output_price: 1.4
max_input_tokens: 128000
input_price: 0.15
output_price: 1.2
- name: qwen3-235b-a22b-instruct-2507
max_input_tokens: 131072
input_price: 0.28
@@ -1228,35 +1298,39 @@
supports_function_calling: true
- name: qwen3-235b-a22b-thinking-2507
max_input_tokens: 131072
input_price: 0.28
output_price: 2.8
input_price: 0
output_price: 0
- name: qwen3-30b-a3b-instruct-2507
max_input_tokens: 131072
input_price: 0.105
output_price: 0.42
max_output_tokens: 262144
max_input_tokens: 262144
input_price: 0.09
output_price: 0.3
supports_function_calling: true
- name: qwen3-30b-a3b-thinking-2507
max_input_tokens: 131072
input_price: 0.105
output_price: 1.05
max_input_tokens: 32768
input_price: 0.051
output_price: 0.34
- name: qwen3-vl-32b-instruct
max_output_tokens: 32768
max_input_tokens: 131072
input_price: 0.28
output_price: 1.12
input_price: 0.104
output_price: 0.416
supports_vision: true
- name: qwen3-vl-8b-instruct
max_output_tokens: 32768
max_input_tokens: 131072
input_price: 0.07
output_price: 0.28
input_price: 0.08
output_price: 0.5
supports_vision: true
- name: qwen3-coder-480b-a35b-instruct
max_input_tokens: 262144
input_price: 1.26
output_price: 5.04
- name: qwen3-coder-30b-a3b-instruct
max_input_tokens: 262144
input_price: 0.315
output_price: 1.26
max_output_tokens: 32768
max_input_tokens: 160000
input_price: 0.07
output_price: 0.27
- name: deepseek-v3.2-exp
max_input_tokens: 131072
input_price: 0.28
@@ -1332,9 +1406,9 @@
output_price: 8.12
supports_vision: true
- name: kimi-k2-thinking
max_input_tokens: 262144
input_price: 0.56
output_price: 2.24
max_input_tokens: 131072
input_price: 0.47
output_price: 2
supports_vision: true
# Links:
@@ -1343,10 +1417,10 @@
- provider: deepseek
models:
- name: deepseek-chat
max_input_tokens: 64000
max_output_tokens: 8192
input_price: 0.56
output_price: 1.68
max_input_tokens: 163840
max_output_tokens: 163840
input_price: 0.32
output_price: 0.89
supports_function_calling: true
- name: deepseek-reasoner
max_input_tokens: 64000
@@ -1424,9 +1498,10 @@
- provider: minimax
models:
- name: minimax-m2
max_input_tokens: 204800
input_price: 0.294
output_price: 1.176
max_output_tokens: 65536
max_input_tokens: 196608
input_price: 0.255
output_price: 1
supports_function_calling: true
# Links:
@@ -1442,8 +1517,8 @@
supports_vision: true
supports_function_calling: true
- name: openai/gpt-5.1-chat
max_input_tokens: 400000
max_output_tokens: 128000
max_input_tokens: 128000
max_output_tokens: 16384
input_price: 1.25
output_price: 10
supports_vision: true
@@ -1456,8 +1531,8 @@
supports_vision: true
supports_function_calling: true
- name: openai/gpt-5-chat
max_input_tokens: 400000
max_output_tokens: 128000
max_input_tokens: 128000
max_output_tokens: 16384
input_price: 1.25
output_price: 10
supports_vision: true
@@ -1498,18 +1573,21 @@
supports_vision: true
supports_function_calling: true
- name: openai/gpt-4o
max_output_tokens: 16384
max_input_tokens: 128000
input_price: 2.5
output_price: 10
supports_vision: true
supports_function_calling: true
- name: openai/gpt-4o-mini
max_output_tokens: 16384
max_input_tokens: 128000
input_price: 0.15
output_price: 0.6
supports_vision: true
supports_function_calling: true
- name: openai/o4-mini
max_output_tokens: 100000
max_input_tokens: 200000
input_price: 1.1
output_price: 4.4
@@ -1522,6 +1600,7 @@
temperature: null
top_p: null
- name: openai/o4-mini-high
max_output_tokens: 100000
max_input_tokens: 200000
input_price: 1.1
output_price: 4.4
@@ -1535,6 +1614,7 @@
temperature: null
top_p: null
- name: openai/o3
max_output_tokens: 100000
max_input_tokens: 200000
input_price: 2
output_price: 8
@@ -1560,6 +1640,7 @@
temperature: null
top_p: null
- name: openai/o3-mini
max_output_tokens: 100000
max_input_tokens: 200000
input_price: 1.1
output_price: 4.4
@@ -1571,6 +1652,7 @@
temperature: null
top_p: null
- name: openai/o3-mini-high
max_output_tokens: 100000
max_input_tokens: 200000
input_price: 1.1
output_price: 4.4
@@ -1583,50 +1665,57 @@
top_p: null
- name: openai/gpt-oss-120b
max_input_tokens: 131072
input_price: 0.09
output_price: 0.45
input_price: 0.039
output_price: 0.19
supports_function_calling: true
- name: openai/gpt-oss-20b
max_input_tokens: 131072
input_price: 0.04
output_price: 0.16
input_price: 0.03
output_price: 0.14
supports_function_calling: true
- name: google/gemini-2.5-flash
max_output_tokens: 65535
max_input_tokens: 1048576
input_price: 0.3
output_price: 2.5
supports_vision: true
supports_function_calling: true
- name: google/gemini-2.5-pro
max_output_tokens: 65536
max_input_tokens: 1048576
input_price: 1.25
output_price: 10
supports_vision: true
supports_function_calling: true
- name: google/gemini-2.5-flash-lite
max_output_tokens: 65535
max_input_tokens: 1048576
input_price: 0.3
input_price: 0.1
output_price: 0.4
supports_vision: true
- name: google/gemini-2.0-flash-001
max_input_tokens: 1000000
input_price: 0.15
output_price: 0.6
max_output_tokens: 8192
max_input_tokens: 1048576
input_price: 0.1
output_price: 0.4
supports_vision: true
supports_function_calling: true
- name: google/gemini-2.0-flash-lite-001
max_output_tokens: 8192
max_input_tokens: 1048576
input_price: 0.075
output_price: 0.3
supports_vision: true
supports_function_calling: true
- name: google/gemma-3-27b-it
max_input_tokens: 131072
input_price: 0.1
output_price: 0.2
max_output_tokens: 65536
supports_vision: true
max_input_tokens: 128000
input_price: 0.04
output_price: 0.15
- name: anthropic/claude-sonnet-4.5
max_input_tokens: 200000
max_output_tokens: 8192
max_input_tokens: 1000000
max_output_tokens: 64000
require_max_tokens: true
input_price: 3
output_price: 15
@@ -1634,7 +1723,7 @@
supports_function_calling: true
- name: anthropic/claude-haiku-4.5
max_input_tokens: 200000
max_output_tokens: 8192
max_output_tokens: 64000
require_max_tokens: true
input_price: 1
output_price: 5
@@ -1642,7 +1731,7 @@
supports_function_calling: true
- name: anthropic/claude-opus-4.1
max_input_tokens: 200000
max_output_tokens: 8192
max_output_tokens: 32000
require_max_tokens: true
input_price: 15
output_price: 75
@@ -1650,15 +1739,15 @@
supports_function_calling: true
- name: anthropic/claude-opus-4
max_input_tokens: 200000
max_output_tokens: 8192
max_output_tokens: 32000
require_max_tokens: true
input_price: 15
output_price: 75
supports_vision: true
supports_function_calling: true
- name: anthropic/claude-sonnet-4
max_input_tokens: 200000
max_output_tokens: 8192
max_input_tokens: 1000000
max_output_tokens: 64000
require_max_tokens: true
input_price: 3
output_price: 15
@@ -1666,7 +1755,7 @@
supports_function_calling: true
- name: anthropic/claude-3.7-sonnet
max_input_tokens: 200000
max_output_tokens: 8192
max_output_tokens: 64000
require_max_tokens: true
input_price: 3
output_price: 15
@@ -1681,21 +1770,24 @@
supports_vision: true
supports_function_calling: true
- name: meta-llama/llama-4-maverick
max_output_tokens: 16384
max_input_tokens: 1048576
input_price: 0.18
input_price: 0.15
output_price: 0.6
supports_vision: true
supports_function_calling: true
- name: meta-llama/llama-4-scout
max_output_tokens: 16384
max_input_tokens: 327680
input_price: 0.08
output_price: 0.3
supports_vision: true
supports_function_calling: true
- name: meta-llama/llama-3.3-70b-instruct
max_output_tokens: 16384
max_input_tokens: 131072
input_price: 0.12
output_price: 0.3
input_price: 0.1
output_price: 0.32
- name: mistralai/mistral-medium-3.1
max_input_tokens: 131072
input_price: 0.4
@@ -1703,9 +1795,10 @@
supports_function_calling: true
supports_vision: true
- name: mistralai/mistral-small-3.2-24b-instruct
max_output_tokens: 131072
max_input_tokens: 131072
input_price: 0.1
output_price: 0.3
input_price: 0.06
output_price: 0.18
supports_vision: true
- name: mistralai/magistral-medium-2506
max_input_tokens: 40960
@@ -1726,8 +1819,8 @@
supports_function_calling: true
- name: mistralai/devstral-small
max_input_tokens: 131072
input_price: 0.07
output_price: 0.28
input_price: 0.1
output_price: 0.3
supports_function_calling: true
- name: mistralai/codestral-2508
max_input_tokens: 256000
@@ -1735,6 +1828,7 @@
output_price: 0.9
supports_function_calling: true
- name: ai21/jamba-large-1.7
max_output_tokens: 4096
max_input_tokens: 256000
input_price: 2
output_price: 8
@@ -1745,110 +1839,121 @@
output_price: 0.4
supports_function_calling: true
- name: cohere/command-a
max_output_tokens: 8192
max_input_tokens: 256000
input_price: 2.5
output_price: 10
supports_function_calling: true
- name: cohere/command-r7b-12-2024
max_input_tokens: 128000
max_output_tokens: 4096
max_output_tokens: 4000
input_price: 0.0375
output_price: 0.15
- name: deepseek/deepseek-v3.2-exp
max_output_tokens: 65536
max_input_tokens: 163840
input_price: 0.27
output_price: 0.40
output_price: 0.41
- name: deepseek/deepseek-v3.1-terminus
max_input_tokens: 163840
input_price: 0.23
output_price: 0.90
input_price: 0.21
output_price: 0.79
- name: deepseek/deepseek-chat-v3.1
max_input_tokens: 163840
input_price: 0.2
output_price: 0.8
max_output_tokens: 7168
max_input_tokens: 32768
input_price: 0.15
output_price: 0.75
- name: deepseek/deepseek-r1-0528
max_input_tokens: 128000
input_price: 0.50
output_price: 2.15
max_output_tokens: 65536
max_input_tokens: 163840
input_price: 0.4
output_price: 1.75
patch:
body:
include_reasoning: true
- name: qwen/qwen3-max
max_output_tokens: 32768
max_input_tokens: 262144
input_price: 1.2
output_price: 6
supports_function_calling: true
- name: qwen/qwen-plus
max_input_tokens: 131072
max_output_tokens: 8192
max_input_tokens: 1000000
max_output_tokens: 32768
input_price: 0.4
output_price: 1.2
supports_function_calling: true
- name: qwen/qwen3-next-80b-a3b-instruct
max_input_tokens: 262144
input_price: 0.1
output_price: 0.8
input_price: 0.09
output_price: 1.1
supports_function_calling: true
- name: qwen/qwen3-next-80b-a3b-thinking
max_input_tokens: 262144
input_price: 0.1
output_price: 0.8
max_input_tokens: 128000
input_price: 0.15
output_price: 1.2
- name: qwen/qwen5-235b-a22b-2507 # Qwen3 235B A22B Instruct 2507
max_input_tokens: 262144
input_price: 0.12
output_price: 0.59
supports_function_calling: true
- name: qwen/qwen3-235b-a22b-thinking-2507
max_input_tokens: 262144
input_price: 0.118
output_price: 0.118
- name: qwen/qwen3-30b-a3b-instruct-2507
max_input_tokens: 131072
input_price: 0.2
output_price: 0.8
input_price: 0
output_price: 0
- name: qwen/qwen3-30b-a3b-instruct-2507
max_output_tokens: 262144
max_input_tokens: 262144
input_price: 0.09
output_price: 0.3
- name: qwen/qwen3-30b-a3b-thinking-2507
max_input_tokens: 262144
input_price: 0.071
output_price: 0.285
max_input_tokens: 32768
input_price: 0.051
output_price: 0.34
- name: qwen/qwen3-vl-32b-instruct
max_input_tokens: 262144
input_price: 0.35
output_price: 1.1
max_output_tokens: 32768
max_input_tokens: 131072
input_price: 0.104
output_price: 0.416
supports_vision: true
- name: qwen/qwen3-vl-8b-instruct
max_input_tokens: 262144
max_output_tokens: 32768
max_input_tokens: 131072
input_price: 0.08
output_price: 0.50
output_price: 0.5
supports_vision: true
- name: qwen/qwen3-coder-plus
max_input_tokens: 128000
max_output_tokens: 65536
max_input_tokens: 1000000
input_price: 1
output_price: 5
supports_function_calling: true
- name: qwen/qwen3-coder-flash
max_input_tokens: 128000
max_output_tokens: 65536
max_input_tokens: 1000000
input_price: 0.3
output_price: 1.5
supports_function_calling: true
- name: qwen/qwen3-coder # Qwen3 Coder 480B A35B
- name: qwen/qwen3-coder # Qwen3 Coder 480B A35B
max_input_tokens: 262144
input_price: 0.22
output_price: 0.95
supports_function_calling: true
- name: qwen/qwen3-coder-30b-a3b-instruct
max_input_tokens: 262144
input_price: 0.052
output_price: 0.207
max_output_tokens: 32768
max_input_tokens: 160000
input_price: 0.07
output_price: 0.27
supports_function_calling: true
- name: moonshotai/kimi-k2-0905
max_input_tokens: 262144
input_price: 0.296
output_price: 1.185
max_input_tokens: 131072
input_price: 0.4
output_price: 2
supports_function_calling: true
- name: moonshotai/kimi-k2-thinking
max_input_tokens: 262144
input_price: 0.45
output_price: 2.35
max_input_tokens: 131072
input_price: 0.47
output_price: 2
supports_function_calling: true
- name: moonshotai/kimi-dev-72b
max_input_tokens: 131072
@@ -1856,21 +1961,26 @@
output_price: 1.15
supports_function_calling: true
- name: x-ai/grok-4
supports_vision: true
max_input_tokens: 256000
input_price: 3
output_price: 15
supports_function_calling: true
- name: x-ai/grok-4-fast
max_output_tokens: 30000
supports_vision: true
max_input_tokens: 2000000
input_price: 0.2
output_price: 0.5
supports_function_calling: true
- name: x-ai/grok-code-fast-1
max_output_tokens: 10000
max_input_tokens: 256000
input_price: 0.2
output_price: 1.5
supports_function_calling: true
- name: amazon/nova-premier-v1
max_output_tokens: 32000
max_input_tokens: 1000000
input_price: 2.5
output_price: 12.5
@@ -1893,14 +2003,18 @@
input_price: 0.035
output_price: 0.14
- name: perplexity/sonar-pro
max_output_tokens: 8000
supports_vision: true
max_input_tokens: 200000
input_price: 3
output_price: 15
- name: perplexity/sonar
supports_vision: true
max_input_tokens: 127072
input_price: 1
output_price: 1
- name: perplexity/sonar-reasoning-pro
supports_vision: true
max_input_tokens: 128000
input_price: 2
output_price: 8
@@ -1915,20 +2029,22 @@
body:
include_reasoning: true
- name: perplexity/sonar-deep-research
max_input_tokens: 200000
max_input_tokens: 128000
input_price: 2
output_price: 8
patch:
body:
include_reasoning: true
- name: minimax/minimax-m2
max_output_tokens: 65536
max_input_tokens: 196608
input_price: 0.15
output_price: 0.45
input_price: 0.255
output_price: 1
- name: z-ai/glm-4.6
max_output_tokens: 131072
max_input_tokens: 202752
input_price: 0.5
output_price: 1.75
input_price: 0.35
output_price: 1.71
supports_function_calling: true
# Links:
@@ -2298,4 +2414,4 @@
- name: rerank-2-lite
type: reranker
max_input_tokens: 8000
input_price: 0.02
input_price: 0.02
+2
View File
@@ -0,0 +1,2 @@
requests
ruamel.yaml
+255
View File
@@ -0,0 +1,255 @@
import requests
import sys
import re
import json
# Maps a models.yaml `provider:` value to the vendor prefix OpenRouter uses
# in its model IDs (e.g. entries for the `claude` provider appear on
# OpenRouter as `anthropic/<model>`).
PROVIDER_MAPPING = dict(
    openai="openai",
    claude="anthropic",
    gemini="google",
    mistral="mistralai",
    cohere="cohere",
    perplexity="perplexity",
    xai="x-ai",
    openrouter="openrouter",
    ai21="ai21",
    deepseek="deepseek",
    moonshot="moonshotai",
    qianwen="qwen",
    zhipuai="zhipuai",
    minimax="minimax",
    vertexai="google",
    groq="groq",
    bedrock="amazon",
    hunyuan="tencent",
    ernie="baidu",
    github="github",
)
def fetch_openrouter_models():
    """Fetch the model catalog from the public OpenRouter API.

    Returns the list of model dicts from the `data` field of the response.
    On any failure (network error, HTTP error, unexpected payload) prints
    the error and exits the process with status 1.
    """
    print("Fetching models from OpenRouter...")
    try:
        # Explicit timeout so a hung connection cannot stall the script forever.
        response = requests.get("https://openrouter.ai/api/v1/models", timeout=30)
        response.raise_for_status()
        data = response.json()["data"]
        print(f"Fetched {len(data)} models.")
        return data
    except Exception as e:
        # Broad catch is deliberate: this is a one-shot maintenance script and
        # any failure here is fatal to the whole run.
        print(f"Error fetching models: {e}")
        sys.exit(1)
def get_openrouter_model(models_data, provider_prefix, model_name, is_openrouter_provider=False):
    """Locate the OpenRouter catalog entry for a models.yaml model name.

    For the `openrouter` provider the yaml name is already a full OpenRouter
    ID, so it is matched verbatim. Otherwise `<prefix>/<name>` is tried as an
    exact ID first, then we fall back to any ID under the same vendor prefix
    whose final path segment equals the name. Returns the matching model
    dict, or None when nothing matches.
    """
    if is_openrouter_provider:
        # yaml names for the openrouter provider are usually the full ID
        return next((m for m in models_data if m["id"] == model_name), None)

    expected_id = f"{provider_prefix}/{model_name}"

    # 1. Exact ID match
    exact = next((m for m in models_data if m["id"] == expected_id), None)
    if exact is not None:
        return exact

    # 2. Same trailing segment, restricted to the same vendor prefix
    vendor = f"{provider_prefix}/"
    for candidate in models_data:
        candidate_id = candidate["id"]
        if candidate_id.split("/")[-1] == model_name and candidate_id.startswith(vendor):
            return candidate
    return None
def format_price(price_per_token):
    """Convert an OpenRouter per-token price into a per-million-token string.

    Returns None when the price is missing or not parseable as a number.
    Whole-dollar results are rendered without a decimal point; fractional
    results are rounded to 4 decimal places.
    """
    if price_per_token is None:
        return None
    try:
        price_per_1m = float(price_per_token) * 1_000_000
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; float() is the only call here that can raise.
        return None
    if price_per_1m.is_integer():
        return str(int(price_per_1m))
    return str(round(price_per_1m, 4))
def get_indentation(line):
    """Return the number of leading whitespace characters on `line`."""
    return next((i for i, ch in enumerate(line) if not ch.isspace()), len(line))
def process_model_block(block_lines, current_provider, or_models):
    """Rewrite one `- name:` model block of models.yaml from OpenRouter data.

    block_lines: the raw file lines of the block (newline-terminated),
    starting at the `- name:` line. current_provider: the yaml provider this
    block belongs to. or_models: the OpenRouter catalog list.

    Returns a new list of lines with pricing/context/vision fields updated
    or inserted; returns the input unchanged when the block cannot be parsed,
    the provider is unmapped, or no OpenRouter match is found.
    """
    if not block_lines:
        return []
    # 1. Identify model name and indentation
    name_line = block_lines[0]
    name_match = re.match(r"^(\s*)-\s*name:\s*(.+)$", name_line)
    if not name_match:
        # Not a `- name:` line; leave the block untouched.
        return block_lines
    name_indent_str = name_match.group(1)
    model_name = name_match.group(2).strip()
    # 2. Find OpenRouter model
    or_prefix = PROVIDER_MAPPING.get(current_provider)
    is_openrouter_provider = (current_provider == "openrouter")
    if not or_prefix and not is_openrouter_provider:
        # Provider has no OpenRouter equivalent; nothing to sync.
        return block_lines
    or_model = get_openrouter_model(or_models, or_prefix, model_name, is_openrouter_provider)
    if not or_model:
        return block_lines
    print(f" Updating {model_name}...")
    # 3. Prepare updates (all values are pre-formatted yaml scalars as strings)
    updates = {}
    # Pricing
    pricing = or_model.get("pricing", {})
    p_in = format_price(pricing.get("prompt"))
    p_out = format_price(pricing.get("completion"))
    if p_in: updates["input_price"] = p_in
    if p_out: updates["output_price"] = p_out
    # Context
    ctx = or_model.get("context_length")
    if ctx: updates["max_input_tokens"] = str(ctx)
    max_out = None
    if "top_provider" in or_model and or_model["top_provider"]:
        max_out = or_model["top_provider"].get("max_completion_tokens")
    if max_out: updates["max_output_tokens"] = str(max_out)
    # Capabilities
    arch = or_model.get("architecture", {})
    modality = arch.get("modality", "")
    if "image" in modality:
        updates["supports_vision"] = "true"
    # 4. Detect field indentation so inserted lines match the file's style
    field_indent_str = None
    existing_fields = {}  # key -> line_index
    for i, line in enumerate(block_lines):
        if i == 0: continue  # Skip name line
        # Skip comments
        if line.strip().startswith("#"):
            continue
        # Look for "key: value"
        m = re.match(r"^(\s*)([\w_-]+):", line)
        if m:
            indent = m.group(1)
            key = m.group(2)
            # Must be deeper than name line
            if len(indent) > len(name_indent_str):
                if field_indent_str is None:
                    field_indent_str = indent
                existing_fields[key] = i
    if field_indent_str is None:
        # No existing fields to copy from; assume one extra indent level.
        field_indent_str = name_indent_str + " "
    # 5. Apply updates
    new_block = list(block_lines)
    # Update existing fields
    for key, value in updates.items():
        if key in existing_fields:
            idx = existing_fields[key]
            # Preserve original key indentation exactly
            original_line = new_block[idx]
            m = re.match(r"^(\s*)([\w_-]+):", original_line)
            if m:
                current_indent = m.group(1)
                new_block[idx] = f"{current_indent}{key}: {value}\n"
    # Insert missing fields
    # Insert after the name line
    insertion_idx = 1
    for key, value in updates.items():
        if key not in existing_fields:
            new_line = f"{field_indent_str}{key}: {value}\n"
            new_block.insert(insertion_idx, new_line)
            insertion_idx += 1
    return new_block
def main():
    """Sync models.yaml (in the current directory) with OpenRouter data.

    Walks the file line by line, tracking the current `provider:` section,
    collects each `- name:` block (including its more-indented properties,
    blank lines and comments), runs it through process_model_block, and
    writes the result back in place.
    """
    or_models = fetch_openrouter_models()
    print("Reading models.yaml...")
    # Explicit encoding: the catalog contains non-ASCII text, and the
    # platform default (e.g. cp1252 on Windows) would corrupt it.
    with open("models.yaml", "r", encoding="utf-8") as f:
        lines = f.readlines()
    new_lines = []
    current_provider = None
    i = 0
    while i < len(lines):
        line = lines[i]
        # Check for provider
        # - provider: name
        p_match = re.match(r"^\s*-?\s*provider:\s*(.+)$", line)
        if p_match:
            current_provider = p_match.group(1).strip()
            new_lines.append(line)
            i += 1
            continue
        # Check for model start
        # - name: ...
        m_match = re.match(r"^(\s*)-\s*name:\s*.+$", line)
        if m_match:
            # Start of a model block
            start_indent = len(m_match.group(1))
            # Collect block lines
            block_lines = [line]
            j = i + 1
            while j < len(lines):
                next_line = lines[j]
                stripped = next_line.strip()
                # If empty or comment, include it
                if not stripped or stripped.startswith("#"):
                    block_lines.append(next_line)
                    j += 1
                    continue
                # Check indentation
                next_indent = get_indentation(next_line)
                # If indentation is greater, it's part of the block (property)
                if next_indent > start_indent:
                    block_lines.append(next_line)
                    j += 1
                    continue
                # If indentation is equal or less, it's the end of the block
                break
            # Process the block
            processed_block = process_model_block(block_lines, current_provider, or_models)
            new_lines.extend(processed_block)
            # Advance i past the consumed block
            i = j
            continue
        # Otherwise, just a regular line
        new_lines.append(line)
        i += 1
    print("Saving models.yaml...")
    with open("models.yaml", "w", encoding="utf-8") as f:
        f.writelines(new_lines)
    print("Done.")
# Script entry point: only run the sync when executed directly.
if __name__ == "__main__":
    main()
+3
View File
@@ -127,6 +127,9 @@ pub struct Cli {
/// List all secrets stored in the Loki vault
#[arg(long, exclusive = true)]
pub list_secrets: bool,
/// Authenticate with an LLM provider using OAuth (e.g., --authenticate client_name)
#[arg(long, exclusive = true, value_name = "CLIENT_NAME")]
pub authenticate: Option<Option<String>>,
/// Generate static shell completion scripts
#[arg(long, value_name = "SHELL", value_enum)]
pub completions: Option<ShellCompletion>,
+4 -4
View File
@@ -18,16 +18,16 @@ pub struct AzureOpenAIConfig {
impl AzureOpenAIClient {
config_get_fn!(api_base, get_api_base);
config_get_fn!(api_key, get_api_key);
pub const PROMPTS: [PromptAction<'static>; 2] = [
create_client_config!([
(
"api_base",
"API Base",
Some("e.g. https://{RESOURCE}.openai.azure.com"),
false
false,
),
("api_key", "API Key", None, true),
];
]);
}
impl_client_trait!(
+2 -2
View File
@@ -32,11 +32,11 @@ impl BedrockClient {
config_get_fn!(region, get_region);
config_get_fn!(session_token, get_session_token);
pub const PROMPTS: [PromptAction<'static>; 3] = [
create_client_config!([
("access_key_id", "AWS Access Key ID", None, true),
("secret_access_key", "AWS Secret Access Key", None, true),
("region", "AWS Region", None, false),
];
]);
fn chat_completions_builder(
&self,
+64 -15
View File
@@ -1,9 +1,12 @@
use super::access_token::get_access_token;
use super::claude_oauth::ClaudeOAuthProvider;
use super::oauth::{self, OAuthProvider};
use super::*;
use crate::utils::strip_think_tag;
use anyhow::{Context, Result, bail};
use reqwest::RequestBuilder;
use reqwest::{Client as ReqwestClient, RequestBuilder};
use serde::Deserialize;
use serde_json::{Value, json};
@@ -14,6 +17,7 @@ pub struct ClaudeConfig {
pub name: Option<String>,
pub api_key: Option<String>,
pub api_base: Option<String>,
pub auth: Option<String>,
#[serde(default)]
pub models: Vec<ModelData>,
pub patch: Option<RequestPatch>,
@@ -24,25 +28,44 @@ impl ClaudeClient {
config_get_fn!(api_key, get_api_key);
config_get_fn!(api_base, get_api_base);
pub const PROMPTS: [PromptAction<'static>; 1] = [("api_key", "API Key", None, true)];
create_oauth_supported_client_config!();
}
impl_client_trait!(
ClaudeClient,
(
prepare_chat_completions,
claude_chat_completions,
claude_chat_completions_streaming
),
(noop_prepare_embeddings, noop_embeddings),
(noop_prepare_rerank, noop_rerank),
);
#[async_trait::async_trait]
impl Client for ClaudeClient {
client_common_fns!();
fn prepare_chat_completions(
fn supports_oauth(&self) -> bool {
self.config.auth.as_deref() == Some("oauth")
}
async fn chat_completions_inner(
&self,
client: &ReqwestClient,
data: ChatCompletionsData,
) -> Result<ChatCompletionsOutput> {
let request_data = prepare_chat_completions(self, client, data).await?;
let builder = self.request_builder(client, request_data);
claude_chat_completions(builder, self.model()).await
}
async fn chat_completions_streaming_inner(
&self,
client: &ReqwestClient,
handler: &mut SseHandler,
data: ChatCompletionsData,
) -> Result<()> {
let request_data = prepare_chat_completions(self, client, data).await?;
let builder = self.request_builder(client, request_data);
claude_chat_completions_streaming(builder, handler, self.model()).await
}
}
async fn prepare_chat_completions(
self_: &ClaudeClient,
client: &ReqwestClient,
data: ChatCompletionsData,
) -> Result<RequestData> {
let api_key = self_.get_api_key()?;
let api_base = self_
.get_api_base()
.unwrap_or_else(|_| API_BASE.to_string());
@@ -53,7 +76,33 @@ fn prepare_chat_completions(
let mut request_data = RequestData::new(url, body);
request_data.header("anthropic-version", "2023-06-01");
request_data.header("x-api-key", api_key);
let uses_oauth = self_.config.auth.as_deref() == Some("oauth");
if uses_oauth {
let provider = ClaudeOAuthProvider;
let ready = oauth::prepare_oauth_access_token(client, &provider, self_.name()).await?;
if !ready {
bail!(
"OAuth configured but no tokens found for '{}'. Run: loki --authenticate {}",
self_.name(),
self_.name()
);
}
let token = get_access_token(self_.name())?;
request_data.bearer_auth(token);
for (key, value) in provider.extra_request_headers() {
request_data.header(key, value);
}
} else if let Ok(api_key) = self_.get_api_key() {
request_data.header("x-api-key", api_key);
} else {
bail!(
"No authentication configured for '{}'. Set `api_key` or use `auth: oauth` with `loki --authenticate {}`.",
self_.name(),
self_.name()
);
}
Ok(request_data)
}
+43
View File
@@ -0,0 +1,43 @@
use super::oauth::OAuthProvider;
/// Anthropic beta flag that must accompany OAuth token and API requests.
pub const BETA_HEADER: &str = "oauth-2025-04-20";

/// OAuth client id used for the Claude flow.
const CLAUDE_CLIENT_ID: &str = "9d1c250a-e61b-44d9-88ed-5944d1962f5e";

/// OAuth 2.0 endpoint/setting definitions for Anthropic's Claude.
pub struct ClaudeOAuthProvider;

impl OAuthProvider for ClaudeOAuthProvider {
    fn provider_name(&self) -> &str {
        "claude"
    }

    fn client_id(&self) -> &str {
        CLAUDE_CLIENT_ID
    }

    fn authorize_url(&self) -> &str {
        "https://claude.ai/oauth/authorize"
    }

    fn token_url(&self) -> &str {
        "https://console.anthropic.com/v1/oauth/token"
    }

    fn redirect_uri(&self) -> &str {
        "https://console.anthropic.com/oauth/code/callback"
    }

    fn scopes(&self) -> &str {
        "org:create_api_key user:profile user:inference"
    }

    // The authorize endpoint is passed `code=true` in the query string.
    fn extra_authorize_params(&self) -> Vec<(&str, &str)> {
        vec![("code", "true")]
    }

    fn extra_token_headers(&self) -> Vec<(&str, &str)> {
        vec![("anthropic-beta", BETA_HEADER)]
    }

    fn extra_request_headers(&self) -> Vec<(&str, &str)> {
        vec![("anthropic-beta", BETA_HEADER)]
    }
}
+1 -1
View File
@@ -24,7 +24,7 @@ impl CohereClient {
config_get_fn!(api_key, get_api_key);
config_get_fn!(api_base, get_api_base);
pub const PROMPTS: [PromptAction<'static>; 1] = [("api_key", "API Key", None, true)];
create_client_config!([("api_key", "API Key", None, true)]);
}
impl_client_trait!(
+8 -10
View File
@@ -47,6 +47,10 @@ pub trait Client: Sync + Send {
fn model(&self) -> &Model;
fn supports_oauth(&self) -> bool {
false
}
fn build_client(&self) -> Result<ReqwestClient> {
let mut builder = ReqwestClient::builder();
let extra = self.extra_config();
@@ -411,9 +415,11 @@ pub async fn call_chat_completions(
client: &dyn Client,
abort_signal: AbortSignal,
) -> Result<(String, Vec<ToolResult>)> {
let is_child_agent = client.global_config().read().current_depth > 0;
let spinner_message = if is_child_agent { "" } else { "Generating" };
let ret = abortable_run_with_spinner(
client.chat_completions(input.clone()),
"Generating",
spinner_message,
abort_signal,
)
.await;
@@ -487,14 +493,6 @@ pub async fn call_chat_completions_streaming(
}
}
/// Placeholder for clients without an embeddings endpoint; always errors.
pub fn noop_prepare_embeddings<T>(_client: &T, _data: &EmbeddingsData) -> Result<RequestData> {
    bail!("The client doesn't support embeddings api")
}
/// Placeholder embeddings executor for clients without an embeddings endpoint; always errors.
pub async fn noop_embeddings(_builder: RequestBuilder, _model: &Model) -> Result<EmbeddingsOutput> {
    bail!("The client doesn't support embeddings api")
}
/// Placeholder for clients without a rerank endpoint; always errors.
pub fn noop_prepare_rerank<T>(_client: &T, _data: &RerankData) -> Result<RequestData> {
    bail!("The client doesn't support rerank api")
}
@@ -552,7 +550,7 @@ pub fn json_str_from_map<'a>(
map.get(field_name).and_then(|v| v.as_str())
}
async fn set_client_models_config(client_config: &mut Value, client: &str) -> Result<String> {
pub async fn set_client_models_config(client_config: &mut Value, client: &str) -> Result<String> {
if let Some(provider) = ALL_PROVIDER_MODELS.iter().find(|v| v.provider == client) {
let models: Vec<String> = provider
.models
+120 -35
View File
@@ -1,10 +1,13 @@
use super::access_token::get_access_token;
use super::gemini_oauth::GeminiOAuthProvider;
use super::oauth;
use super::vertexai::*;
use super::*;
use anyhow::{Context, Result};
use reqwest::RequestBuilder;
use anyhow::{Context, Result, bail};
use reqwest::{Client as ReqwestClient, RequestBuilder};
use serde::Deserialize;
use serde_json::{json, Value};
use serde_json::{Value, json};
const API_BASE: &str = "https://generativelanguage.googleapis.com/v1beta";
@@ -13,6 +16,7 @@ pub struct GeminiConfig {
pub name: Option<String>,
pub api_key: Option<String>,
pub api_base: Option<String>,
pub auth: Option<String>,
#[serde(default)]
pub models: Vec<ModelData>,
pub patch: Option<RequestPatch>,
@@ -23,25 +27,64 @@ impl GeminiClient {
config_get_fn!(api_key, get_api_key);
config_get_fn!(api_base, get_api_base);
pub const PROMPTS: [PromptAction<'static>; 1] = [("api_key", "API Key", None, true)];
create_oauth_supported_client_config!();
}
// NOTE(review): this `impl_client_trait!` invocation appears alongside a
// hand-written `impl Client for GeminiClient` below; it looks like leftover
// from the OAuth migration and would produce duplicate trait impls — confirm
// that only one of the two is kept.
impl_client_trait!(
    GeminiClient,
    (
        prepare_chat_completions,
        gemini_chat_completions,
        gemini_chat_completions_streaming
    ),
    (prepare_embeddings, embeddings),
    (noop_prepare_rerank, noop_rerank),
);
#[async_trait::async_trait]
impl Client for GeminiClient {
client_common_fns!();
fn prepare_chat_completions(
fn supports_oauth(&self) -> bool {
self.config.auth.as_deref() == Some("oauth")
}
async fn chat_completions_inner(
&self,
client: &ReqwestClient,
data: ChatCompletionsData,
) -> Result<ChatCompletionsOutput> {
let request_data = prepare_chat_completions(self, client, data).await?;
let builder = self.request_builder(client, request_data);
gemini_chat_completions(builder, self.model()).await
}
async fn chat_completions_streaming_inner(
&self,
client: &ReqwestClient,
handler: &mut SseHandler,
data: ChatCompletionsData,
) -> Result<()> {
let request_data = prepare_chat_completions(self, client, data).await?;
let builder = self.request_builder(client, request_data);
gemini_chat_completions_streaming(builder, handler, self.model()).await
}
async fn embeddings_inner(
&self,
client: &ReqwestClient,
data: &EmbeddingsData,
) -> Result<EmbeddingsOutput> {
let request_data = prepare_embeddings(self, client, data).await?;
let builder = self.request_builder(client, request_data);
embeddings(builder, self.model()).await
}
async fn rerank_inner(
&self,
client: &ReqwestClient,
data: &RerankData,
) -> Result<RerankOutput> {
let request_data = noop_prepare_rerank(self, data)?;
let builder = self.request_builder(client, request_data);
noop_rerank(builder, self.model()).await
}
}
async fn prepare_chat_completions(
self_: &GeminiClient,
client: &ReqwestClient,
data: ChatCompletionsData,
) -> Result<RequestData> {
let api_key = self_.get_api_key()?;
let api_base = self_
.get_api_base()
.unwrap_or_else(|_| API_BASE.to_string());
@@ -59,26 +102,61 @@ fn prepare_chat_completions(
);
let body = gemini_build_chat_completions_body(data, &self_.model)?;
let mut request_data = RequestData::new(url, body);
request_data.header("x-goog-api-key", api_key);
let uses_oauth = self_.config.auth.as_deref() == Some("oauth");
if uses_oauth {
let provider = GeminiOAuthProvider;
let ready = oauth::prepare_oauth_access_token(client, &provider, self_.name()).await?;
if !ready {
bail!(
"OAuth configured but no tokens found for '{}'. Run: loki --authenticate {}",
self_.name(),
self_.name()
);
}
let token = get_access_token(self_.name())?;
request_data.bearer_auth(token);
} else if let Ok(api_key) = self_.get_api_key() {
request_data.header("x-goog-api-key", api_key);
} else {
bail!(
"No authentication configured for '{}'. Set `api_key` or use `auth: oauth` with `loki --authenticate {}`.",
self_.name(),
self_.name()
);
}
Ok(request_data)
}
fn prepare_embeddings(self_: &GeminiClient, data: &EmbeddingsData) -> Result<RequestData> {
let api_key = self_.get_api_key()?;
async fn prepare_embeddings(
self_: &GeminiClient,
client: &ReqwestClient,
data: &EmbeddingsData,
) -> Result<RequestData> {
let api_base = self_
.get_api_base()
.unwrap_or_else(|_| API_BASE.to_string());
let url = format!(
"{}/models/{}:batchEmbedContents?key={}",
api_base.trim_end_matches('/'),
self_.model.real_name(),
api_key
);
let uses_oauth = self_.config.auth.as_deref() == Some("oauth");
let url = if uses_oauth {
format!(
"{}/models/{}:batchEmbedContents",
api_base.trim_end_matches('/'),
self_.model.real_name(),
)
} else {
let api_key = self_.get_api_key()?;
format!(
"{}/models/{}:batchEmbedContents?key={}",
api_base.trim_end_matches('/'),
self_.model.real_name(),
api_key
)
};
let model_id = format!("models/{}", self_.model.real_name());
@@ -89,21 +167,28 @@ fn prepare_embeddings(self_: &GeminiClient, data: &EmbeddingsData) -> Result<Req
json!({
"model": model_id,
"content": {
"parts": [
{
"text": text
}
]
"parts": [{ "text": text }]
},
})
})
.collect();
let body = json!({
"requests": requests,
});
let body = json!({ "requests": requests });
let mut request_data = RequestData::new(url, body);
let request_data = RequestData::new(url, body);
if uses_oauth {
let provider = GeminiOAuthProvider;
let ready = oauth::prepare_oauth_access_token(client, &provider, self_.name()).await?;
if !ready {
bail!(
"OAuth configured but no tokens found for '{}'. Run: loki --authenticate {}",
self_.name(),
self_.name()
);
}
let token = get_access_token(self_.name())?;
request_data.bearer_auth(token);
}
Ok(request_data)
}
+50
View File
@@ -0,0 +1,50 @@
use super::oauth::{OAuthProvider, TokenRequestFormat};
/// OAuth 2.0 endpoint/setting definitions for Google's Gemini API.
pub struct GeminiOAuthProvider;

// TODO: Replace with real credentials after registering Loki with Google Cloud Console
// NOTE(review): a client secret committed to source is effectively public; for
// installed apps Google treats it as non-confidential, but confirm before release.
const GEMINI_CLIENT_ID: &str =
    "50826443741-upqcebrs4gctqht1f08ku46qlbirkdsj.apps.googleusercontent.com";
const GEMINI_CLIENT_SECRET: &str = "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";

impl OAuthProvider for GeminiOAuthProvider {
    fn provider_name(&self) -> &str {
        "gemini"
    }

    fn client_id(&self) -> &str {
        GEMINI_CLIENT_ID
    }

    // Included in token-endpoint requests alongside the client id.
    fn client_secret(&self) -> Option<&str> {
        Some(GEMINI_CLIENT_SECRET)
    }

    fn authorize_url(&self) -> &str {
        "https://accounts.google.com/o/oauth2/v2/auth"
    }

    fn token_url(&self) -> &str {
        "https://oauth2.googleapis.com/token"
    }

    // Unused: `uses_localhost_redirect` makes the flow generate a loopback URI.
    fn redirect_uri(&self) -> &str {
        ""
    }

    fn scopes(&self) -> &str {
        "https://www.googleapis.com/auth/cloud-platform.readonly https://www.googleapis.com/auth/userinfo.email"
    }

    // Google's documented knobs for obtaining a refresh token on first consent.
    fn extra_authorize_params(&self) -> Vec<(&str, &str)> {
        vec![("access_type", "offline"), ("prompt", "consent")]
    }

    // Google's token endpoint expects form-encoded bodies, not JSON.
    fn token_request_format(&self) -> TokenRequestFormat {
        TokenRequestFormat::FormUrlEncoded
    }

    fn uses_localhost_redirect(&self) -> bool {
        true
    }
}
+39 -1
View File
@@ -90,7 +90,7 @@ macro_rules! register_client {
pub async fn create_client_config(client: &str, vault: &$crate::vault::Vault) -> anyhow::Result<(String, serde_json::Value)> {
$(
if client == $client::NAME && client != $crate::client::OpenAICompatibleClient::NAME {
return create_config(&$client::PROMPTS, $client::NAME, vault).await
return $client::create_client_config(vault).await
}
)+
if let Some(ret) = create_openai_compatible_client_config(client).await? {
@@ -218,6 +218,44 @@ macro_rules! impl_client_trait {
};
}
/// Generates a `create_client_config` associated fn for a client whose
/// interactive setup only needs the given prompt actions (no OAuth option).
/// The expansion delegates to `crate::client::create_config`.
#[macro_export]
macro_rules! create_client_config {
    ($prompts:expr) => {
        pub async fn create_client_config(
            vault: &$crate::vault::Vault,
        ) -> anyhow::Result<(String, serde_json::Value)> {
            $crate::client::create_config(&$prompts, Self::NAME, vault).await
        }
    };
}
/// Generates a `create_client_config` associated fn for clients that support
/// both API-key and OAuth authentication, prompting the user to choose.
///
/// Returns `(default_model, clients_json)` like the non-OAuth variant.
#[macro_export]
macro_rules! create_oauth_supported_client_config {
    () => {
        pub async fn create_client_config(vault: &$crate::vault::Vault) -> anyhow::Result<(String, serde_json::Value)> {
            let mut config = serde_json::json!({ "type": Self::NAME });
            // Let the user pick how this client should authenticate.
            let auth_method = inquire::Select::new(
                "Authentication method:",
                vec!["API Key", "OAuth"],
            )
            .prompt()?;
            if auth_method == "API Key" {
                // Store the key in the vault and reference it via `{{ENV_NAME}}`
                // interpolation rather than writing the secret into the config.
                let env_name = format!("{}_API_KEY", Self::NAME).to_ascii_uppercase();
                vault.add_secret(&env_name)?;
                config["api_key"] = format!("{{{{{env_name}}}}}").into();
            } else {
                config["auth"] = "oauth".into();
            }
            let model = $crate::client::set_client_models_config(&mut config, Self::NAME).await?;
            // Fully qualify `json!` so call sites don't need `use serde_json::json`
            // in scope (exported macros expand at the caller).
            let clients = serde_json::json!(vec![config]);
            Ok((model, clients))
        }
    };
}
#[macro_export]
macro_rules! config_get_fn {
($field_name:ident, $fn_name:ident) => {
+3
View File
@@ -1,6 +1,9 @@
mod access_token;
mod claude_oauth;
mod common;
mod gemini_oauth;
mod message;
pub mod oauth;
#[macro_use]
mod macros;
mod model;
+430
View File
@@ -0,0 +1,430 @@
use super::ClientConfig;
use super::access_token::{is_valid_access_token, set_access_token};
use crate::config::Config;
use anyhow::{Result, bail};
use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use chrono::Utc;
use inquire::Text;
use reqwest::Client as ReqwestClient;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::fs;
use std::io::{BufRead, BufReader, Write};
use std::net::TcpListener;
use url::Url;
use uuid::Uuid;
/// Wire format a provider's token endpoint expects for token requests.
///
/// Standard derives added so the public enum can be debugged, copied, and
/// compared by callers (backward-compatible additions).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenRequestFormat {
    /// JSON request body (`Content-Type: application/json`).
    Json,
    /// `application/x-www-form-urlencoded` request body.
    FormUrlEncoded,
}
/// Per-provider knobs for the OAuth 2.0 authorization-code (PKCE) flow.
pub trait OAuthProvider: Send + Sync {
    /// Human-readable provider name used in prompts and log output.
    fn provider_name(&self) -> &str;
    /// OAuth client id sent on authorize and token requests.
    fn client_id(&self) -> &str;
    /// Endpoint the user's browser is sent to for consent.
    fn authorize_url(&self) -> &str;
    /// Endpoint that exchanges codes / refresh tokens for access tokens.
    fn token_url(&self) -> &str;
    /// Redirect URI registered with the provider. Ignored when
    /// `uses_localhost_redirect` is true (a loopback URI is generated instead).
    fn redirect_uri(&self) -> &str;
    /// Space-separated scope string requested during authorization.
    fn scopes(&self) -> &str;
    /// Optional client secret, included in token requests when present.
    fn client_secret(&self) -> Option<&str> {
        None
    }
    /// Extra query parameters appended to the authorize URL.
    fn extra_authorize_params(&self) -> Vec<(&str, &str)> {
        vec![]
    }
    /// Body encoding the token endpoint expects (JSON by default).
    fn token_request_format(&self) -> TokenRequestFormat {
        TokenRequestFormat::Json
    }
    /// When true, the flow binds a local loopback listener to catch the
    /// redirect instead of asking the user to paste a code.
    fn uses_localhost_redirect(&self) -> bool {
        false
    }
    /// Extra headers attached to token-endpoint requests.
    fn extra_token_headers(&self) -> Vec<(&str, &str)> {
        vec![]
    }
    /// Extra headers attached to subsequent API requests.
    fn extra_request_headers(&self) -> Vec<(&str, &str)> {
        vec![]
    }
}
/// Persisted OAuth token set for a client.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OAuthTokens {
    // Bearer token sent with API requests.
    pub access_token: String,
    // Long-lived token used to mint a new access token once it expires.
    pub refresh_token: String,
    // Unix timestamp (seconds, UTC) at which `access_token` expires.
    pub expires_at: i64,
}
/// Runs the full interactive OAuth 2.0 authorization-code + PKCE flow for
/// `provider`, persisting the resulting tokens for `client_name`.
///
/// Side effects: prints the authorize URL, attempts to open a browser, may
/// bind a loopback listener or prompt on stdin, and writes the token file.
pub async fn run_oauth_flow(provider: &dyn OAuthProvider, client_name: &str) -> Result<()> {
    // PKCE: random verifier, SHA-256 challenge, both base64url without padding.
    let random_bytes: [u8; 32] = rand::random::<[u8; 32]>();
    let code_verifier = URL_SAFE_NO_PAD.encode(random_bytes);
    let mut hasher = Sha256::new();
    hasher.update(code_verifier.as_bytes());
    let code_challenge = URL_SAFE_NO_PAD.encode(hasher.finalize());
    // Random state ties the callback to this attempt (CSRF protection).
    let state = Uuid::new_v4().to_string();
    let redirect_uri = if provider.uses_localhost_redirect() {
        // Ask the OS for a free port. NOTE(review): dropping the listener and
        // re-binding later in listen_for_oauth_callback leaves a small window
        // where another process could take the port — confirm acceptable.
        let listener = TcpListener::bind("127.0.0.1:0")?;
        let port = listener.local_addr()?.port();
        let uri = format!("http://127.0.0.1:{port}/callback");
        // Drop the listener so run_oauth_flow can re-bind below
        drop(listener);
        uri
    } else {
        provider.redirect_uri().to_string()
    };
    let encoded_scopes = urlencoding::encode(provider.scopes());
    let encoded_redirect = urlencoding::encode(&redirect_uri);
    let mut authorize_url = format!(
        "{}?client_id={}&response_type=code&scope={}&redirect_uri={}&code_challenge={}&code_challenge_method=S256&state={}",
        provider.authorize_url(),
        provider.client_id(),
        encoded_scopes,
        encoded_redirect,
        code_challenge,
        state
    );
    // Provider-specific query params (e.g. Google's access_type=offline).
    for (key, value) in provider.extra_authorize_params() {
        authorize_url.push_str(&format!(
            "&{}={}",
            urlencoding::encode(key),
            urlencoding::encode(value)
        ));
    }
    println!(
        "\nOpen this URL to authenticate with {} (client '{}'):\n",
        provider.provider_name(),
        client_name
    );
    println!("  {authorize_url}\n");
    // Best effort: failure to launch a browser is not fatal, the URL is printed.
    let _ = open::that(&authorize_url);
    let (code, returned_state) = if provider.uses_localhost_redirect() {
        listen_for_oauth_callback(&redirect_uri)?
    } else {
        // Manual flow: the provider hands the user a `<code>#<state>` blob.
        let input = Text::new("Paste the authorization code:").prompt()?;
        let parts: Vec<&str> = input.splitn(2, '#').collect();
        if parts.len() != 2 {
            bail!("Invalid authorization code format. Expected format: <code>#<state>");
        }
        (parts[0].to_string(), parts[1].to_string())
    };
    // Reject callbacks that don't belong to this flow attempt.
    if returned_state != state {
        bail!(
            "OAuth state mismatch: expected '{state}', got '{returned_state}'. \
            This may indicate a CSRF attack or a stale authorization attempt."
        );
    }
    // Exchange the one-time code (plus PKCE verifier) for tokens.
    let client = ReqwestClient::new();
    let request = build_token_request(
        &client,
        provider,
        &[
            ("grant_type", "authorization_code"),
            ("client_id", provider.client_id()),
            ("code", &code),
            ("code_verifier", &code_verifier),
            ("redirect_uri", &redirect_uri),
            ("state", &state),
        ],
    );
    let response: Value = request.send().await?.json().await?;
    let access_token = response["access_token"]
        .as_str()
        .ok_or_else(|| anyhow::anyhow!("Missing access_token in response: {response}"))?
        .to_string();
    let refresh_token = response["refresh_token"]
        .as_str()
        .ok_or_else(|| anyhow::anyhow!("Missing refresh_token in response: {response}"))?
        .to_string();
    let expires_in = response["expires_in"]
        .as_i64()
        .ok_or_else(|| anyhow::anyhow!("Missing expires_in in response: {response}"))?;
    // Store an absolute expiry (unix seconds) so later runs compare cheaply.
    let expires_at = Utc::now().timestamp() + expires_in;
    let tokens = OAuthTokens {
        access_token,
        refresh_token,
        expires_at,
    };
    save_oauth_tokens(client_name, &tokens)?;
    println!(
        "Successfully authenticated client '{}' with {} via OAuth. Tokens saved.",
        client_name,
        provider.provider_name()
    );
    Ok(())
}
/// Reads the persisted OAuth tokens for `client_name`, returning `None` when
/// the token file is missing or cannot be parsed.
pub fn load_oauth_tokens(client_name: &str) -> Option<OAuthTokens> {
    fs::read_to_string(Config::token_file(client_name))
        .ok()
        .and_then(|raw| serde_json::from_str(&raw).ok())
}
/// Serializes `tokens` as pretty JSON and writes them to the client's token
/// file, creating parent directories as needed.
fn save_oauth_tokens(client_name: &str, tokens: &OAuthTokens) -> Result<()> {
    let payload = serde_json::to_string_pretty(tokens)?;
    let destination = Config::token_file(client_name);
    if let Some(dir) = destination.parent() {
        fs::create_dir_all(dir)?;
    }
    fs::write(destination, payload)?;
    Ok(())
}
/// Exchanges the stored refresh token for a fresh access token and persists
/// the updated token set before returning it.
pub async fn refresh_oauth_token(
    client: &ReqwestClient,
    provider: &impl OAuthProvider,
    client_name: &str,
    tokens: &OAuthTokens,
) -> Result<OAuthTokens> {
    let response: Value = build_token_request(
        client,
        provider,
        &[
            ("grant_type", "refresh_token"),
            ("client_id", provider.client_id()),
            ("refresh_token", &tokens.refresh_token),
        ],
    )
    .send()
    .await?
    .json()
    .await?;
    let access_token = response["access_token"]
        .as_str()
        .map(str::to_string)
        .ok_or_else(|| anyhow::anyhow!("Missing access_token in refresh response: {response}"))?;
    // Some providers omit refresh_token on refresh; keep the previous one then.
    let refresh_token = match response["refresh_token"].as_str() {
        Some(fresh) => fresh.to_string(),
        None => tokens.refresh_token.clone(),
    };
    let expires_in = response["expires_in"]
        .as_i64()
        .ok_or_else(|| anyhow::anyhow!("Missing expires_in in refresh response: {response}"))?;
    let refreshed = OAuthTokens {
        access_token,
        refresh_token,
        expires_at: Utc::now().timestamp() + expires_in,
    };
    save_oauth_tokens(client_name, &refreshed)?;
    Ok(refreshed)
}
pub async fn prepare_oauth_access_token(
client: &ReqwestClient,
provider: &impl OAuthProvider,
client_name: &str,
) -> Result<bool> {
if is_valid_access_token(client_name) {
return Ok(true);
}
let tokens = match load_oauth_tokens(client_name) {
Some(t) => t,
None => return Ok(false),
};
let tokens = if Utc::now().timestamp() >= tokens.expires_at {
refresh_oauth_token(client, provider, client_name, &tokens).await?
} else {
tokens
};
set_access_token(client_name, tokens.access_token.clone(), tokens.expires_at);
Ok(true)
}
/// Builds a POST to the provider's token endpoint, encoding `params` (plus the
/// client secret, when the provider has one) in the provider's preferred body
/// format and attaching any provider-specific token headers.
fn build_token_request(
    client: &ReqwestClient,
    provider: &(impl OAuthProvider + ?Sized),
    params: &[(&str, &str)],
) -> reqwest::RequestBuilder {
    let secret = provider.client_secret();
    let mut request = match provider.token_request_format() {
        TokenRequestFormat::Json => {
            let mut body: serde_json::Map<String, Value> = params
                .iter()
                .map(|(k, v)| (k.to_string(), Value::String(v.to_string())))
                .collect();
            // Previously both the secret and no-secret branches duplicated the
            // `.post(...).json(&body)` call behind a `let mut body = body` shadow;
            // insert conditionally and post once instead.
            if let Some(secret) = secret {
                body.insert(
                    "client_secret".to_string(),
                    Value::String(secret.to_string()),
                );
            }
            client.post(provider.token_url()).json(&body)
        }
        TokenRequestFormat::FormUrlEncoded => {
            let mut form: HashMap<String, String> = params
                .iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect();
            if let Some(secret) = secret {
                form.insert("client_secret".to_string(), secret.to_string());
            }
            client.post(provider.token_url()).form(&form)
        }
    };
    for (key, value) in provider.extra_token_headers() {
        request = request.header(key, value);
    }
    request
}
/// Blocks until the provider redirects the user's browser to `redirect_uri`,
/// serves a small "you can close this tab" page, and extracts the `code` and
/// `state` query parameters from the callback request.
fn listen_for_oauth_callback(redirect_uri: &str) -> Result<(String, String)> {
    let url: Url = redirect_uri.parse()?;
    let host = url.host_str().unwrap_or("127.0.0.1");
    let port = url
        .port()
        .ok_or_else(|| anyhow::anyhow!("No port in redirect URI"))?;
    let path = url.path();
    println!("Waiting for OAuth callback on {redirect_uri} ...\n");
    // Minimal single-shot HTTP server: accept one connection, read only the
    // request line, and parse the query string out of the request path.
    let listener = TcpListener::bind(format!("{host}:{port}"))?;
    let (mut stream, _) = listener.accept()?;
    let mut reader = BufReader::new(&stream);
    let mut request_line = String::new();
    reader.read_line(&mut request_line)?;
    let request_path = request_line
        .split_whitespace()
        .nth(1)
        .ok_or_else(|| anyhow::anyhow!("Malformed HTTP request from OAuth callback"))?;
    let full_url = format!("http://{host}:{port}{request_path}");
    let parsed: Url = full_url.parse()?;
    let response_body = "<html><body><h2>Authentication successful!</h2><p>You can close this tab and return to your terminal.</p></body></html>";
    let response = format!(
        "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
        response_body.len(),
        response_body
    );
    // Answer before validating so the browser tab isn't left hanging.
    stream.write_all(response.as_bytes())?;
    if !parsed.path().starts_with(path) {
        bail!("Unexpected callback path: {}", parsed.path());
    }
    let code = parsed
        .query_pairs()
        .find(|(k, _)| k == "code")
        .map(|(_, v)| v.to_string())
        .ok_or_else(|| {
            // No code present: surface the provider's `error` param when given.
            let error = parsed
                .query_pairs()
                .find(|(k, _)| k == "error")
                .map(|(_, v)| v.to_string())
                .unwrap_or_else(|| "unknown".to_string());
            anyhow::anyhow!("OAuth callback returned error: {error}")
        })?;
    let returned_state = parsed
        .query_pairs()
        .find(|(k, _)| k == "state")
        .map(|(_, v)| v.to_string())
        .ok_or_else(|| anyhow::anyhow!("Missing state parameter in OAuth callback"))?;
    Ok((code, returned_state))
}
/// Maps a provider-type tag to its boxed OAuth implementation, when one exists.
pub fn get_oauth_provider(provider_type: &str) -> Option<Box<dyn OAuthProvider>> {
    let provider: Box<dyn OAuthProvider> = match provider_type {
        "claude" => Box::new(super::claude_oauth::ClaudeOAuthProvider),
        "gemini" => Box::new(super::gemini_oauth::GeminiOAuthProvider),
        _ => return None,
    };
    Some(provider)
}
pub fn resolve_provider_type(client_name: &str, clients: &[ClientConfig]) -> Option<&'static str> {
for client_config in clients {
let (config_name, provider_type, auth) = client_config_info(client_config);
if config_name == client_name {
if auth == Some("oauth") && get_oauth_provider(provider_type).is_some() {
return Some(provider_type);
}
return None;
}
}
None
}
/// Names of configured clients that use OAuth and have a matching provider
/// implementation.
pub fn list_oauth_capable_clients(clients: &[ClientConfig]) -> Vec<String> {
    let mut capable = Vec::new();
    for client_config in clients {
        let (name, provider_type, auth) = client_config_info(client_config);
        if auth == Some("oauth") && get_oauth_provider(provider_type).is_some() {
            capable.push(name.to_string());
        }
    }
    capable
}
/// Extracts `(configured name, provider-type tag, auth mode)` from a client
/// config entry. Only the Claude and Gemini configs carry an `auth` field
/// here; every other provider reports `None` (API-key style auth only).
fn client_config_info(client_config: &ClientConfig) -> (&str, &'static str, Option<&str>) {
    match client_config {
        ClientConfig::ClaudeConfig(c) => (
            c.name.as_deref().unwrap_or("claude"),
            "claude",
            c.auth.as_deref(),
        ),
        ClientConfig::OpenAIConfig(c) => (c.name.as_deref().unwrap_or("openai"), "openai", None),
        ClientConfig::OpenAICompatibleConfig(c) => (
            c.name.as_deref().unwrap_or("openai-compatible"),
            "openai-compatible",
            None,
        ),
        ClientConfig::GeminiConfig(c) => (
            c.name.as_deref().unwrap_or("gemini"),
            "gemini",
            c.auth.as_deref(),
        ),
        ClientConfig::CohereConfig(c) => (c.name.as_deref().unwrap_or("cohere"), "cohere", None),
        ClientConfig::AzureOpenAIConfig(c) => (
            c.name.as_deref().unwrap_or("azure-openai"),
            "azure-openai",
            None,
        ),
        ClientConfig::VertexAIConfig(c) => {
            (c.name.as_deref().unwrap_or("vertexai"), "vertexai", None)
        }
        ClientConfig::BedrockConfig(c) => (c.name.as_deref().unwrap_or("bedrock"), "bedrock", None),
        ClientConfig::Unknown => ("unknown", "unknown", None),
    }
}
+6 -4
View File
@@ -2,10 +2,10 @@ use super::*;
use crate::utils::strip_think_tag;
use anyhow::{bail, Context, Result};
use anyhow::{Context, Result, bail};
use reqwest::RequestBuilder;
use serde::Deserialize;
use serde_json::{json, Value};
use serde_json::{Value, json};
const API_BASE: &str = "https://api.openai.com/v1";
@@ -25,7 +25,7 @@ impl OpenAIClient {
config_get_fn!(api_key, get_api_key);
config_get_fn!(api_base, get_api_base);
pub const PROMPTS: [PromptAction<'static>; 1] = [("api_key", "API Key", None, true)];
create_client_config!([("api_key", "API Key", None, true)]);
}
impl_client_trait!(
@@ -114,7 +114,9 @@ pub async fn openai_chat_completions_streaming(
function_arguments = String::from("{}");
}
let arguments: Value = function_arguments.parse().with_context(|| {
format!("Tool call '{function_name}' has non-JSON arguments '{function_arguments}'")
format!(
"Tool call '{function_name}' has non-JSON arguments '{function_arguments}'"
)
})?;
handler.tool_call(ToolCall::new(
function_name.clone(),
+1 -1
View File
@@ -21,7 +21,7 @@ impl OpenAICompatibleClient {
config_get_fn!(api_base, get_api_base);
config_get_fn!(api_key, get_api_key);
pub const PROMPTS: [PromptAction<'static>; 0] = [];
create_client_config!([]);
}
impl_client_trait!(
+24 -16
View File
@@ -3,11 +3,11 @@ use super::claude::*;
use super::openai::*;
use super::*;
use anyhow::{anyhow, bail, Context, Result};
use anyhow::{Context, Result, anyhow, bail};
use chrono::{Duration, Utc};
use reqwest::{Client as ReqwestClient, RequestBuilder};
use serde::Deserialize;
use serde_json::{json, Value};
use serde_json::{Value, json};
use std::{path::PathBuf, str::FromStr};
#[derive(Debug, Clone, Deserialize, Default)]
@@ -26,10 +26,10 @@ impl VertexAIClient {
config_get_fn!(project_id, get_project_id);
config_get_fn!(location, get_location);
pub const PROMPTS: [PromptAction<'static>; 2] = [
create_client_config!([
("project_id", "Project ID", None, false),
("location", "Location", None, false),
];
]);
}
#[async_trait::async_trait]
@@ -99,9 +99,13 @@ fn prepare_chat_completions(
let access_token = get_access_token(self_.name())?;
let base_url = if location == "global" {
format!("https://aiplatform.googleapis.com/v1/projects/{project_id}/locations/global/publishers")
format!(
"https://aiplatform.googleapis.com/v1/projects/{project_id}/locations/global/publishers"
)
} else {
format!("https://{location}-aiplatform.googleapis.com/v1/projects/{project_id}/locations/{location}/publishers")
format!(
"https://{location}-aiplatform.googleapis.com/v1/projects/{project_id}/locations/{location}/publishers"
)
};
let model_name = self_.model.real_name();
@@ -158,9 +162,13 @@ fn prepare_embeddings(self_: &VertexAIClient, data: &EmbeddingsData) -> Result<R
let access_token = get_access_token(self_.name())?;
let base_url = if location == "global" {
format!("https://aiplatform.googleapis.com/v1/projects/{project_id}/locations/global/publishers")
format!(
"https://aiplatform.googleapis.com/v1/projects/{project_id}/locations/global/publishers"
)
} else {
format!("https://{location}-aiplatform.googleapis.com/v1/projects/{project_id}/locations/{location}/publishers")
format!(
"https://{location}-aiplatform.googleapis.com/v1/projects/{project_id}/locations/{location}/publishers"
)
};
let url = format!(
"{base_url}/google/models/{}:predict",
@@ -220,12 +228,12 @@ pub async fn gemini_chat_completions_streaming(
part["functionCall"]["args"].as_object(),
) {
let thought_signature = part["thoughtSignature"]
.as_str()
.or_else(|| part["thought_signature"].as_str())
.map(|s| s.to_string());
.as_str()
.or_else(|| part["thought_signature"].as_str())
.map(|s| s.to_string());
handler.tool_call(
ToolCall::new(name.to_string(), json!(args), None)
.with_thought_signature(thought_signature),
.with_thought_signature(thought_signature),
)?;
}
}
@@ -288,12 +296,12 @@ fn gemini_extract_chat_completions_text(data: &Value) -> Result<ChatCompletionsO
part["functionCall"]["args"].as_object(),
) {
let thought_signature = part["thoughtSignature"]
.as_str()
.or_else(|| part["thought_signature"].as_str())
.map(|s| s.to_string());
.as_str()
.or_else(|| part["thought_signature"].as_str())
.map(|s| s.to_string());
tool_calls.push(
ToolCall::new(name.to_string(), json!(args), None)
.with_thought_signature(thought_signature),
.with_thought_signature(thought_signature),
);
}
}
+74 -19
View File
@@ -6,6 +6,10 @@ use crate::{
function::{Functions, run_llm_function},
};
use crate::config::prompts::{
DEFAULT_SPAWN_INSTRUCTIONS, DEFAULT_TEAMMATE_INSTRUCTIONS, DEFAULT_TODO_INSTRUCTIONS,
DEFAULT_USER_INTERACTION_INSTRUCTIONS,
};
use crate::vault::SECRET_RE;
use anyhow::{Context, Result};
use fancy_regex::Captures;
@@ -15,18 +19,6 @@ use serde::{Deserialize, Serialize};
use std::{ffi::OsStr, path::Path};
const DEFAULT_AGENT_NAME: &str = "rag";
// NOTE(review): `DEFAULT_TODO_INSTRUCTIONS` is also imported from
// `crate::config::prompts` at the top of this file; a second local definition
// of the same name would collide — confirm this one was meant to be removed.
const DEFAULT_TODO_INSTRUCTIONS: &str = "\
\n## Task Tracking\n\
You have built-in task tracking tools. Use them to track your progress:\n\
- `todo__init`: Initialize a todo list with a goal. Call this at the start of every multi-step task.\n\
- `todo__add`: Add individual tasks. Add all planned steps before starting work.\n\
- `todo__done`: Mark a task done by id. Call this immediately after completing each step.\n\
- `todo__list`: Show the current todo list.\n\
\n\
RULES:\n\
- Always create a todo list before starting work.\n\
- Mark each task done as soon as you finish it; do not batch.\n\
- If you stop with incomplete tasks, the system will automatically prompt you to continue.";
pub type AgentVariables = IndexMap<String, String>;
@@ -140,7 +132,6 @@ impl Agent {
}
config.write().mcp_registry = Some(new_mcp_registry);
agent_config.replace_tools_placeholder(&functions);
agent_config.load_envs(&config.read());
@@ -208,6 +199,15 @@ impl Agent {
functions.append_todo_functions();
}
if agent_config.can_spawn_agents {
functions.append_supervisor_functions();
}
functions.append_teammate_functions();
functions.append_user_interaction_functions();
agent_config.replace_tools_placeholder(&functions);
Ok(Self {
name: name.to_string(),
config: agent_config,
@@ -342,6 +342,13 @@ impl Agent {
output.push_str(DEFAULT_TODO_INSTRUCTIONS);
}
if self.config.can_spawn_agents && self.config.inject_spawn_instructions {
output.push_str(DEFAULT_SPAWN_INSTRUCTIONS);
}
output.push_str(DEFAULT_TEAMMATE_INSTRUCTIONS);
output.push_str(DEFAULT_USER_INTERACTION_INSTRUCTIONS);
self.interpolate_text(&output)
}
@@ -412,6 +419,30 @@ impl Agent {
self.config.max_auto_continues
}
// Whether this agent is allowed to spawn child agents.
pub fn can_spawn_agents(&self) -> bool {
    self.config.can_spawn_agents
}
// Cap on simultaneously running child agents (config default: 4).
pub fn max_concurrent_agents(&self) -> usize {
    self.config.max_concurrent_agents
}
// Cap on nested agent-spawning depth (config default: 3).
pub fn max_agent_depth(&self) -> usize {
    self.config.max_agent_depth
}
// Optional model name configured for summarization, if any.
pub fn summarization_model(&self) -> Option<&str> {
    self.config.summarization_model.as_deref()
}
// Summarization threshold from config (default: 4000).
pub fn summarization_threshold(&self) -> usize {
    self.config.summarization_threshold
}
// Escalation timeout from config (default: 300; presumably seconds — confirm at call site).
pub fn escalation_timeout(&self) -> u64 {
    self.config.escalation_timeout
}
// Number of auto-continuations performed so far.
pub fn continuation_count(&self) -> usize {
    self.continuation_count
}
@@ -425,12 +456,6 @@ impl Agent {
self.last_continuation_response = None;
}
/// True when `response` is identical to the previously recorded
/// auto-continuation response, i.e. the agent is repeating itself.
pub fn is_stale_response(&self, response: &str) -> bool {
    self.last_continuation_response.as_deref() == Some(response)
}
// Records `response` so the next auto-continuation can be checked for staleness.
pub fn set_last_continuation_response(&mut self, response: String) {
    self.last_continuation_response = Some(response);
}
@@ -590,10 +615,18 @@ pub struct AgentConfig {
pub agent_session: Option<String>,
#[serde(default)]
pub auto_continue: bool,
#[serde(default)]
pub can_spawn_agents: bool,
#[serde(default = "default_max_concurrent_agents")]
pub max_concurrent_agents: usize,
#[serde(default = "default_max_agent_depth")]
pub max_agent_depth: usize,
#[serde(default = "default_max_auto_continues")]
pub max_auto_continues: usize,
#[serde(default = "default_true")]
pub inject_todo_instructions: bool,
#[serde(default = "default_true")]
pub inject_spawn_instructions: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub compression_threshold: Option<usize>,
#[serde(default)]
@@ -616,16 +649,38 @@ pub struct AgentConfig {
pub conversation_starters: Vec<String>,
#[serde(default)]
pub documents: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub summarization_model: Option<String>,
#[serde(default = "default_summarization_threshold")]
pub summarization_threshold: usize,
#[serde(default = "default_escalation_timeout")]
pub escalation_timeout: u64,
}
// Serde default: cap of 10 automatic continuations.
fn default_max_auto_continues() -> usize {
    10
}
// Serde default: at most 4 concurrently spawned child agents.
fn default_max_concurrent_agents() -> usize {
    4
}
// Serde default: nested agent spawning limited to depth 3.
fn default_max_agent_depth() -> usize {
    3
}
// Serde default helper for boolean fields that default to `true`.
fn default_true() -> bool {
    true
}
// Serde default: summarization threshold of 4000.
fn default_summarization_threshold() -> usize {
    4000
}
// Serde default: escalation timeout of 300.
fn default_escalation_timeout() -> u64 {
    300
}
impl AgentConfig {
pub fn load(path: &Path) -> Result<Self> {
let contents = read_to_string(path)
+61
View File
@@ -1,6 +1,7 @@
mod agent;
mod input;
mod macros;
mod prompts;
mod role;
mod session;
pub(crate) mod todo;
@@ -18,6 +19,7 @@ use crate::client::{
ClientConfig, MessageContentToolCalls, Model, ModelType, OPENAI_COMPATIBLE_PROVIDERS,
ProviderModels, create_client_config, list_client_types, list_models,
};
use crate::function::user_interaction::USER_FUNCTION_PREFIX;
use crate::function::{FunctionDeclaration, Functions, ToolCallTracker, ToolResult};
use crate::rag::Rag;
use crate::render::{MarkdownRender, RenderOptions};
@@ -28,6 +30,9 @@ use crate::mcp::{
MCP_DESCRIBE_META_FUNCTION_NAME_PREFIX, MCP_INVOKE_META_FUNCTION_NAME_PREFIX,
MCP_SEARCH_META_FUNCTION_NAME_PREFIX, McpRegistry,
};
use crate::supervisor::Supervisor;
use crate::supervisor::escalation::EscalationQueue;
use crate::supervisor::mailbox::Inbox;
use crate::vault::{GlobalVault, Vault, create_vault_password_file, interpolate_secrets};
use anyhow::{Context, Result, anyhow, bail};
use fancy_regex::Regex;
@@ -207,6 +212,18 @@ pub struct Config {
pub agent: Option<Agent>,
#[serde(skip)]
pub(crate) tool_call_tracker: Option<ToolCallTracker>,
#[serde(skip)]
pub supervisor: Option<Arc<RwLock<Supervisor>>>,
#[serde(skip)]
pub parent_supervisor: Option<Arc<RwLock<Supervisor>>>,
#[serde(skip)]
pub self_agent_id: Option<String>,
#[serde(skip)]
pub current_depth: usize,
#[serde(skip)]
pub inbox: Option<Arc<Inbox>>,
#[serde(skip)]
pub root_escalation_queue: Option<Arc<EscalationQueue>>,
}
impl Default for Config {
@@ -280,6 +297,12 @@ impl Default for Config {
rag: None,
agent: None,
tool_call_tracker: Some(ToolCallTracker::default()),
supervisor: None,
parent_supervisor: None,
self_agent_id: None,
current_depth: 0,
inbox: None,
root_escalation_queue: None,
}
}
}
@@ -405,6 +428,14 @@ impl Config {
base_dir.join(env!("CARGO_CRATE_NAME"))
}
pub fn oauth_tokens_path() -> PathBuf {
Self::cache_path().join("oauth")
}
pub fn token_file(client_name: &str) -> PathBuf {
Self::oauth_tokens_path().join(format!("{client_name}_oauth_tokens.json"))
}
pub fn log_path() -> PathBuf {
Config::cache_path().join(format!("{}.log", env!("CARGO_CRATE_NAME")))
}
@@ -1818,8 +1849,17 @@ impl Config {
agent.agent_session().map(|v| v.to_string())
}
});
let should_init_supervisor = agent.can_spawn_agents();
let max_concurrent = agent.max_concurrent_agents();
let max_depth = agent.max_agent_depth();
config.write().rag = agent.rag();
config.write().agent = Some(agent);
if should_init_supervisor {
config.write().supervisor = Some(Arc::new(RwLock::new(Supervisor::new(
max_concurrent,
max_depth,
))));
}
if let Some(session) = session {
Config::use_session_safely(config, Some(&session), abort_signal).await?;
} else {
@@ -1871,6 +1911,10 @@ impl Config {
self.exit_session()?;
self.load_functions()?;
if self.agent.take().is_some() {
if let Some(ref supervisor) = self.supervisor {
supervisor.read().cancel_all();
}
self.supervisor.take();
self.rag.take();
self.discontinuous_last_message();
}
@@ -2029,6 +2073,20 @@ impl Config {
.collect();
}
if self.agent.is_none() {
let existing: HashSet<String> = functions.iter().map(|f| f.name.clone()).collect();
let builtin_functions: Vec<FunctionDeclaration> = self
.functions
.declarations()
.iter()
.filter(|v| {
v.name.starts_with(USER_FUNCTION_PREFIX) && !existing.contains(&v.name)
})
.cloned()
.collect();
functions.extend(builtin_functions);
}
if let Some(agent) = &self.agent {
let mut agent_functions: Vec<FunctionDeclaration> = agent
.functions()
@@ -2886,6 +2944,9 @@ impl Config {
fn load_functions(&mut self) -> Result<()> {
self.functions = Functions::init(self.visible_tools.as_ref().unwrap_or(&Vec::new()))?;
if self.working_mode.is_repl() {
self.functions.append_user_interaction_functions();
}
Ok(())
}
+129
View File
@@ -0,0 +1,129 @@
use indoc::indoc;
/// System-prompt fragment injected when the built-in `todo__*` tools are enabled.
pub(in crate::config) const DEFAULT_TODO_INSTRUCTIONS: &str = indoc! {"
## Task Tracking
You have built-in task tracking tools. Use them to track your progress:
- `todo__init`: Initialize a todo list with a goal. Call this at the start of every multi-step task.
- `todo__add`: Add individual tasks. Add all planned steps before starting work.
- `todo__done`: Mark a task done by id. Call this immediately after completing each step.
- `todo__list`: Show the current todo list.
RULES:
- Always create a todo list before starting work.
- Mark each task done as soon as you finish it; do not batch.
- If you stop with incomplete tasks, the system will automatically prompt you to continue."
};
/// System-prompt fragment describing the `agent__*` spawning/task-queue/escalation
/// tools; injected for agents that are allowed to spawn subagents.
pub(in crate::config) const DEFAULT_SPAWN_INSTRUCTIONS: &str = indoc! {"
## Agent Spawning System
You have built-in tools for spawning and managing subagents. These run **in parallel** as
background tasks inside the same process; no shell overhead, true concurrency.
### Available Agent Tools
| Tool | Purpose |
|------|----------|
| `agent__spawn` | Spawn a subagent in the background. Returns an `id` immediately. |
| `agent__check` | Non-blocking check: is the agent done yet? Returns PENDING or result. |
| `agent__collect` | Blocking wait: wait for an agent to finish, return its output. |
| `agent__list` | List all spawned agents and their status. |
| `agent__cancel` | Cancel a running agent by ID. |
| `agent__task_create` | Create a task in the dependency-aware task queue. |
| `agent__task_list` | List all tasks and their status/dependencies. |
| `agent__task_complete` | Mark a task done; returns any newly unblocked tasks. Auto-dispatches agents for tasks with a designated agent. |
| `agent__task_fail` | Mark a task as failed. Dependents remain blocked. |
### Core Pattern: Spawn -> Continue -> Collect
```
# 1. Spawn agents in parallel
agent__spawn --agent explore --prompt \"Find auth middleware patterns in src/\"
agent__spawn --agent explore --prompt \"Find error handling patterns in src/\"
# Both return IDs immediately, e.g. agent_explore_a1b2c3d4, agent_explore_e5f6g7h8
# 2. Continue your own work while they run (or spawn more agents)
# 3. Check if done (non-blocking)
agent__check --id agent_explore_a1b2c3d4
# 4. Collect results when ready (blocking)
agent__collect --id agent_explore_a1b2c3d4
agent__collect --id agent_explore_e5f6g7h8
```
### Parallel Spawning (DEFAULT for multi-agent work)
When a task needs multiple agents, **spawn them all at once**, then collect:
```
# Spawn explore and oracle simultaneously
agent__spawn --agent explore --prompt \"Find all database query patterns\"
agent__spawn --agent oracle --prompt \"Evaluate pros/cons of connection pooling approaches\"
# Collect both results
agent__collect --id <explore_id>
agent__collect --id <oracle_id>
```
**NEVER spawn sequentially when tasks are independent.** Parallel is always better.
### Task Queue (for complex dependency chains)
When tasks have ordering requirements, use the task queue:
```
# Create tasks with dependencies (optional: auto-dispatch with --agent)
agent__task_create --subject \"Explore existing patterns\"
agent__task_create --subject \"Implement feature\" --blocked_by [\"task_1\"] --agent coder --prompt \"Implement based on patterns found\"
agent__task_create --subject \"Write tests\" --blocked_by [\"task_2\"]
# Check what's runnable
agent__task_list
# After completing a task, mark it done to unblock dependents
# If dependents have --agent set, they auto-dispatch
agent__task_complete --task_id task_1
```
### Escalation Handling
Child agents may need user input but cannot prompt the user directly. When this happens,
you will see `pending_escalations` in your tool results listing blocked children and their questions.
| Tool | Purpose |
|------|----------|
| `agent__reply_escalation` | Unblock a child agent by answering its escalated question. |
When you see a pending escalation:
1. Read the child's question and options.
2. If you can answer from context, call `agent__reply_escalation` with your answer.
3. If you need the user's input, call the appropriate `user__*` tool yourself, then relay the answer via `agent__reply_escalation`.
4. **Respond promptly**; the child agent is blocked and waiting (5-minute timeout).
"};
/// System-prompt fragment describing sibling-to-sibling messaging via
/// `agent__send_message` / `agent__check_inbox`.
pub(in crate::config) const DEFAULT_TEAMMATE_INSTRUCTIONS: &str = indoc! {"
## Teammate Messaging
You have tools to communicate with other agents running alongside you:
- `agent__send_message --id <agent_id> --message \"...\"`: Send a message to a sibling or parent agent.
- `agent__check_inbox`: Check for messages sent to you by other agents.
If you are working alongside other agents (e.g. reviewing different files, exploring different areas):
- **Check your inbox** before finalizing your work to incorporate any cross-cutting findings from teammates.
- **Send messages** to teammates when you discover something that affects their work.
- Messages are delivered to the agent's inbox and read on their next `check_inbox` call."
};
/// System-prompt fragment describing the `user__*` interaction tools
/// (ask/confirm/input/checkbox); questions from subagents are escalated.
pub(in crate::config) const DEFAULT_USER_INTERACTION_INSTRUCTIONS: &str = indoc! {"
## User Interaction
You have built-in tools to interact with the user directly:
- `user__ask --question \"...\" --options [\"A\", \"B\", \"C\"]`: Present a selection prompt. Returns the chosen option.
- `user__confirm --question \"...\"`: Ask a yes/no question. Returns \"yes\" or \"no\".
- `user__input --question \"...\"`: Request free-form text input from the user.
- `user__checkbox --question \"...\" --options [\"A\", \"B\", \"C\"]`: Multi-select prompt. Returns an array of selected options.
Use these tools when you need user decisions, preferences, or clarification.
If you are running as a subagent, these questions are automatically escalated to the root agent for resolution."
};
+116 -8
View File
@@ -1,4 +1,6 @@
pub(crate) mod supervisor;
pub(crate) mod todo;
pub(crate) mod user_interaction;
use crate::{
config::{Agent, Config, GlobalConfig},
@@ -20,7 +22,7 @@ use serde_json::{Value, json};
use std::collections::VecDeque;
use std::ffi::OsStr;
use std::fs::File;
use std::io::Write;
use std::io::{Read, Write};
use std::{
collections::{HashMap, HashSet},
env, fs, io,
@@ -28,7 +30,9 @@ use std::{
process::{Command, Stdio},
};
use strum_macros::AsRefStr;
use supervisor::SUPERVISOR_FUNCTION_PREFIX;
use todo::TODO_FUNCTION_PREFIX;
use user_interaction::USER_FUNCTION_PREFIX;
#[derive(Embed)]
#[folder = "assets/functions/"]
@@ -119,6 +123,34 @@ pub async fn eval_tool_calls(
if is_all_null {
output = vec![];
}
if !output.is_empty() {
let (has_escalations, summary) = {
let cfg = config.read();
if cfg.current_depth == 0
&& let Some(ref queue) = cfg.root_escalation_queue
&& queue.has_pending()
{
(true, queue.pending_summary())
} else {
(false, vec![])
}
};
if has_escalations {
let notification = json!({
"pending_escalations": summary,
"instruction": "Child agents are BLOCKED waiting for your reply. Call agent__reply_escalation for each pending escalation to unblock them."
});
let synthetic_call = ToolCall::new(
"__escalation_notification".to_string(),
json!({}),
Some("escalation_check".to_string()),
);
output.push(ToolResult::new(synthetic_call, notification));
}
}
Ok(output)
}
@@ -269,6 +301,23 @@ impl Functions {
self.declarations.extend(todo::todo_function_declarations());
}
pub fn append_supervisor_functions(&mut self) {
self.declarations
.extend(supervisor::supervisor_function_declarations());
self.declarations
.extend(supervisor::escalation_function_declarations());
}
pub fn append_teammate_functions(&mut self) {
self.declarations
.extend(supervisor::teammate_function_declarations());
}
pub fn append_user_interaction_functions(&mut self) {
self.declarations
.extend(user_interaction::user_interaction_function_declarations());
}
pub fn clear_mcp_meta_functions(&mut self) {
self.declarations.retain(|d| {
!d.name.starts_with(MCP_INVOKE_META_FUNCTION_NAME_PREFIX)
@@ -849,7 +898,7 @@ impl ToolCall {
let prompt = format!("Call {cmd_name} {}", cmd_args.join(" "));
if *IS_STDOUT_TERMINAL {
if *IS_STDOUT_TERMINAL && config.read().current_depth == 0 {
println!("{}", dimmed_text(&prompt));
}
@@ -886,6 +935,24 @@ impl ToolCall {
json!({"tool_call_error": error_msg})
})
}
_ if cmd_name.starts_with(SUPERVISOR_FUNCTION_PREFIX) => {
supervisor::handle_supervisor_tool(config, &cmd_name, &json_data)
.await
.unwrap_or_else(|e| {
let error_msg = format!("Supervisor tool failed: {e}");
eprintln!("{}", warning_text(&format!("⚠️ {error_msg} ⚠️")));
json!({"tool_call_error": error_msg})
})
}
_ if cmd_name.starts_with(USER_FUNCTION_PREFIX) => {
user_interaction::handle_user_tool(config, &cmd_name, &json_data)
.await
.unwrap_or_else(|e| {
let error_msg = format!("User interaction failed: {e}");
eprintln!("{}", warning_text(&format!("⚠️ {error_msg} ⚠️")));
json!({"tool_call_error": error_msg})
})
}
_ => match run_llm_function(cmd_name, cmd_args, envs, agent_name) {
Ok(Some(contents)) => serde_json::from_str(&contents)
.ok()
@@ -997,7 +1064,7 @@ impl ToolCall {
function_name.clone(),
function_name,
vec![],
Default::default(),
agent.variable_envs(),
))
}
}
@@ -1050,18 +1117,59 @@ pub fn run_llm_function(
#[cfg(windows)]
let cmd_name = polyfill_cmd_name(&cmd_name, &bin_dirs);
let output = Command::new(&cmd_name)
envs.insert("CLICOLOR_FORCE".into(), "1".into());
envs.insert("FORCE_COLOR".into(), "1".into());
let mut child = Command::new(&cmd_name)
.args(&cmd_args)
.envs(envs)
.stdout(Stdio::inherit())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.and_then(|child| child.wait_with_output())
.map_err(|err| anyhow!("Unable to run {command_name}, {err}"))?;
let exit_code = output.status.code().unwrap_or_default();
let stdout = child.stdout.take().expect("Failed to capture stdout");
let mut stderr = child.stderr.take().expect("Failed to capture stderr");
let stdout_thread = std::thread::spawn(move || {
let mut buffer = [0; 1024];
let mut reader = stdout;
let mut out = io::stdout();
while let Ok(n) = reader.read(&mut buffer) {
if n == 0 {
break;
}
let chunk = &buffer[0..n];
let mut last_pos = 0;
for (i, &byte) in chunk.iter().enumerate() {
if byte == b'\n' {
let _ = out.write_all(&chunk[last_pos..i]);
let _ = out.write_all(b"\r\n");
last_pos = i + 1;
}
}
if last_pos < n {
let _ = out.write_all(&chunk[last_pos..n]);
}
let _ = out.flush();
}
});
let stderr_thread = std::thread::spawn(move || {
let mut buf = Vec::new();
let _ = stderr.read_to_end(&mut buf);
buf
});
let status = child
.wait()
.map_err(|err| anyhow!("Unable to run {command_name}, {err}"))?;
let _ = stdout_thread.join();
let stderr_bytes = stderr_thread.join().unwrap_or_default();
let exit_code = status.code().unwrap_or_default();
if exit_code != 0 {
let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
let stderr = String::from_utf8_lossy(&stderr_bytes).trim().to_string();
if !stderr.is_empty() {
eprintln!("{stderr}");
}
File diff suppressed because it is too large Load Diff
+1
View File
@@ -71,6 +71,7 @@ pub fn todo_function_declarations() -> Vec<FunctionDeclaration> {
description: "Display the current todo list with status of each item.".to_string(),
parameters: JsonSchema {
type_value: Some("object".to_string()),
properties: Some(IndexMap::new()),
..Default::default()
},
agent: false,
+272
View File
@@ -0,0 +1,272 @@
use super::{FunctionDeclaration, JsonSchema};
use crate::config::GlobalConfig;
use crate::supervisor::escalation::{EscalationRequest, new_escalation_id};
use anyhow::{Result, anyhow};
use indexmap::IndexMap;
use inquire::{Confirm, MultiSelect, Select, Text};
use serde_json::{Value, json};
use std::time::Duration;
use tokio::sync::oneshot;
/// Name prefix shared by all built-in user-interaction tools (e.g. `user__ask`).
pub const USER_FUNCTION_PREFIX: &str = "user__";
/// Fallback escalation timeout in seconds, used when no agent config supplies one.
const DEFAULT_ESCALATION_TIMEOUT_SECS: u64 = 300;
pub fn user_interaction_function_declarations() -> Vec<FunctionDeclaration> {
vec![
FunctionDeclaration {
name: format!("{USER_FUNCTION_PREFIX}ask"),
description: "Ask the user to select one option from a list. Returns the selected option. Indicate the recommended choice if there is one.".to_string(),
parameters: JsonSchema {
type_value: Some("object".to_string()),
properties: Some(IndexMap::from([
(
"question".to_string(),
JsonSchema {
type_value: Some("string".to_string()),
description: Some("The question to present to the user".into()),
..Default::default()
},
),
(
"options".to_string(),
JsonSchema {
type_value: Some("array".to_string()),
description: Some("List of options for the user to choose from".into()),
items: Some(Box::new(JsonSchema {
type_value: Some("string".to_string()),
..Default::default()
})),
..Default::default()
},
),
])),
required: Some(vec!["question".to_string(), "options".to_string()]),
..Default::default()
},
agent: false,
},
FunctionDeclaration {
name: format!("{USER_FUNCTION_PREFIX}confirm"),
description: "Ask the user a yes/no question. Returns \"yes\" or \"no\".".to_string(),
parameters: JsonSchema {
type_value: Some("object".to_string()),
properties: Some(IndexMap::from([(
"question".to_string(),
JsonSchema {
type_value: Some("string".to_string()),
description: Some("The yes/no question to ask the user".into()),
..Default::default()
},
)])),
required: Some(vec!["question".to_string()]),
..Default::default()
},
agent: false,
},
FunctionDeclaration {
name: format!("{USER_FUNCTION_PREFIX}input"),
description: "Ask the user for free-form text input. Returns the text entered.".to_string(),
parameters: JsonSchema {
type_value: Some("object".to_string()),
properties: Some(IndexMap::from([(
"question".to_string(),
JsonSchema {
type_value: Some("string".to_string()),
description: Some("The prompt/question to display".into()),
..Default::default()
},
)])),
required: Some(vec!["question".to_string()]),
..Default::default()
},
agent: false,
},
FunctionDeclaration {
name: format!("{USER_FUNCTION_PREFIX}checkbox"),
description: "Ask the user to select one or more options from a list. Returns an array of selected options.".to_string(),
parameters: JsonSchema {
type_value: Some("object".to_string()),
properties: Some(IndexMap::from([
(
"question".to_string(),
JsonSchema {
type_value: Some("string".to_string()),
description: Some("The question to present to the user".into()),
..Default::default()
},
),
(
"options".to_string(),
JsonSchema {
type_value: Some("array".to_string()),
description: Some("List of options the user can select from (multiple selections allowed)".into()),
items: Some(Box::new(JsonSchema {
type_value: Some("string".to_string()),
..Default::default()
})),
..Default::default()
},
),
])),
required: Some(vec!["question".to_string(), "options".to_string()]),
..Default::default()
},
agent: false,
},
]
}
/// Entry point for `user__*` tool calls.
///
/// At the root (depth 0) the question is put to the user through an
/// interactive prompt; at any deeper level it is escalated toward the root.
pub async fn handle_user_tool(
    config: &GlobalConfig,
    cmd_name: &str,
    args: &Value,
) -> Result<Value> {
    let action = cmd_name
        .strip_prefix(USER_FUNCTION_PREFIX)
        .unwrap_or(cmd_name);
    // Copy the depth out first so the config read lock is not held across the await.
    let depth = config.read().current_depth;
    match depth {
        0 => handle_direct(action, args),
        _ => handle_escalated(config, action, args).await,
    }
}
fn handle_direct(action: &str, args: &Value) -> Result<Value> {
match action {
"ask" => handle_direct_ask(args),
"confirm" => handle_direct_confirm(args),
"input" => handle_direct_input(args),
"checkbox" => handle_direct_checkbox(args),
_ => Err(anyhow!("Unknown user interaction: {action}")),
}
}
/// Show a single-choice select prompt; returns `{"answer": <choice>}`.
fn handle_direct_ask(args: &Value) -> Result<Value> {
    let Some(question) = args.get("question").and_then(Value::as_str) else {
        return Err(anyhow!("'question' is required"));
    };
    let choices = parse_options(args)?;
    let selection = Select::new(question, choices).prompt()?;
    Ok(json!({ "answer": selection }))
}
/// Show a yes/no confirmation (defaulting to yes); returns `{"answer": "yes"|"no"}`.
fn handle_direct_confirm(args: &Value) -> Result<Value> {
    let Some(question) = args.get("question").and_then(Value::as_str) else {
        return Err(anyhow!("'question' is required"));
    };
    let confirmed = Confirm::new(question).with_default(true).prompt()?;
    let answer = if confirmed { "yes" } else { "no" };
    Ok(json!({ "answer": answer }))
}
/// Show a free-form text prompt; returns `{"answer": <text>}`.
fn handle_direct_input(args: &Value) -> Result<Value> {
    let Some(question) = args.get("question").and_then(Value::as_str) else {
        return Err(anyhow!("'question' is required"));
    };
    let text = Text::new(question).prompt()?;
    Ok(json!({ "answer": text }))
}
/// Show a multi-select prompt; returns `{"answers": [<choice>, ...]}`.
fn handle_direct_checkbox(args: &Value) -> Result<Value> {
    let Some(question) = args.get("question").and_then(Value::as_str) else {
        return Err(anyhow!("'question' is required"));
    };
    let choices = parse_options(args)?;
    let selections = MultiSelect::new(question, choices).prompt()?;
    Ok(json!({ "answers": selections }))
}
/// Escalate a `user__*` question from a nested agent to the root agent.
///
/// The request is placed on the root escalation queue together with a oneshot
/// reply channel, then this call blocks until the root side answers or the
/// timeout elapses. Timeouts and a dropped reply channel are reported as `Ok`
/// payloads carrying "error"/"fallback" fields (not `Err`), so the calling
/// model can proceed on its own judgment.
async fn handle_escalated(config: &GlobalConfig, action: &str, args: &Value) -> Result<Value> {
    let question = args
        .get("question")
        .and_then(Value::as_str)
        .ok_or_else(|| anyhow!("'question' is required"))?
        .to_string();
    // Optional choice list; non-string array entries are silently skipped.
    let options: Option<Vec<String>> = args.get("options").and_then(Value::as_array).map(|arr| {
        arr.iter()
            .filter_map(Value::as_str)
            .map(String::from)
            .collect()
    });
    // Snapshot identity, queue handle, and timeout inside one scope so the
    // config read lock is released before any await below.
    let (from_agent_id, from_agent_name, root_queue, timeout_secs) = {
        let cfg = config.read();
        let agent_id = cfg
            .self_agent_id
            .clone()
            .unwrap_or_else(|| "unknown".to_string());
        let agent_name = cfg
            .agent
            .as_ref()
            .map(|a| a.name().to_string())
            .unwrap_or_else(|| "unknown".to_string());
        let queue = cfg
            .root_escalation_queue
            .clone()
            .ok_or_else(|| anyhow!("No escalation queue available; cannot reach parent agent"))?;
        let timeout = cfg
            .agent
            .as_ref()
            .map(|a| a.escalation_timeout())
            .unwrap_or(DEFAULT_ESCALATION_TIMEOUT_SECS);
        (agent_id, agent_name, queue, timeout)
    };
    let escalation_id = new_escalation_id();
    let (tx, rx) = oneshot::channel();
    // The action name is folded into the question text so the root side can
    // see which interaction kind (ask/confirm/input/checkbox) was requested.
    let request = EscalationRequest {
        id: escalation_id.clone(),
        from_agent_id,
        from_agent_name: from_agent_name.clone(),
        question: format!("[{action}] {question}"),
        options,
        reply_tx: tx,
    };
    root_queue.submit(request);
    let timeout = Duration::from_secs(timeout_secs);
    match tokio::time::timeout(timeout, rx).await {
        Ok(Ok(reply)) => Ok(json!({ "answer": reply })),
        // The sender was dropped without a reply (parent discarded the request).
        Ok(Err(_)) => Ok(json!({
            "error": "Escalation was cancelled. The parent agent dropped the request",
            "fallback": "Make your best judgment and proceed",
        })),
        // No reply arrived within the configured window.
        Err(_) => Ok(json!({
            "error": format!(
                "Escalation timed out after {timeout_secs} seconds waiting for user response"
            ),
            "fallback": "Make your best judgment and proceed",
        })),
    }
}
/// Extract the required `options` array from tool args as owned strings.
/// Non-string entries are skipped; a missing or non-array value is an error.
fn parse_options(args: &Value) -> Result<Vec<String>> {
    match args.get("options").and_then(Value::as_array) {
        Some(values) => Ok(values
            .iter()
            .filter_map(Value::as_str)
            .map(String::from)
            .collect()),
        None => Err(anyhow!("'options' is required and must be an array of strings")),
    }
}
+43 -3
View File
@@ -9,13 +9,14 @@ mod repl;
mod utils;
mod mcp;
mod parsers;
mod supervisor;
mod vault;
#[macro_use]
extern crate log;
use crate::client::{
ModelType, call_chat_completions, call_chat_completions_streaming, list_models,
ModelType, call_chat_completions, call_chat_completions_streaming, list_models, oauth,
};
use crate::config::{
Agent, CODE_ROLE, Config, EXPLAIN_SHELL_ROLE, GlobalConfig, Input, SHELL_ROLE,
@@ -28,15 +29,17 @@ use crate::utils::*;
use crate::cli::Cli;
use crate::vault::Vault;
use anyhow::{Result, bail};
use anyhow::{Result, anyhow, bail};
use clap::{CommandFactory, Parser};
use clap_complete::CompleteEnv;
use inquire::Text;
use client::ClientConfig;
use inquire::{Select, Text};
use log::LevelFilter;
use log4rs::append::console::ConsoleAppender;
use log4rs::append::file::FileAppender;
use log4rs::config::{Appender, Logger, Root};
use log4rs::encode::pattern::PatternEncoder;
use oauth::OAuthProvider;
use parking_lot::RwLock;
use std::path::PathBuf;
use std::{env, mem, process, sync::Arc};
@@ -80,6 +83,13 @@ async fn main() -> Result<()> {
let log_path = setup_logger()?;
if let Some(client_arg) = &cli.authenticate {
let config = Config::init_bare()?;
let (client_name, provider) = resolve_oauth_client(client_arg.as_deref(), &config.clients)?;
oauth::run_oauth_flow(&*provider, &client_name).await?;
return Ok(());
}
if vault_flags {
return Vault::handle_vault_flags(cli, Config::init_bare()?);
}
@@ -503,3 +513,33 @@ fn init_console_logger(
.build(Root::builder().appender("console").build(root_log_level))
.unwrap()
}
/// Resolve which client to authenticate and the OAuth provider to use.
///
/// With an explicit client name, validates that the client exists and is
/// OAuth-capable. Otherwise falls back on the configured OAuth-capable
/// clients: errors when there are none, auto-selects a single candidate, and
/// interactively prompts when there are several.
///
/// # Errors
/// Fails when the explicit client is unknown or not OAuth-capable, when no
/// OAuth-capable client is configured, or when the selection prompt fails.
fn resolve_oauth_client(
    explicit: Option<&str>,
    clients: &[ClientConfig],
) -> Result<(String, Box<dyn OAuthProvider>)> {
    // Shared lookup for a name already known to be OAuth-capable. `expect`
    // documents the invariant instead of the previous bare `unwrap`s.
    let provider_for = |name: &str| -> Box<dyn OAuthProvider> {
        let provider_type = oauth::resolve_provider_type(name, clients)
            .expect("candidate came from list_oauth_capable_clients, so it must resolve");
        oauth::get_oauth_provider(provider_type)
            .expect("every resolvable provider type has a provider implementation")
    };
    if let Some(name) = explicit {
        // An explicit name is user input: report a proper error, don't panic.
        let provider_type = oauth::resolve_provider_type(name, clients)
            .ok_or_else(|| anyhow!("Client '{name}' not found or doesn't support OAuth"))?;
        let provider = oauth::get_oauth_provider(provider_type)
            .expect("every resolvable provider type has a provider implementation");
        return Ok((name.to_string(), provider));
    }
    let candidates = oauth::list_oauth_capable_clients(clients);
    match candidates.len() {
        0 => bail!("No OAuth-capable clients configured."),
        1 => {
            let name = candidates[0].clone();
            let provider = provider_for(&name);
            Ok((name, provider))
        }
        _ => {
            // The prompt can take ownership of `candidates`; no clone needed.
            let choice =
                Select::new("Select a client to authenticate:", candidates).prompt()?;
            let provider = provider_for(&choice);
            Ok((choice, provider))
        }
    }
}
+80 -53
View File
@@ -6,7 +6,7 @@ use bm25::{Document, Language, SearchEngine, SearchEngineBuilder};
use futures_util::future::BoxFuture;
use futures_util::{StreamExt, TryStreamExt, stream};
use indoc::formatdoc;
use rmcp::model::{CallToolRequestParam, CallToolResult};
use rmcp::model::{CallToolRequestParams, CallToolResult};
use rmcp::service::RunningService;
use rmcp::transport::TokioChildProcess;
use rmcp::{RoleClient, ServiceExt};
@@ -158,27 +158,31 @@ impl McpRegistry {
}
pub async fn reinit(
registry: McpRegistry,
mut registry: McpRegistry,
enabled_mcp_servers: Option<String>,
abort_signal: AbortSignal,
) -> Result<Self> {
debug!("Reinitializing MCP registry");
debug!("Stopping all MCP servers");
let mut new_registry = abortable_run_with_spinner(
registry.stop_all_servers(),
"Stopping MCP servers",
let desired_ids = registry.resolve_server_ids(enabled_mcp_servers.clone());
let desired_set: HashSet<String> = desired_ids.iter().cloned().collect();
debug!("Stopping unused MCP servers");
abortable_run_with_spinner(
registry.stop_unused_servers(&desired_set),
"Stopping unused MCP servers",
abort_signal.clone(),
)
.await?;
abortable_run_with_spinner(
new_registry.start_select_mcp_servers(enabled_mcp_servers),
registry.start_select_mcp_servers(enabled_mcp_servers),
"Loading MCP servers",
abort_signal,
)
.await?;
Ok(new_registry)
Ok(registry)
}
async fn start_select_mcp_servers(
@@ -192,43 +196,30 @@ impl McpRegistry {
return Ok(());
}
if let Some(servers) = enabled_mcp_servers {
debug!("Starting selected MCP servers: {:?}", servers);
let config = self
.config
.as_ref()
.with_context(|| "MCP Config not defined. Cannot start servers")?;
let mcp_servers = config.mcp_servers.clone();
let desired_ids = self.resolve_server_ids(enabled_mcp_servers);
let ids_to_start: Vec<String> = desired_ids
.into_iter()
.filter(|id| !self.servers.contains_key(id))
.collect();
let enabled_servers: HashSet<String> =
servers.split(',').map(|s| s.trim().to_string()).collect();
let server_ids: Vec<String> = if servers == "all" {
mcp_servers.into_keys().collect()
} else {
mcp_servers
.into_keys()
.filter(|id| enabled_servers.contains(id))
.collect()
};
if ids_to_start.is_empty() {
return Ok(());
}
let results: Vec<(String, Arc<_>, ServerCatalog)> = stream::iter(
server_ids
.into_iter()
.map(|id| async { self.start_server(id).await }),
)
.buffer_unordered(num_cpus::get())
.try_collect()
.await?;
debug!("Starting selected MCP servers: {:?}", ids_to_start);
self.servers = results
.clone()
let results: Vec<(String, Arc<_>, ServerCatalog)> = stream::iter(
ids_to_start
.into_iter()
.map(|(id, server, _)| (id, server))
.collect();
self.catalogs = results
.into_iter()
.map(|(id, _, catalog)| (id, catalog))
.collect();
.map(|id| async { self.start_server(id).await }),
)
.buffer_unordered(num_cpus::get())
.try_collect()
.await?;
for (id, server, catalog) in results {
self.servers.insert(id.clone(), server);
self.catalogs.insert(id, catalog);
}
Ok(())
@@ -309,19 +300,53 @@ impl McpRegistry {
Ok((id.to_string(), service, catalog))
}
pub async fn stop_all_servers(mut self) -> Result<Self> {
for (id, server) in self.servers {
Arc::try_unwrap(server)
.map_err(|_| anyhow!("Failed to unwrap Arc for MCP server: {id}"))?
.cancel()
.await
.with_context(|| format!("Failed to stop MCP server: {id}"))?;
info!("Stopped MCP server: {id}");
fn resolve_server_ids(&self, enabled_mcp_servers: Option<String>) -> Vec<String> {
if let Some(config) = &self.config
&& let Some(servers) = enabled_mcp_servers
{
if servers == "all" {
config.mcp_servers.keys().cloned().collect()
} else {
let enabled_servers: HashSet<String> =
servers.split(',').map(|s| s.trim().to_string()).collect();
config
.mcp_servers
.keys()
.filter(|id| enabled_servers.contains(*id))
.cloned()
.collect()
}
} else {
vec![]
}
}
pub async fn stop_unused_servers(&mut self, keep_ids: &HashSet<String>) -> Result<()> {
let mut ids_to_remove = Vec::new();
for (id, _) in self.servers.iter() {
if !keep_ids.contains(id) {
ids_to_remove.push(id.clone());
}
}
self.servers = HashMap::new();
Ok(self)
for id in ids_to_remove {
if let Some(server) = self.servers.remove(&id) {
match Arc::try_unwrap(server) {
Ok(server_inner) => {
server_inner
.cancel()
.await
.with_context(|| format!("Failed to stop MCP server: {id}"))?;
info!("Stopped MCP server: {id}");
}
Err(_) => {
info!("Detaching from MCP server: {id} (still in use)");
}
}
self.catalogs.remove(&id);
}
}
Ok(())
}
pub fn list_started_servers(&self) -> Vec<String> {
@@ -395,9 +420,11 @@ impl McpRegistry {
let tool = tool.to_owned();
Box::pin(async move {
let server = server?;
let call_tool_request = CallToolRequestParam {
let call_tool_request = CallToolRequestParams {
name: Cow::Owned(tool.to_owned()),
arguments: arguments.as_object().cloned(),
meta: None,
task: None,
};
let result = server.call_tool(call_tool_request).await?;
+20 -3
View File
@@ -6,7 +6,7 @@ use self::completer::ReplCompleter;
use self::highlighter::ReplHighlighter;
use self::prompt::ReplPrompt;
use crate::client::{call_chat_completions, call_chat_completions_streaming};
use crate::client::{call_chat_completions, call_chat_completions_streaming, init_client, oauth};
use crate::config::{
AgentVariables, AssertState, Config, GlobalConfig, Input, LastMessage, StateFlags,
macro_execute,
@@ -17,6 +17,7 @@ use crate::utils::{
};
use crate::mcp::McpRegistry;
use crate::resolve_oauth_client;
use anyhow::{Context, Result, bail};
use crossterm::cursor::SetCursorStyle;
use fancy_regex::Regex;
@@ -32,10 +33,15 @@ use std::{env, mem, process};
const MENU_NAME: &str = "completion_menu";
static REPL_COMMANDS: LazyLock<[ReplCommand; 37]> = LazyLock::new(|| {
static REPL_COMMANDS: LazyLock<[ReplCommand; 38]> = LazyLock::new(|| {
[
ReplCommand::new(".help", "Show this help guide", AssertState::pass()),
ReplCommand::new(".info", "Show system info", AssertState::pass()),
ReplCommand::new(
".authenticate",
"Authenticate the current model client via OAuth (if configured)",
AssertState::pass(),
),
ReplCommand::new(
".edit config",
"Modify configuration file",
@@ -421,6 +427,18 @@ pub async fn run_repl_command(
}
None => println!("Usage: .model <name>"),
},
".authenticate" => {
let client = init_client(config, None)?;
if !client.supports_oauth() {
bail!(
"Client '{}' doesn't either support OAuth or isn't configured to use it (i.e. uses an API key instead)",
client.name()
);
}
let clients = config.read().clients.clone();
let (client_name, provider) = resolve_oauth_client(Some(client.name()), &clients)?;
oauth::run_oauth_flow(&*provider, &client_name).await?;
}
".prompt" => match args {
Some(text) => {
config.write().use_prompt(text)?;
@@ -887,7 +905,6 @@ async fn ask(
if let Some(agent) = &cfg.agent {
agent.auto_continue_enabled()
&& agent.continuation_count() < agent.max_auto_continues()
&& !agent.is_stale_response(&output)
&& agent.todo_list().has_incomplete()
} else {
false
+80
View File
@@ -0,0 +1,80 @@
use fmt::{Debug, Formatter};
use serde_json::{Value, json};
use std::collections::HashMap;
use std::fmt;
use tokio::sync::oneshot;
use uuid::Uuid;
/// A question raised by a child agent that must be answered at the root.
pub struct EscalationRequest {
    /// Unique escalation id.
    pub id: String,
    /// Id of the agent that raised the escalation.
    pub from_agent_id: String,
    /// Human-readable name of the raising agent.
    pub from_agent_name: String,
    /// The question to surface to the root agent/user.
    pub question: String,
    /// Optional preset choices for the answer.
    pub options: Option<Vec<String>>,
    /// One-shot channel used to deliver the reply back to the blocked child.
    pub reply_tx: oneshot::Sender<String>,
}
/// Thread-safe set of pending escalations, keyed by escalation id.
pub struct EscalationQueue {
    // Map of escalation id -> request; guarded by a parking_lot mutex.
    pending: parking_lot::Mutex<HashMap<String, EscalationRequest>>,
}
impl EscalationQueue {
    /// Create an empty queue.
    pub fn new() -> Self {
        Self {
            pending: parking_lot::Mutex::new(HashMap::new()),
        }
    }

    /// Register a pending escalation and return its id.
    pub fn submit(&self, request: EscalationRequest) -> String {
        let id = request.id.clone();
        let mut pending = self.pending.lock();
        pending.insert(id.clone(), request);
        id
    }

    /// Remove and return the escalation with the given id, if present.
    pub fn take(&self, escalation_id: &str) -> Option<EscalationRequest> {
        let mut pending = self.pending.lock();
        pending.remove(escalation_id)
    }

    /// Render every pending escalation as a JSON object (id, agent, question,
    /// and the options list when one was supplied).
    pub fn pending_summary(&self) -> Vec<Value> {
        let pending = self.pending.lock();
        let mut summaries = Vec::with_capacity(pending.len());
        for request in pending.values() {
            let mut entry = json!({
                "escalation_id": request.id,
                "from_agent_id": request.from_agent_id,
                "from_agent_name": request.from_agent_name,
                "question": request.question,
            });
            if let Some(options) = &request.options {
                entry["options"] = json!(options);
            }
            summaries.push(entry);
        }
        summaries
    }

    /// Whether any escalation is still waiting for a reply.
    pub fn has_pending(&self) -> bool {
        let pending = self.pending.lock();
        !pending.is_empty()
    }
}
/// An empty queue; delegates to `EscalationQueue::new`.
impl Default for EscalationQueue {
    fn default() -> Self {
        Self::new()
    }
}
/// Manual Debug: requests hold a oneshot sender, so only the count is shown.
impl Debug for EscalationQueue {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("EscalationQueue")
            .field("pending_count", &self.pending.lock().len())
            .finish()
    }
}
/// Generate a short escalation id of the form `esc_xxxxxxxx`
/// (the first 8 characters of a fresh UUIDv4's string form).
pub fn new_escalation_id() -> String {
    format!("esc_{}", &Uuid::new_v4().to_string()[..8])
}
+60
View File
@@ -0,0 +1,60 @@
use serde::{Deserialize, Serialize};
/// A single message routed between agents.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Envelope {
    /// Sender agent id.
    pub from: String,
    /// Recipient agent id.
    pub to: String,
    /// Message body.
    pub payload: EnvelopePayload,
    /// Creation time of the envelope (UTC).
    pub timestamp: chrono::DateTime<chrono::Utc>,
}
/// The kinds of messages agents exchange. Internally tagged on the wire:
/// serializes as `{"type": "task_completed", ...}` with snake_case tags.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum EnvelopePayload {
    /// Free-form text message.
    Text { content: String },
    /// A task finished; carries the task id and a result summary.
    TaskCompleted { task_id: String, summary: String },
    /// Sender asks permission to shut down, with a reason.
    ShutdownRequest { reason: String },
    /// Recipient approves a prior shutdown request.
    ShutdownApproved,
}
/// A per-agent mailbox of undelivered envelopes: `deliver` appends,
/// `drain` empties it in priority order.
#[derive(Debug, Default)]
pub struct Inbox {
    // Pending envelopes; mutex-guarded so producers on other threads can
    // deliver while the owner drains.
    messages: parking_lot::Mutex<Vec<Envelope>>,
}
impl Inbox {
pub fn new() -> Self {
Self {
messages: parking_lot::Mutex::new(Vec::new()),
}
}
pub fn deliver(&self, envelope: Envelope) {
self.messages.lock().push(envelope);
}
pub fn drain(&self) -> Vec<Envelope> {
let mut msgs = {
let mut guard = self.messages.lock();
std::mem::take(&mut *guard)
};
msgs.sort_by_key(|e| match &e.payload {
EnvelopePayload::ShutdownRequest { .. } => 0,
EnvelopePayload::ShutdownApproved => 0,
EnvelopePayload::TaskCompleted { .. } => 1,
EnvelopePayload::Text { .. } => 2,
});
msgs
}
}
impl Clone for Inbox {
    /// Deep-copies the queued envelopes into a fresh mutex. Implemented by
    /// hand because `parking_lot::Mutex<T>` does not itself implement `Clone`.
    fn clone(&self) -> Self {
        let snapshot = self.messages.lock().clone();
        Self {
            messages: parking_lot::Mutex::new(snapshot),
        }
    }
}
+128
View File
@@ -0,0 +1,128 @@
pub mod escalation;
pub mod mailbox;
pub mod taskqueue;
use crate::utils::AbortSignal;
use fmt::{Debug, Formatter};
use mailbox::Inbox;
use taskqueue::TaskQueue;
use anyhow::{Result, bail};
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use tokio::task::JoinHandle;
/// Terminal state of a finished agent.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AgentExitStatus {
    /// The agent ran to completion.
    Completed,
    /// The agent failed; the payload is a human-readable reason.
    Failed(String),
}
/// Final output of an agent run, produced when its task finishes.
pub struct AgentResult {
    /// Unique id of the agent instance.
    pub id: String,
    /// Name of the agent definition this instance ran.
    pub agent_name: String,
    /// The agent's final textual output.
    pub output: String,
    /// Whether the agent completed or failed.
    pub exit_status: AgentExitStatus,
}
/// Live handle to a spawned agent: its mailbox, abort signal, and the join
/// handle that will eventually yield its `AgentResult`.
pub struct AgentHandle {
    /// Unique id of the agent instance.
    pub id: String,
    /// Name of the agent definition this instance runs.
    pub agent_name: String,
    /// Nesting depth (agents spawned by agents); checked against the
    /// supervisor's `max_depth` at registration time.
    pub depth: usize,
    /// Shared mailbox other agents can deliver envelopes to.
    pub inbox: Arc<Inbox>,
    /// Cooperative cancellation signal for this agent.
    pub abort_signal: AbortSignal,
    /// Resolves with the agent's result when its task finishes.
    pub join_handle: JoinHandle<Result<AgentResult>>,
}
/// Tracks running agents and the shared task queue, enforcing concurrency
/// and nesting-depth limits at registration time.
pub struct Supervisor {
    // Active agents keyed by agent id.
    handles: HashMap<String, AgentHandle>,
    // Shared dependency-aware queue of tasks agents claim and complete.
    task_queue: TaskQueue,
    // Maximum number of simultaneously registered agents.
    max_concurrent: usize,
    // Maximum allowed agent nesting depth.
    max_depth: usize,
}
impl Supervisor {
    /// Creates a supervisor with the given concurrency and depth limits.
    pub fn new(max_concurrent: usize, max_depth: usize) -> Self {
        Self {
            handles: HashMap::new(),
            task_queue: TaskQueue::new(),
            max_concurrent,
            max_depth,
        }
    }

    /// Number of currently registered (not-yet-taken) agents.
    pub fn active_count(&self) -> usize {
        self.handles.len()
    }

    /// Configured concurrency ceiling.
    pub fn max_concurrent(&self) -> usize {
        self.max_concurrent
    }

    /// Configured nesting-depth ceiling.
    pub fn max_depth(&self) -> usize {
        self.max_depth
    }

    /// Shared task queue (read-only).
    pub fn task_queue(&self) -> &TaskQueue {
        &self.task_queue
    }

    /// Shared task queue (mutable).
    pub fn task_queue_mut(&mut self) -> &mut TaskQueue {
        &mut self.task_queue
    }

    /// Registers a spawned agent, enforcing the concurrency and depth limits.
    ///
    /// # Errors
    /// Fails when at capacity, when `handle.depth` exceeds `max_depth`, or
    /// when an agent with the same id is already registered — silently
    /// overwriting the previous entry would drop its `JoinHandle` and abort
    /// signal, orphaning a still-running task.
    pub fn register(&mut self, handle: AgentHandle) -> Result<()> {
        if self.handles.len() >= self.max_concurrent {
            bail!(
                "Cannot spawn agent: at capacity ({}/{})",
                self.handles.len(),
                self.max_concurrent
            );
        }
        if handle.depth > self.max_depth {
            bail!(
                "Cannot spawn agent: max depth exceeded ({}/{})",
                handle.depth,
                self.max_depth
            );
        }
        if self.handles.contains_key(&handle.id) {
            bail!("Cannot spawn agent: id '{}' is already registered", handle.id);
        }
        self.handles.insert(handle.id.clone(), handle);
        Ok(())
    }

    /// Whether the agent's task has finished; `None` if the id is unknown.
    pub fn is_finished(&self, id: &str) -> Option<bool> {
        self.handles.get(id).map(|h| h.join_handle.is_finished())
    }

    /// Removes and returns the handle for `id`, transferring ownership
    /// (typically to await its join handle).
    pub fn take(&mut self, id: &str) -> Option<AgentHandle> {
        self.handles.remove(id)
    }

    /// Mailbox of the agent with the given id, if registered.
    pub fn inbox(&self, id: &str) -> Option<&Arc<Inbox>> {
        self.handles.get(id).map(|h| &h.inbox)
    }

    /// `(id, name)` pairs for all registered agents. Order is unspecified
    /// (HashMap iteration).
    pub fn list_agents(&self) -> Vec<(&str, &str)> {
        self.handles
            .values()
            .map(|h| (h.id.as_str(), h.agent_name.as_str()))
            .collect()
    }

    /// Requests cooperative cancellation of every registered agent by
    /// setting each agent's abort signal.
    pub fn cancel_all(&self) {
        for handle in self.handles.values() {
            handle.abort_signal.set_ctrlc();
        }
    }
}
impl Debug for Supervisor {
    /// Summarizes the supervisor without dumping agent handles (which hold
    /// non-`Debug` join handles).
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("Supervisor");
        dbg.field("active_agents", &self.handles.len());
        dbg.field("max_concurrent", &self.max_concurrent);
        dbg.field("max_depth", &self.max_depth);
        dbg.finish()
    }
}
+271
View File
@@ -0,0 +1,271 @@
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
/// Lifecycle state of a task in the queue.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum TaskStatus {
    /// Ready to be claimed (no unresolved dependencies).
    Pending,
    /// Waiting on one or more incomplete dependencies.
    Blocked,
    /// Claimed by an owner and currently running.
    InProgress,
    /// Finished successfully.
    Completed,
    /// Finished unsuccessfully.
    Failed,
}
/// One node in the task dependency graph.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskNode {
    /// Queue-assigned id (stringified sequential integer).
    pub id: String,
    /// Short human-readable title.
    pub subject: String,
    /// Longer free-form description of the work.
    pub description: String,
    /// Current lifecycle state.
    pub status: TaskStatus,
    /// Id of the agent that claimed this task, if any.
    pub owner: Option<String>,
    /// Ids of tasks that must complete before this one can run.
    pub blocked_by: HashSet<String>,
    /// Ids of tasks waiting on this one (reverse edges of `blocked_by`).
    pub blocks: HashSet<String>,
    /// Agent this task should be dispatched to, if predetermined.
    /// NOTE(review): presumably consumed by the dispatcher — confirm at call sites.
    pub dispatch_agent: Option<String>,
    /// Prompt to seed the dispatched agent with, if any.
    pub prompt: Option<String>,
}
impl TaskNode {
    /// Builds a fresh task in the `Pending` state, with no owner and no
    /// dependency edges.
    pub fn new(
        id: String,
        subject: String,
        description: String,
        dispatch_agent: Option<String>,
        prompt: Option<String>,
    ) -> Self {
        Self {
            id,
            subject,
            description,
            status: TaskStatus::Pending,
            owner: None,
            blocked_by: HashSet::new(),
            blocks: HashSet::new(),
            dispatch_agent,
            prompt,
        }
    }

    /// A task may be picked up once it is `Pending` and nothing blocks it.
    pub fn is_runnable(&self) -> bool {
        matches!(self.status, TaskStatus::Pending) && self.blocked_by.is_empty()
    }
}
#[derive(Debug, Clone, Default)]
pub struct TaskQueue {
tasks: HashMap<String, TaskNode>,
next_id: usize,
}
impl TaskQueue {
    /// Creates an empty queue; ids are minted sequentially starting at 1.
    pub fn new() -> Self {
        Self {
            tasks: HashMap::new(),
            next_id: 1,
        }
    }

    /// Adds a new `Pending` task and returns its id.
    pub fn create(
        &mut self,
        subject: String,
        description: String,
        dispatch_agent: Option<String>,
        prompt: Option<String>,
    ) -> String {
        let id = self.next_id.to_string();
        self.next_id += 1;
        let task = TaskNode::new(id.clone(), subject, description, dispatch_agent, prompt);
        self.tasks.insert(id.clone(), task);
        id
    }

    /// Records that `task_id` cannot run until `blocked_by` has completed.
    ///
    /// # Errors
    /// Rejects self-dependencies, unknown ids, and edges that would form a
    /// cycle. A dependency on an already-`Completed` task is treated as
    /// satisfied and skipped — previously the edge was recorded anyway,
    /// leaving the task blocked forever because the blocker's `complete()`
    /// notification had already fired.
    pub fn add_dependency(&mut self, task_id: &str, blocked_by: &str) -> Result<(), String> {
        if task_id == blocked_by {
            return Err("A task cannot depend on itself".into());
        }
        let Some(blocker_status) = self.tasks.get(blocked_by).map(|t| t.status.clone()) else {
            return Err(format!("Dependency task '{blocked_by}' does not exist"));
        };
        if !self.tasks.contains_key(task_id) {
            return Err(format!("Task '{task_id}' does not exist"));
        }
        if blocker_status == TaskStatus::Completed {
            // Dependency already satisfied; nothing to record.
            return Ok(());
        }
        if self.would_create_cycle(task_id, blocked_by) {
            return Err(format!(
                "Adding dependency {task_id} -> {blocked_by} would create a cycle"
            ));
        }
        if let Some(task) = self.tasks.get_mut(task_id) {
            task.blocked_by.insert(blocked_by.to_string());
            // Only demote Pending tasks; a late dependency edge must not
            // clobber an InProgress/Completed/Failed status.
            if task.status == TaskStatus::Pending {
                task.status = TaskStatus::Blocked;
            }
        }
        if let Some(blocker) = self.tasks.get_mut(blocked_by) {
            blocker.blocks.insert(task_id.to_string());
        }
        Ok(())
    }

    /// Marks `task_id` as `Completed` and unblocks its dependents, returning
    /// the ids of tasks that just became runnable (`Blocked` -> `Pending`).
    pub fn complete(&mut self, task_id: &str) -> Vec<String> {
        let mut newly_runnable = Vec::new();
        let dependents: Vec<String> = self
            .tasks
            .get(task_id)
            .map(|t| t.blocks.iter().cloned().collect())
            .unwrap_or_default();
        if let Some(task) = self.tasks.get_mut(task_id) {
            task.status = TaskStatus::Completed;
        }
        for dep_id in &dependents {
            if let Some(dep) = self.tasks.get_mut(dep_id) {
                dep.blocked_by.remove(task_id);
                if dep.blocked_by.is_empty() && dep.status == TaskStatus::Blocked {
                    dep.status = TaskStatus::Pending;
                    newly_runnable.push(dep_id.clone());
                }
            }
        }
        newly_runnable
    }

    /// Marks `task_id` as `Failed`. Dependents stay blocked.
    pub fn fail(&mut self, task_id: &str) {
        if let Some(task) = self.tasks.get_mut(task_id) {
            task.status = TaskStatus::Failed;
        }
    }

    /// Claims a runnable, unowned task for `owner`, moving it to
    /// `InProgress`. Returns false if the task is missing, blocked, or
    /// already owned.
    pub fn claim(&mut self, task_id: &str, owner: &str) -> bool {
        if let Some(task) = self.tasks.get_mut(task_id)
            && task.is_runnable()
            && task.owner.is_none()
        {
            task.owner = Some(owner.to_string());
            task.status = TaskStatus::InProgress;
            return true;
        }
        false
    }

    /// Looks up a task by id.
    pub fn get(&self, task_id: &str) -> Option<&TaskNode> {
        self.tasks.get(task_id)
    }

    /// All tasks, sorted by numeric id (non-numeric ids sort first).
    pub fn list(&self) -> Vec<&TaskNode> {
        let mut tasks: Vec<&TaskNode> = self.tasks.values().collect();
        tasks.sort_by_key(|t| t.id.parse::<usize>().unwrap_or(0));
        tasks
    }

    /// DFS from `blocked_by` along existing `blocked_by` edges: if `task_id`
    /// is reachable, the proposed edge would close a cycle.
    fn would_create_cycle(&self, task_id: &str, blocked_by: &str) -> bool {
        let mut visited = HashSet::new();
        let mut stack = vec![blocked_by.to_string()];
        while let Some(current) = stack.pop() {
            if current == task_id {
                return true;
            }
            if visited.insert(current.clone())
                && let Some(task) = self.tasks.get(&current)
            {
                for dep in &task.blocked_by {
                    stack.push(dep.clone());
                }
            }
        }
        false
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Ids are minted sequentially starting at "1".
    #[test]
    fn test_create_and_list() {
        let mut queue = TaskQueue::new();
        let id1 = queue.create(
            "Research".into(),
            "Research auth patterns".into(),
            None,
            None,
        );
        let id2 = queue.create("Implement".into(), "Write the code".into(), None, None);
        assert_eq!(id1, "1");
        assert_eq!(id2, "2");
        assert_eq!(queue.list().len(), 2);
    }
    // A dependent task is Blocked until its blocker completes, then becomes
    // runnable again and is reported by complete().
    #[test]
    fn test_dependency_and_completion() {
        let mut queue = TaskQueue::new();
        let id1 = queue.create("Step 1".into(), "".into(), None, None);
        let id2 = queue.create("Step 2".into(), "".into(), None, None);
        queue.add_dependency(&id2, &id1).unwrap();
        assert!(queue.get(&id1).unwrap().is_runnable());
        assert!(!queue.get(&id2).unwrap().is_runnable());
        assert_eq!(queue.get(&id2).unwrap().status, TaskStatus::Blocked);
        let unblocked = queue.complete(&id1);
        assert_eq!(unblocked, vec![id2.clone()]);
        assert!(queue.get(&id2).unwrap().is_runnable());
    }
    // A task with two blockers only unblocks after BOTH complete.
    #[test]
    fn test_fan_in_dependency() {
        let mut queue = TaskQueue::new();
        let id1 = queue.create("A".into(), "".into(), None, None);
        let id2 = queue.create("B".into(), "".into(), None, None);
        let id3 = queue.create("C (needs A and B)".into(), "".into(), None, None);
        queue.add_dependency(&id3, &id1).unwrap();
        queue.add_dependency(&id3, &id2).unwrap();
        assert!(!queue.get(&id3).unwrap().is_runnable());
        let unblocked = queue.complete(&id1);
        assert!(unblocked.is_empty());
        assert!(!queue.get(&id3).unwrap().is_runnable());
        let unblocked = queue.complete(&id2);
        assert_eq!(unblocked, vec![id3.clone()]);
        assert!(queue.get(&id3).unwrap().is_runnable());
    }
    // A -> B followed by B -> A must be rejected as a cycle.
    #[test]
    fn test_cycle_detection() {
        let mut queue = TaskQueue::new();
        let id1 = queue.create("A".into(), "".into(), None, None);
        let id2 = queue.create("B".into(), "".into(), None, None);
        queue.add_dependency(&id2, &id1).unwrap();
        let result = queue.add_dependency(&id1, &id2);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("cycle"));
    }
    // Self-dependencies are rejected outright.
    #[test]
    fn test_self_dependency_rejected() {
        let mut queue = TaskQueue::new();
        let id1 = queue.create("A".into(), "".into(), None, None);
        let result = queue.add_dependency(&id1, &id1);
        assert!(result.is_err());
    }
    // First claim wins; a second claim on the same task fails.
    #[test]
    fn test_claim() {
        let mut queue = TaskQueue::new();
        let id1 = queue.create("Task".into(), "".into(), None, None);
        assert!(queue.claim(&id1, "worker-1"));
        assert!(!queue.claim(&id1, "worker-2"));
        assert_eq!(queue.get(&id1).unwrap().status, TaskStatus::InProgress);
    }
}
+22 -11
View File
@@ -6,7 +6,6 @@ use gman::providers::local::LocalProvider;
use indoc::formatdoc;
use inquire::validator::Validation;
use inquire::{Confirm, Password, PasswordDisplayMode, Text, min_length, required};
use std::borrow::Cow;
use std::path::PathBuf;
pub fn ensure_password_file_initialized(local_provider: &mut LocalProvider) -> Result<()> {
@@ -166,18 +165,30 @@ pub fn create_vault_password_file(vault: &mut Vault) -> Result<()> {
Ok(())
}
pub fn interpolate_secrets<'a>(content: &'a str, vault: &Vault) -> (Cow<'a, str>, Vec<String>) {
pub fn interpolate_secrets(content: &str, vault: &Vault) -> (String, Vec<String>) {
let mut missing_secrets = vec![];
let parsed_content = SECRET_RE.replace_all(content, |caps: &fancy_regex::Captures<'_>| {
let secret = vault.get_secret(caps[1].trim(), false);
match secret {
Ok(s) => s,
Err(_) => {
missing_secrets.push(caps[1].to_string());
"".to_string()
let parsed_content: String = content
.lines()
.map(|line| {
if line.trim_start().starts_with('#') {
return line.to_string();
}
}
});
SECRET_RE
.replace_all(line, |caps: &fancy_regex::Captures<'_>| {
let secret = vault.get_secret(caps[1].trim(), false);
match secret {
Ok(s) => s,
Err(_) => {
missing_secrets.push(caps[1].to_string());
"".to_string()
}
}
})
.to_string()
})
.collect::<Vec<_>>()
.join("\n");
(parsed_content, missing_secrets)
}