Initial commit
This commit is contained in:
@@ -0,0 +1,5 @@
|
||||
/target
|
||||
/tmp
|
||||
/.env
|
||||
*.log

# Hestia CLI
|
||||
!cli/**
|
||||
Generated
+10
@@ -0,0 +1,10 @@
|
||||
# Default ignored files
|
||||
/shelf/
|
||||
/workspace.xml
|
||||
# Editor-based HTTP Client requests
|
||||
/httpRequests/
|
||||
# Datasource local storage ignored files
|
||||
/dataSources/
|
||||
/dataSources.local.xml
|
||||
# Zeppelin ignored files
|
||||
/ZeppelinRemoteNotebooks/
|
||||
@@ -0,0 +1,17 @@
|
||||
repos:
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: ensure-no-gcloud-directory
|
||||
name: Ensure there's no gcloud directory in the commit
|
||||
entry: ./.hooks/pre-commit.sh
|
||||
language: script
|
||||
always_run: true
|
||||
files: .*
|
||||
stages: [pre-commit]
|
||||
|
||||
- id: run-approval-tests
|
||||
name: Run Hestia CI approval tests
|
||||
entry: ./.hooks/approval-tests.sh
|
||||
language: script
|
||||
files: .*
|
||||
stages: [pre-push]
|
||||
Generated
+5326
File diff suppressed because it is too large
Load Diff
+106
@@ -0,0 +1,106 @@
|
||||
[package]
|
||||
name = "loki"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["Alex Clarke <alex.j.tusa@gmail.com>"]
|
||||
description = "An all-in-one, batteries included LLM CLI Tool"
|
||||
homepage = "https://github.com/Dark-Alex-17/loki"
|
||||
repository = "https://github.com/Dark-Alex-17/loki"
|
||||
categories = ["command-line-utilities"]
|
||||
keywords = ["chatgpt", "llm", "cli", "ai", "repl"]
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.69"
|
||||
bytes = "1.4.0"
|
||||
clap = { version = "4.5.40", features = ["cargo", "derive", "wrap_help"] }
|
||||
dirs = "6.0.0"
|
||||
futures-util = "0.3.29"
|
||||
inquire = "0.7.0"
|
||||
is-terminal = "0.4.9"
|
||||
reedline = "0.40.0"
|
||||
serde = { version = "1.0.152", features = ["derive"] }
|
||||
serde_json = { version = "1.0.93", features = ["preserve_order"] }
|
||||
serde_yaml = "0.9.17"
|
||||
tokio = { version = "1.34.0", features = ["rt", "time", "macros", "signal", "rt-multi-thread", "full"] }
|
||||
tokio-graceful = "0.2.2"
|
||||
tokio-stream = { version = "0.1.15", default-features = false, features = ["sync"] }
|
||||
crossterm = "0.28.1"
|
||||
chrono = "0.4.23"
|
||||
bincode = { version = "2.0.0", features = ["serde", "std"], default-features = false }
|
||||
parking_lot = "0.12.1"
|
||||
fancy-regex = "0.14.0"
|
||||
base64 = "0.22.0"
|
||||
nu-ansi-term = "0.50.0"
|
||||
async-trait = "0.1.74"
|
||||
textwrap = "0.16.0"
|
||||
ansi_colours = "1.2.2"
|
||||
reqwest-eventsource = "0.6.0"
|
||||
log = "0.4.28"
|
||||
log4rs = { version = "1.4.0", features = ["file_appender"] }
|
||||
shell-words = "1.1.0"
|
||||
sha2 = "0.10.8"
|
||||
unicode-width = "0.2.0"
|
||||
async-recursion = "1.1.1"
|
||||
http = "1.1.0"
|
||||
http-body-util = "0.1"
|
||||
hyper = { version = "1.0", features = ["full"] }
|
||||
hyper-util = { version = "0.1", features = ["server-auto", "client-legacy"] }
|
||||
time = { version = "0.3.36", features = ["macros"] }
|
||||
indexmap = { version = "2.2.6", features = ["serde"] }
|
||||
hmac = "0.12.1"
|
||||
aws-smithy-eventstream = "0.60.4"
|
||||
urlencoding = "2.1.3"
|
||||
unicode-segmentation = "1.11.0"
|
||||
json-patch = { version = "4.0.0", default-features = false }
|
||||
bitflags = "2.5.0"
|
||||
path-absolutize = "3.1.1"
|
||||
hnsw_rs = "0.3.0"
|
||||
rayon = "1.10.0"
|
||||
uuid = { version = "1.9.1", features = ["v4"] }
|
||||
scraper = { version = "0.23.1", default-features = false, features = ["deterministic"] }
|
||||
sys-locale = "0.3.1"
|
||||
html_to_markdown = "0.1.0"
|
||||
rust-embed = "8.5.0"
|
||||
os_info = { version = "3.8.2", default-features = false }
|
||||
bm25 = { version = "2.0.1", features = ["parallelism"] }
|
||||
which = "8.0.0"
|
||||
fuzzy-matcher = "0.3.7"
|
||||
terminal-colorsaurus = "0.4.8"
|
||||
duct = "1.0.0"
|
||||
argc = "1.23.0"
|
||||
strum_macros = "0.27.2"
|
||||
indoc = "2.0.6"
|
||||
rmcp = { version = "0.6.1", features = ["client", "transport-child-process"] }
|
||||
num_cpus = "1.17.0"
|
||||
rustpython-parser = "0.4.0"
|
||||
rustpython-ast = "0.4.0"
|
||||
colored = "3.0.0"
|
||||
clap_complete = { version = "4.5.58", features = ["unstable-dynamic"] }
|
||||
|
||||
[dependencies.reqwest]
|
||||
version = "0.12.0"
|
||||
features = ["json", "multipart", "socks", "rustls-tls", "rustls-tls-native-roots"]
|
||||
default-features = false
|
||||
|
||||
[dependencies.syntect]
|
||||
version = "5.0.0"
|
||||
default-features = false
|
||||
features = ["parsing", "regex-onig", "plist-load"]
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dependencies]
|
||||
crossterm = { version = "0.28.1", features = ["use-dev-tty"] }
|
||||
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
arboard = { version = "3.3.0", default-features = false, features = ["wayland-data-control"] }
|
||||
|
||||
[target.'cfg(not(any(target_os = "linux", target_os = "android", target_os = "emscripten")))'.dependencies]
|
||||
arboard = { version = "3.3.0", default-features = false }
|
||||
|
||||
[dev-dependencies]
|
||||
pretty_assertions = "1.4.0"
|
||||
rand = "0.9.0"
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
strip = true
|
||||
opt-level = "z"
|
||||
@@ -1,2 +1,3 @@
|
||||
# loki
|
||||
An all-in-one, batteries included LLM CLI tool
|
||||
# Loki: An all-in-one, batteries included LLM CLI Tool
|
||||
|
||||
Loki is an all-in-one LLM CLI tool.
|
||||
@@ -0,0 +1,69 @@
|
||||
# Agent-specific configuration
|
||||
# Location `<loki-config-dir>/agents/<agent-name>/config.yaml`
|
||||
#
|
||||
# Available Environment Variables:
|
||||
# - <agent-name>_MODEL
|
||||
# - <agent-name>_TEMPERATURE
|
||||
# - <agent-name>_TOP_P
|
||||
# - <agent-name>_AGENT_PRELUDE
|
||||
# - <agent-name>_VARIABLES (as JSON array of key-value pairs; e.g. '[{"name": "username", "value": "alex"}]')
|
||||
|
||||
model: openai:gpt-4o # Specify the LLM to use
|
||||
temperature: null # Set default temperature parameter, range (0, 1)
|
||||
top_p: null # Set default top-p parameter, with a range of (0, 1) or (0, 2) depending on the model
|
||||
agent_prelude: null # Set a session to use when starting the agent. (e.g. temp, default); defaults to globally set agent_prelude
|
||||
name: <agent-name> # Name of the agent, used in the UI and logs
|
||||
description: <description> # Description of the agent, used in the UI
|
||||
version: 1 # Version of the agent
|
||||
mcp_servers: # Optional list of MCP servers to use for the agent
|
||||
- github # Corresponds to the name of an MCP server in the `<loki-config-dir>/functions/mcp.json` file
|
||||
global_tools: # Optional list of additional global tools to enable for the agent; i.e. not tools specific to the agent
|
||||
- web_search
|
||||
- fs
|
||||
- python
|
||||
dynamic_instructions: false # Whether to use dynamic instructions for the agent; if false, static instructions are used
|
||||
instructions: | # Static instructions for the agent; ignored if dynamic instructions are used
|
||||
You are an AI agent designed to demonstrate agent capabilities.
|
||||
|
||||
<tools>
|
||||
{{__tools__}}
|
||||
</tools>
|
||||
|
||||
<system>
|
||||
os: {{__os__}}
|
||||
os_family: {{__os_family__}}
|
||||
arch: {{__arch__}}
|
||||
shell: {{__shell__}}
|
||||
locale: {{__locale__}}
|
||||
now: {{__now__}}
|
||||
cwd: {{__cwd__}}
|
||||
</system>
|
||||
|
||||
<user>
|
||||
username: {{username}}
|
||||
</user>
|
||||
variables: # Optional variables for the agent
|
||||
# The variables defined above like {{__variable_name__}} are automatically available
|
||||
- name: username
|
||||
description: Your user name
|
||||
default: null # A default value for this variable; if null, the variable must be provided when starting the agent
|
||||
conversation_starters: # Optional conversation starters for the agent
|
||||
- What is the meaning of life?
|
||||
- Tell me a joke.
|
||||
- What is the capital of France?
|
||||
- How do I make a cake?
|
||||
- What is the best way to learn programming?
|
||||
- How do I improve my writing skills?
|
||||
- What are some good books to read?
|
||||
- How do I stay motivated?
|
||||
- What is the best way to exercise?
|
||||
- How do I manage my time effectively?
|
||||
documents: # Optional documents to load for the agent
|
||||
- git:/some/repo # Explicitly tell Loki to use the 'git' document loader using an absolute path
|
||||
- pdf:some-pdf-file.pdf # Explicitly tell Loki to use the 'pdf' document loader using a relative path
|
||||
- https://some-website.com/some-page
|
||||
- some-file.pdf # File with relative path to the <loki-config-dir>/agents/<agent-name> directory; i.e. file in the same directory as this config file
|
||||
- ~/some-file.txt # File in the user's home directory
|
||||
- /absolute/path/to/some-file.md # File with absolute path
|
||||
- /absolute/path/**/NAME.txt # Find all NAME.txt files in the specified directory and all its subdirectories
|
||||
- /absolute/path/to/*/README.md # Find all README.md files in all immediate subdirectories of the specified directory (depth=1)
|
||||
@@ -0,0 +1,317 @@
|
||||
# ---- llm ----
|
||||
model: openai:gpt-4o # Specify the LLM to use
|
||||
temperature: null # Set default temperature parameter (0, 1)
|
||||
top_p: null # Set default top-p parameter, with a range of (0, 1) or (0, 2) depending on the model
|
||||
|
||||
# ---- behavior ----
|
||||
stream: true # Controls whether to use the stream-style API.
|
||||
save: true # Indicates whether to persist the message
|
||||
keybindings: emacs # Choose keybinding style (emacs, vi)
|
||||
editor: null # Specifies the command used to edit input buffer or session. (e.g. vim, emacs, nano).
|
||||
wrap: no # Controls text wrapping (no, auto, <max-width>)
|
||||
wrap_code: false # Enables or disables wrapping of code blocks
|
||||
|
||||
# ---- function-calling ----
|
||||
# TODO reference docs for function calling
|
||||
function_calling: true # Enables or disables function calling (Globally).
|
||||
mapping_tools: # Alias for a tool or toolset
|
||||
fs: 'fs_cat,fs_ls,fs_mkdir,fs_rm,fs_write'
|
||||
use_tools: null # Which tools to use by default. (e.g. 'fs,web_search')
|
||||
|
||||
# ---- prelude ----
|
||||
repl_prelude: null # Set a default role or session for REPL mode (e.g. role:<name>, session:<name>, <session>:<role>)
|
||||
cmd_prelude: null # Set a default role or session for CMD mode (e.g. role:<name>, session:<name>, <session>:<role>)
|
||||
agent_prelude: null # Set a session to use when starting an agent (e.g. temp, default)
|
||||
|
||||
# ---- session ----
|
||||
# Controls the persistence of the session: if true, auto-save; if false, do not save; if null, ask the user
|
||||
save_session: null
|
||||
# Compress session when token count reaches or exceeds this threshold
|
||||
compress_threshold: 4000
|
||||
# Text prompt used for creating a concise summary of session message
|
||||
summarize_prompt: 'Summarize the discussion briefly in 200 words or less to use as a prompt for future context.'
|
||||
# Text prompt used for including the summary of the entire session
|
||||
summary_prompt: 'This is a summary of the chat history as a recap: '
|
||||
|
||||
# ---- RAG ----
|
||||
# See [RAG-Guide](TODO /RAG-Guide link) for more details.
|
||||
rag_embedding_model: null # Specifies the embedding model used for context retrieval
|
||||
rag_reranker_model: null # Specifies the reranker model used for sorting retrieved documents
|
||||
rag_top_k: 5 # Specifies the number of documents to retrieve for answering queries
|
||||
rag_chunk_size: null # Defines the size of chunks for document processing in characters
|
||||
rag_chunk_overlap: null # Defines the overlap between chunks
|
||||
# Defines the query structure using variables like __CONTEXT__ and __INPUT__ to tailor searches to specific needs
|
||||
rag_template: |
|
||||
Answer the query based on the context while respecting the rules. (user query, some textual context and rules, all inside xml tags)
|
||||
|
||||
<context>
|
||||
__CONTEXT__
|
||||
</context>
|
||||
|
||||
<rules>
|
||||
- If you don't know, just say so.
|
||||
- If you are not sure, ask for clarification.
|
||||
- Answer in the same language as the user query.
|
||||
- If the context appears unreadable or of poor quality, tell the user then answer as best as you can.
|
||||
- If the answer is not in the context but you think you know the answer, explain that to the user then answer with your own knowledge.
|
||||
- Answer directly and without using xml tags.
|
||||
</rules>
|
||||
|
||||
<user_query>
|
||||
__INPUT__
|
||||
</user_query>
|
||||
|
||||
# Define document loaders to control how RAG and `.file`/`--file` load files of specific formats.
|
||||
document_loaders:
|
||||
# You can add custom loaders using the following syntax:
|
||||
# <file-extension>: <command-to-load-the-file>
|
||||
# Note: Use `$1` for input file and `$2` for output file. If `$2` is omitted, use stdout as output.
|
||||
pdf: 'pdftotext $1 -' # Load .pdf file, see https://poppler.freedesktop.org to set up pdftotext
|
||||
docx: 'pandoc --to plain $1' # Load .docx file, see https://pandoc.org to set up pandoc
|
||||
|
||||
# ---- appearance ----
|
||||
highlight: true # Controls syntax highlighting
|
||||
light_theme: false # Activates a light color theme when true. env: LOKI_LIGHT_THEME
|
||||
# Custom REPL left/right prompts, TODO reference docs for prompt variables
|
||||
left_prompt:
|
||||
'{color.green}{?session {?agent {agent}>}{session}{?role /}}{!session {?agent {agent}>}}{role}{?rag @{rag}}{color.cyan}{?session )}{!session >}{color.reset} '
|
||||
right_prompt:
|
||||
'{color.purple}{?session {?consume_tokens {consume_tokens}({consume_percent}%)}{!consume_tokens {consume_tokens}}}{color.reset}'
|
||||
|
||||
# ---- misc ----
|
||||
serve_addr: 127.0.0.1:8000 # Server listening address
|
||||
user_agent: null # Set User-Agent HTTP header, use `auto` for loki/<current-version>
|
||||
save_shell_history: true # Whether to save shell execution command to the history file
|
||||
# URL to sync model changes from
|
||||
sync_models_url: https://raw.githubusercontent.com/Dark-Alex-17/loki/refs/heads/main/models.yaml
|
||||
|
||||
# ---- clients ----
|
||||
clients:
|
||||
# All clients have the following configuration:
|
||||
# - type: xxxx
|
||||
# name: xxxx # Only use it to distinguish clients with the same client type. Optional
|
||||
# models:
|
||||
# - name: xxxx # Chat model
|
||||
# max_input_tokens: 100000
|
||||
# supports_vision: true
|
||||
# supports_function_calling: true
|
||||
# - name: xxxx # Embedding model
|
||||
# type: embedding
|
||||
# default_chunk_size: 1500
|
||||
# max_batch_size: 100
|
||||
# - name: xxxx # Reranker model
|
||||
# type: reranker
|
||||
# patch: # Patch api
|
||||
# chat_completions: # Api type, possible values: chat_completions, embeddings, and rerank
|
||||
# <regex>: # The regex to match model names, e.g. '.*' 'gpt-4o' 'gpt-4o|gpt-4-.*'
|
||||
# url: '' # Patch request url
|
||||
# body: # Patch request body
|
||||
# <json>
|
||||
# headers: # Patch request headers
|
||||
# <key>: <value>
|
||||
# extra:
|
||||
# proxy: socks5://127.0.0.1:1080 # Set proxy
|
||||
# connect_timeout: 10 # Set timeout in seconds for connect to api
|
||||
|
||||
# See https://platform.openai.com/docs/quickstart
|
||||
- type: openai
|
||||
api_base: https://api.openai.com/v1 # Optional
|
||||
api_key: xxx
|
||||
organization_id: org-xxx # Optional
|
||||
|
||||
# For any platform compatible with OpenAI's API
|
||||
- type: openai-compatible
|
||||
name: ollama
|
||||
api_base: http://localhost:11434/v1
|
||||
api_key: xxx # Optional
|
||||
models:
|
||||
- name: deepseek-r1
|
||||
max_input_tokens: 131072
|
||||
- name: llama3.1
|
||||
max_input_tokens: 128000
|
||||
supports_function_calling: true
|
||||
- name: llama3.2-vision
|
||||
max_input_tokens: 131072
|
||||
supports_vision: true
|
||||
- name: nomic-embed-text
|
||||
type: embedding
|
||||
default_chunk_size: 1000
|
||||
max_batch_size: 50
|
||||
|
||||
# See https://ai.google.dev/docs
|
||||
- type: gemini
|
||||
api_base: https://generativelanguage.googleapis.com/v1beta
|
||||
api_key: xxx
|
||||
patch:
|
||||
chat_completions:
|
||||
'.*':
|
||||
body:
|
||||
safetySettings:
|
||||
- category: HARM_CATEGORY_HARASSMENT
|
||||
threshold: BLOCK_NONE
|
||||
- category: HARM_CATEGORY_HATE_SPEECH
|
||||
threshold: BLOCK_NONE
|
||||
- category: HARM_CATEGORY_SEXUALLY_EXPLICIT
|
||||
threshold: BLOCK_NONE
|
||||
- category: HARM_CATEGORY_DANGEROUS_CONTENT
|
||||
threshold: BLOCK_NONE
|
||||
|
||||
# See https://docs.anthropic.com/claude/reference/getting-started-with-the-api
|
||||
- type: claude
|
||||
api_base: https://api.anthropic.com/v1 # Optional
|
||||
api_key: xxx
|
||||
|
||||
# See https://docs.mistral.ai/
|
||||
- type: openai-compatible
|
||||
name: mistral
|
||||
api_base: https://api.mistral.ai/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://docs.x.ai/docs
|
||||
- type: openai-compatible
|
||||
name: xai
|
||||
api_base: https://api.x.ai/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://docs.ai21.com/docs/quickstart
|
||||
- type: openai-compatible
|
||||
name: ai21
|
||||
api_base: https://api.ai21.com/studio/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://docs.cohere.com/docs/the-cohere-platform
|
||||
- type: cohere
|
||||
api_base: https://api.cohere.ai/v2 # Optional
|
||||
api_key: xxx
|
||||
|
||||
# See https://docs.perplexity.ai/docs/getting-started
|
||||
- type: openai-compatible
|
||||
name: perplexity
|
||||
api_base: https://api.perplexity.ai
|
||||
api_key: xxx
|
||||
|
||||
# See https://console.groq.com/docs/quickstart
|
||||
- type: openai-compatible
|
||||
name: groq
|
||||
api_base: https://api.groq.com/openai/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart
|
||||
- type: azure-openai
|
||||
api_base: https://{RESOURCE}.openai.azure.com
|
||||
api_key: xxx
|
||||
models:
|
||||
- name: gpt-4o # Model deployment name
|
||||
max_input_tokens: 128000
|
||||
supports_vision: true
|
||||
supports_function_calling: true
|
||||
|
||||
# See https://cloud.google.com/vertex-ai
|
||||
- type: vertexai
|
||||
project_id: xxx
|
||||
location: xxx
|
||||
# Specifies an application-default-credentials (adc) file
|
||||
# Run `gcloud auth application-default login` to init the adc file
|
||||
# see https://cloud.google.com/docs/authentication/external/set-up-adc
|
||||
adc_file: <gcloud-config-dir>/application_default_credentials.json # Optional field
|
||||
patch:
|
||||
chat_completions:
|
||||
'gemini-.*':
|
||||
body:
|
||||
safetySettings:
|
||||
- category: HARM_CATEGORY_HARASSMENT
|
||||
threshold: BLOCK_ONLY_HIGH
|
||||
- category: HARM_CATEGORY_HATE_SPEECH
|
||||
threshold: BLOCK_ONLY_HIGH
|
||||
- category: HARM_CATEGORY_SEXUALLY_EXPLICIT
|
||||
threshold: BLOCK_ONLY_HIGH
|
||||
- category: HARM_CATEGORY_DANGEROUS_CONTENT
|
||||
threshold: BLOCK_ONLY_HIGH
|
||||
|
||||
# See https://docs.aws.amazon.com/bedrock/latest/userguide/
|
||||
- type: bedrock
|
||||
access_key_id: xxx
|
||||
secret_access_key: xxx
|
||||
region: xxx
|
||||
session_token: xxx # Optional, only needed for temporary credentials
|
||||
|
||||
# See https://developers.cloudflare.com/workers-ai/
|
||||
- type: openai-compatible
|
||||
name: cloudflare
|
||||
api_base: https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html
|
||||
- type: openai-compatible
|
||||
name: ernie
|
||||
api_base: https://qianfan.baidubce.com/v2
|
||||
api_key: xxx
|
||||
|
||||
# See https://dashscope.aliyun.com/
|
||||
- type: openai-compatible
|
||||
name: qianwen
|
||||
api_base: https://dashscope.aliyuncs.com/compatible-mode/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://cloud.tencent.com/product/hunyuan
|
||||
- type: openai-compatible
|
||||
name: hunyuan
|
||||
api_base: https://api.hunyuan.cloud.tencent.com/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://platform.moonshot.cn/docs/intro
|
||||
- type: openai-compatible
|
||||
name: moonshot
|
||||
api_base: https://api.moonshot.cn/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://platform.deepseek.com/api-docs/
|
||||
- type: openai-compatible
|
||||
name: deepseek
|
||||
api_base: https://api.deepseek.com
|
||||
api_key: xxx
|
||||
|
||||
# See https://open.bigmodel.cn/dev/howuse/introduction
|
||||
- type: openai-compatible
|
||||
name: zhipuai
|
||||
api_base: https://open.bigmodel.cn/api/paas/v4
|
||||
api_key: xxx
|
||||
|
||||
# See https://platform.minimaxi.com/document/Fast%20access
|
||||
- type: openai-compatible
|
||||
name: minimax
|
||||
api_base: https://api.minimax.chat/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://openrouter.ai/docs#quick-start
|
||||
- type: openai-compatible
|
||||
name: openrouter
|
||||
api_base: https://openrouter.ai/api/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://github.com/marketplace/models
|
||||
- type: openai-compatible
|
||||
name: github
|
||||
api_base: https://models.inference.ai.azure.com
|
||||
api_key: xxx
|
||||
|
||||
# See https://deepinfra.com/docs
|
||||
- type: openai-compatible
|
||||
name: deepinfra
|
||||
api_base: https://api.deepinfra.com/v1/openai
|
||||
api_key: xxx
|
||||
|
||||
|
||||
# ----- RAG dedicated -----
|
||||
|
||||
# See https://jina.ai
|
||||
- type: openai-compatible
|
||||
name: jina
|
||||
api_base: https://api.jina.ai/v1
|
||||
api_key: xxx
|
||||
|
||||
# See https://docs.voyageai.com/docs/introduction
|
||||
- type: openai-compatible
|
||||
name: voyageai
|
||||
api_base: https://api.voyageai.com/v1
|
||||
api_key: xxx
|
||||
+2100
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,10 @@
|
||||
tab_spaces = 4
|
||||
edition = "2024"
|
||||
reorder_imports = true
|
||||
imports_granularity = "Crate"
|
||||
group_imports = "StdExternalCrate"
|
||||
reorder_modules = true
|
||||
merge_derives = true
|
||||
use_field_init_shorthand = true
|
||||
format_macro_matchers = true
|
||||
format_macro_bodies = true
|
||||
Reference in New Issue
Block a user