diff --git a/assets/arena.html b/assets/arena.html new file mode 100644 index 0000000..92a71bb --- /dev/null +++ b/assets/arena.html @@ -0,0 +1,1106 @@ + + + + + + + + Loki LLM Arena + + + + + + + + + + + + + +
+
+ +
+
+
+ +
+ +
+ + +
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/assets/functions/scripts/run-agent.py b/assets/functions/scripts/run-agent.py
new file mode 100755
index 0000000..6125c5a
--- /dev/null
+++ b/assets/functions/scripts/run-agent.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+
+# Usage: ./{agent_name}.py <agent_func> <agent_data>
+
+import os
+import re
+import json
+import sys
+import importlib.util
+from pathlib import Path
+
+def _ensure_cwd_venv():
+    cwd = Path.cwd()
+    venv_dir = cwd / ".venv"
+    if not venv_dir.is_dir():
+        return
+
+    py = venv_dir / ("Scripts/python.exe" if os.name == "nt" else "bin/python")
+    if not py.exists():
+        return
+
+    if Path(sys.prefix).resolve() == venv_dir.resolve():
+        return
+
+    os.execv(str(py), [str(py)] + sys.argv)
+
+_ensure_cwd_venv()
+
+
+def main():
+    (agent_func, raw_data) = parse_argv()
+    agent_data = parse_raw_data(raw_data)
+
+    root_dir = "{config_dir}"
+    setup_env(root_dir, agent_func)
+
+    agent_tools_path = os.path.join(root_dir, "agents/{agent_name}/tools.py")
+    run(agent_tools_path, agent_func, agent_data)
+
+
+def parse_raw_data(data):
+    if not data:
+        raise ValueError("No JSON data")
+
+    try:
+        return json.loads(data)
+    except Exception:
+        raise ValueError("Invalid JSON data")
+
+
+def parse_argv():
+    agent_func = sys.argv[1] if len(sys.argv) > 1 else None
+    agent_data = sys.argv[2] if len(sys.argv) > 2 else None
+
+    if (not agent_data) or (not agent_func):
+        print("Usage: ./{agent_name}.py <agent_func> <agent_data>", file=sys.stderr)
+        sys.exit(1)
+
+    return agent_func, agent_data
+
+
+def setup_env(root_dir, agent_func):
+    load_env(os.path.join(root_dir, ".env"))
+    os.environ["LLM_ROOT_DIR"] = root_dir
+    os.environ["LLM_AGENT_NAME"] = "{agent_name}"
+    os.environ["LLM_AGENT_FUNC"] = agent_func
+    os.environ["LLM_AGENT_ROOT_DIR"] = os.path.join(root_dir, "agents", "{agent_name}")
+    os.environ["LLM_AGENT_CACHE_DIR"] = os.path.join(root_dir, "cache", "{agent_name}")
+
+
+def load_env(file_path):
+    try:
+        with open(file_path, "r") as f:
+            lines = f.readlines()
+    except:
+        return
+
+    env_vars = {}
+
+    for line in lines:
+        line = line.strip()
+        if line.startswith("#") or not line:
+            continue
+
+        key, *value_parts = line.split("=")
+        env_name = key.strip()
+
+        if env_name not in os.environ:
+            env_value = "=".join(value_parts).strip()
+            if (env_value.startswith('"') and env_value.endswith('"')) or (env_value.startswith("'") and env_value.endswith("'")):
+                env_value = env_value[1:-1]
+            env_vars[env_name] = env_value
+
+    os.environ.update(env_vars)
+
+
+def run(agent_path, agent_func, agent_data):
+    spec = importlib.util.spec_from_file_location(
+        os.path.basename(agent_path), agent_path
+    )
+    mod = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(mod)
+
+    if not hasattr(mod, agent_func):
+        raise Exception(f"No module function '{agent_func}' at '{agent_path}'")
+
+    value = getattr(mod, agent_func)(**agent_data)
+    return_to_llm(value)
+    dump_result('{agent_name}' + f':{agent_func}')
+
+
+def return_to_llm(value):
+    if value is None:
+        return
+
+    if "LLM_OUTPUT" in os.environ:
+        writer = open(os.environ["LLM_OUTPUT"], "w")
+    else:
+        writer = sys.stdout
+
+    value_type = type(value).__name__
+    if value_type in ("str", "int", "float", "bool"):
+        writer.write(str(value))
+    elif value_type == "dict" or value_type == "list":
+        value_str = json.dumps(value, indent=2)
+        assert value == json.loads(value_str)
+        writer.write(value_str)
+
+
+def dump_result(name):
+    if (not os.getenv("LLM_DUMP_RESULTS")) or (not os.getenv("LLM_OUTPUT")) or (not os.isatty(1)):
+        return
+
+    show_result = False
+    try:
+        if re.search(rf'\b({os.environ["LLM_DUMP_RESULTS"]})\b', name):
+            show_result = True
+    except:
+        pass
+
+    if not show_result:
+        return
+
+    try:
+        with open(os.environ["LLM_OUTPUT"], "r", encoding="utf-8") as f:
+            data = f.read()
+    except:
+        return
+
+    print(f"\x1b[2m----------------------\n{data}\n----------------------\x1b[0m")
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/assets/functions/scripts/run-agent.sh b/assets/functions/scripts/run-agent.sh
new file
mode 100644
index 0000000..fac28af
--- /dev/null
+++ b/assets/functions/scripts/run-agent.sh
@@ -0,0 +1,120 @@
+#!/usr/bin/env bash
+
+# Usage: ./{agent_name}.sh <agent_func> <agent_data>
+
+set -e
+
+main() {
+    root_dir="{config_dir}"
+    parse_argv "$@"
+    setup_env
+    tools_path="$root_dir/agents/{agent_name}/tools.sh"
+    run
+}
+
+parse_argv() {
+    agent_func="$1"
+    agent_data="$2"
+    if [[ -z "$agent_data" ]] || [[ -z "$agent_func" ]]; then
+        die "usage: ./{agent_name}.sh <agent_func> <agent_data>"
+    fi
+}
+
+setup_env() {
+    load_env "$root_dir/.env"
+    export LLM_ROOT_DIR="$root_dir"
+    export LLM_AGENT_NAME="{agent_name}"
+    export LLM_AGENT_FUNC="$agent_func"
+    export LLM_AGENT_ROOT_DIR="$LLM_ROOT_DIR/agents/{agent_name}"
+    export LLM_AGENT_CACHE_DIR="$LLM_ROOT_DIR/cache/{agent_name}"
+}
+
+load_env() {
+    local env_file="$1" env_vars
+    if [[ -f "$env_file" ]]; then
+        while IFS='=' read -r key value; do
+            if [[ "$key" == $'#'* ]] || [[ -z "$key" ]]; then
+                continue
+            fi
+
+            if [[ -z "${!key+x}" ]]; then
+                env_vars="$env_vars $key=$value"
+            fi
+        done < <(cat "$env_file"; echo "")
+
+        if [[ -n "$env_vars" ]]; then
+            eval "export $env_vars"
+        fi
+    fi
+}
+
+run() {
+    if [[ -z "$agent_data" ]]; then
+        die "error: no JSON data"
+    fi
+
+    if [[ "$OS" == "Windows_NT" ]]; then
+        set -o igncr
+        tools_path="$(cygpath -w "$tools_path")"
+        agent_data="$(echo "$agent_data" | sed 's/\\/\\\\/g')"
+    fi
+
+    jq_script="$(cat <<-'EOF'
+def escape_shell_word:
+    tostring
+    | gsub("'"; "'\"'\"'")
+    | gsub("\n"; "'$'\\n''")
+    | "'\(.)'";
+def to_args:
+    to_entries | .[] |
+    (.key | split("_") | join("-")) as $key |
+    if .value | type == "array" then
+        .value | .[] | "--\($key) \(. | escape_shell_word)"
+    elif .value | type == "boolean" then
+        if .value then "--\($key)" else "" end
+    else
+        "--\($key) \(.value | escape_shell_word)"
+    end;
+[ to_args ] | join(" ")
+EOF
+)"
+    args="$(echo "$agent_data" | jq -r "$jq_script" 2>/dev/null)" || {
+        die "error: invalid JSON data"
+    }
+
+    if [[ -z "$LLM_OUTPUT" ]]; then
+        is_temp_llm_output=1
+        # shellcheck disable=SC2155
+        export LLM_OUTPUT="$(mktemp)"
+    fi
+
+    eval "'$tools_path' '$agent_func' $args"
+
+    if [[ "$is_temp_llm_output" -eq 1 ]]; then
+        cat "$LLM_OUTPUT"
+    else
+        dump_result "{agent_name}:${LLM_AGENT_FUNC}"
+    fi
+}
+
+dump_result() {
+    if [[ "$LLM_OUTPUT" == "/dev/stdout" ]] || [[ -z "$LLM_DUMP_RESULTS" ]] || [[ ! -t 1 ]]; then
+        return;
+    fi
+
+    if grep -q -w -E "$LLM_DUMP_RESULTS" <<<"$1"; then
+        cat <<EOF
+$(echo -e "\e[2m----------------------")
+$(cat "$LLM_OUTPUT")
+$(echo -e "----------------------\e[0m")
+EOF
+    fi
+}
+
+die() {
+    echo "$*" >&2
+    exit 1
+}
+
+main "$@"
+
diff --git a/assets/functions/scripts/run-tool.py b/assets/functions/scripts/run-tool.py
new file mode 100644
index 0000000..b7098a3
--- /dev/null
+++ b/assets/functions/scripts/run-tool.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+
+# Usage: ./{function_name}.py <json_data>
+
+import os
+import re
+import json
+import sys
+import importlib.util
+from pathlib import Path
+
+def _ensure_cwd_venv():
+    cwd = Path.cwd()
+    venv_dir = cwd / ".venv"
+    if not venv_dir.is_dir():
+        return
+
+    py = venv_dir / ("Scripts/python.exe" if os.name == "nt" else "bin/python")
+    if not py.exists():
+        return
+
+    if Path(sys.prefix).resolve() == venv_dir.resolve():
+        return
+
+    os.execv(str(py), [str(py)] + sys.argv)
+
+_ensure_cwd_venv()
+
+
+def main():
+    raw_data = parse_argv()
+    tool_data = parse_raw_data(raw_data)
+
+    root_dir = "{config_dir}/functions"
+    setup_env(root_dir)
+
+    tool_path = os.path.join(root_dir, "tools/{function_name}.py")
+    run(tool_path, "run", tool_data)
+
+
+def parse_raw_data(data):
+    if not data:
+        raise ValueError("No JSON data")
+
+    try:
+        return json.loads(data)
+    except Exception:
+        raise ValueError("Invalid JSON data")
+
+
+def parse_argv():
+    argv = 
sys.argv[:] + [None] * max(0, 2 - len(sys.argv))
+
+    tool_data = argv[1]
+
+    if (not tool_data):
+        print("Usage: ./{function_name}.py <json_data>", file=sys.stderr)
+        sys.exit(1)
+
+    return tool_data
+
+
+def setup_env(root_dir):
+    load_env(os.path.join(root_dir, ".env"))
+    os.environ["LLM_ROOT_DIR"] = root_dir
+    os.environ["LLM_TOOL_NAME"] = "{function_name}"
+    os.environ["LLM_TOOL_CACHE_DIR"] = os.path.join(root_dir, "cache", "{function_name}")
+
+
+def load_env(file_path):
+    try:
+        with open(file_path, "r") as f:
+            lines = f.readlines()
+    except:
+        return
+
+    env_vars = {}
+
+    for line in lines:
+        line = line.strip()
+        if line.startswith("#") or not line:
+            continue
+
+        key, *value_parts = line.split("=")
+        env_name = key.strip()
+
+        if env_name not in os.environ:
+            env_value = "=".join(value_parts).strip()
+            if (env_value.startswith('"') and env_value.endswith('"')) or (env_value.startswith("'") and env_value.endswith("'")):
+                env_value = env_value[1:-1]
+            env_vars[env_name] = env_value
+
+    os.environ.update(env_vars)
+
+
+def run(tool_path, tool_func, tool_data):
+    spec = importlib.util.spec_from_file_location(
+        os.path.basename(tool_path), tool_path
+    )
+    mod = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(mod)
+
+    if not hasattr(mod, tool_func):
+        raise Exception(f"No module function '{tool_func}' at '{tool_path}'")
+
+    value = getattr(mod, tool_func)(**tool_data)
+    return_to_llm(value)
+    dump_result("{function_name}")
+
+
+def return_to_llm(value):
+    if value is None:
+        return
+
+    if "LLM_OUTPUT" in os.environ:
+        writer = open(os.environ["LLM_OUTPUT"], "w")
+    else:
+        writer = sys.stdout
+
+    value_type = type(value).__name__
+    if value_type in ("str", "int", "float", "bool"):
+        writer.write(str(value))
+    elif value_type == "dict" or value_type == "list":
+        value_str = json.dumps(value, indent=2)
+        assert value == json.loads(value_str)
+        writer.write(value_str)
+
+
+def dump_result(name):
+    if (not os.getenv("LLM_DUMP_RESULTS")) or (not os.getenv("LLM_OUTPUT")) or (not os.isatty(1)):
+        return
+
+    show_result = False
+    try:
+        if re.search(rf'\b({os.environ["LLM_DUMP_RESULTS"]})\b', name):
+            show_result = True
+    except:
+        pass
+
+    if not show_result:
+        return
+
+    try:
+        with open(os.environ["LLM_OUTPUT"], "r", encoding="utf-8") as f:
+            data = f.read()
+    except:
+        return
+
+    print(f"\x1b[2m----------------------\n{data}\n----------------------\x1b[0m")
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/assets/functions/scripts/run-tool.sh b/assets/functions/scripts/run-tool.sh
new file mode 100644
index 0000000..02b9bb6
--- /dev/null
+++ b/assets/functions/scripts/run-tool.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+# Usage: ./{function_name}.sh <json_data>
+
+set -e
+
+main() {
+    root_dir="{config_dir}/functions"
+    parse_argv "$@"
+    setup_env
+    tool_path="$root_dir/tools/{function_name}.sh"
+    run
+}
+
+parse_argv() {
+    tool_data="$1"
+    if [[ -z "$tool_data" ]]; then
+        die "usage: ./{function_name}.sh <json_data>"
+    fi
+}
+
+setup_env() {
+    load_env "$root_dir/.env"
+    export LLM_ROOT_DIR="$root_dir"
+    export LLM_TOOL_NAME="{function_name}"
+    export LLM_TOOL_CACHE_DIR="$LLM_ROOT_DIR/cache/{function_name}"
+}
+
+load_env() {
+    local env_file="$1" env_vars
+    if [[ -f "$env_file" ]]; then
+        while IFS='=' read -r key value; do
+            if [[ "$key" == $'#'* ]] || [[ -z "$key" ]]; then
+                continue
+            fi
+
+            if [[ -z "${!key+x}" ]]; then
+                env_vars="$env_vars $key=$value"
+            fi
+        done < <(cat "$env_file"; echo "")
+
+        if [[ -n "$env_vars" ]]; then
+            eval "export $env_vars"
+        fi
+    fi
+}
+
+run() {
+    if [[ -z "$tool_data" ]]; then
+        die "error: no JSON data"
+    fi
+
+    if [[ "$OS" == "Windows_NT" ]]; then
+        set -o igncr
+        tool_path="$(cygpath -w "$tool_path")"
+        tool_data="$(echo "$tool_data" | sed 's/\\/\\\\/g')"
+    fi
+
+    jq_script="$(cat <<-'EOF'
+def escape_shell_word:
+    tostring
+    | gsub("'"; "'\"'\"'")
+    | gsub("\n"; "'$'\\n''")
+    | "'\(.)'";
+def to_args:
+    to_entries | .[] |
+    (.key | split("_") | 
join("-")) as $key |
+    if .value | type == "array" then
+        .value | .[] | "--\($key) \(. | escape_shell_word)"
+    elif .value | type == "boolean" then
+        if .value then "--\($key)" else "" end
+    else
+        "--\($key) \(.value | escape_shell_word)"
+    end;
+[ to_args ] | join(" ")
+EOF
+)"
+    args="$(echo "$tool_data" | jq -r "$jq_script" 2>/dev/null)" || {
+        die "error: invalid JSON data"
+    }
+
+    if [[ -z "$LLM_OUTPUT" ]]; then
+        is_temp_llm_output=1
+        # shellcheck disable=SC2155
+        export LLM_OUTPUT="$(mktemp)"
+    fi
+
+    eval "'$tool_path' $args"
+
+    if [[ "$is_temp_llm_output" -eq 1 ]]; then
+        cat "$LLM_OUTPUT"
+    else
+        dump_result "{function_name}"
+    fi
+}
+
+dump_result() {
+    if [[ "$LLM_OUTPUT" == "/dev/stdout" ]] || [[ -z "$LLM_DUMP_RESULTS" ]] || [[ ! -t 1 ]]; then
+        return;
+    fi
+
+    if grep -q -w -E "$LLM_DUMP_RESULTS" <<<"$1"; then
+        cat <<EOF
+$(echo -e "\e[2m----------------------")
+$(cat "$LLM_OUTPUT")
+$(echo -e "----------------------\e[0m")
+EOF
+    fi
+}
+
+die() {
+    echo "$*" >&2
+    exit 1
+}
+
+main "$@"
diff --git a/assets/monokai-extended-light.theme.bin b/assets/monokai-extended-light.theme.bin
new file mode 100644
index 0000000..f9e9696
Binary files /dev/null and b/assets/monokai-extended-light.theme.bin differ
diff --git a/assets/monokai-extended.theme.bin b/assets/monokai-extended.theme.bin
new file mode 100644
index 0000000..510b3fb
Binary files /dev/null and b/assets/monokai-extended.theme.bin differ
diff --git a/assets/playground.html b/assets/playground.html
new file mode 100644
index 0000000..d025325
--- /dev/null
+++ b/assets/playground.html
@@ -0,0 +1,1589 @@
+
+
+
+
+
+
+
+ Loki LLM Playground
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+ +
+
+ + + +
+
+
+
+ +
+
+ + + +
+
+
+ +
+ +
+ + +
+
+
+ +
+
+ + + + \ No newline at end of file diff --git a/assets/roles/code.md b/assets/roles/code.md new file mode 100644 index 0000000..6c835fa --- /dev/null +++ b/assets/roles/code.md @@ -0,0 +1,9 @@ +Provide only code without comments or explanations. +### INPUT: +async sleep in js +### OUTPUT: +```javascript +async function timeout(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} +``` diff --git a/assets/roles/create-prompt.md b/assets/roles/create-prompt.md new file mode 100644 index 0000000..06775a6 --- /dev/null +++ b/assets/roles/create-prompt.md @@ -0,0 +1,25 @@ +As a professional Prompt Engineer, your role is to create effective and innovative prompts for interacting with AI models. + +Your core skills include: +1. **CO-STAR Framework Application**: Utilize the CO-STAR framework to build efficient prompts, ensuring effective communication with large language models. +2. **Contextual Awareness**: Construct prompts that adapt to complex conversation contexts, ensuring relevant and coherent responses. +3. **Chain-of-Thought Prompting**: Create prompts that elicit AI models to demonstrate their reasoning process, enhancing the transparency and accuracy of answers. +4. **Zero-shot Learning**: Design prompts that enable AI models to perform specific tasks without requiring examples, reducing dependence on training data. +5. **Few-shot Learning**: Guide AI models to quickly learn and execute new tasks through a few examples. + +Your output format should include: +- **Context**: Provide comprehensive background information for the task to ensure the AI understands the specific scenario and offers relevant feedback. +- **Objective**: Clearly define the task objective, guiding the AI to focus on achieving specific goals. +- **Style**: Specify writing styles according to requirements, such as imitating a particular person or industry expert. 
+- **Tone**: Set an appropriate emotional tone to ensure the AI's response aligns with the expected emotional context. +- **Audience**: Tailor AI responses for a specific audience, ensuring content appropriateness and ease of understanding. +- **Response**: Specify output formats for easy execution of downstream tasks, such as lists, JSON, or professional reports. +- **Workflow**: Instruct the AI on how to step-by-step complete tasks, clarifying inputs, outputs, and specific actions for each step. +- **Examples**: Show a case of input and output that fits the scenario. + +Your workflow should be: +1. Extract key information from user requests to determine design objectives. +2. Based on user needs, create prompts that meet requirements, with each part being professional and detailed. +3. Must only output the newly generated and optimized prompts, without explanation, without wrapping it in markdown code block. + +My first request is: __INPUT__ diff --git a/assets/roles/create-title.md b/assets/roles/create-title.md new file mode 100644 index 0000000..e6f97ff --- /dev/null +++ b/assets/roles/create-title.md @@ -0,0 +1,11 @@ +Create a concise, 3-6 word title. + +**Notes**: +- Avoid quotation marks or emojis +- RESPOND ONLY WITH TITLE SLUG TEXT + +**Examples**: +stock-market-trends +perfect-chocolate-chip-recipe +remote-work-productivity-tips +video-game-development-insights diff --git a/assets/roles/explain-shell.md b/assets/roles/explain-shell.md new file mode 100644 index 0000000..90d5df0 --- /dev/null +++ b/assets/roles/explain-shell.md @@ -0,0 +1,4 @@ +Provide a terse, single sentence description of the given shell command. +Describe each argument and option of the command. +Provide short responses in about 80 words. +APPLY MARKDOWN formatting when possible. 
\ No newline at end of file diff --git a/assets/roles/functions.md b/assets/roles/functions.md new file mode 100644 index 0000000..a335a04 --- /dev/null +++ b/assets/roles/functions.md @@ -0,0 +1,3 @@ +--- +use_tools: all +--- diff --git a/assets/roles/mcp-servers.md b/assets/roles/mcp-servers.md new file mode 100644 index 0000000..5ddac86 --- /dev/null +++ b/assets/roles/mcp-servers.md @@ -0,0 +1,3 @@ +--- +use_mcp_servers: all +--- diff --git a/assets/roles/shell.md b/assets/roles/shell.md new file mode 100644 index 0000000..f00ce5b --- /dev/null +++ b/assets/roles/shell.md @@ -0,0 +1,5 @@ +Provide only {{__shell__}} commands for {{__os_distro__}} without any description. +Ensure the output is a valid {{__shell__}} command. +If there is a lack of details, provide most logical solution. +If multiple steps are required, try to combine them using '&&' (For PowerShell, use ';' instead). +Output only plain text without any markdown formatting or code-fence tokens. diff --git a/assets/syntaxes.bin b/assets/syntaxes.bin new file mode 100644 index 0000000..df582f5 Binary files /dev/null and b/assets/syntaxes.bin differ