Created initial assets

This commit is contained in:
2025-10-07 10:42:46 -06:00
parent acfc7685f4
commit 377ab91af7
16 changed files with 3298 additions and 0 deletions
+1106
View File
File diff suppressed because it is too large Load Diff
+154
View File
@@ -0,0 +1,154 @@
#!/usr/bin/env python
# Usage: ./{agent_name}.py <agent-func> <agent-data>
import os
import re
import json
import sys
import importlib.util
from pathlib import Path
def _ensure_cwd_venv():
cwd = Path.cwd()
venv_dir = cwd / ".venv"
if not venv_dir.is_dir():
return
py = venv_dir / ("Scripts/python.exe" if os.name == "nt" else "bin/python")
if not py.exists():
return
if Path(sys.prefix).resolve() == venv_dir.resolve():
return
os.execv(str(py), [str(py)] + sys.argv)
_ensure_cwd_venv()
def main():
    """Parse argv, prepare the environment, and dispatch to the agent tool."""
    agent_func, raw_data = parse_argv()
    agent_data = parse_raw_data(raw_data)
    root_dir = "{config_dir}"
    setup_env(root_dir, agent_func)
    tools_py = os.path.join(root_dir, "agents/{agent_name}/tools.py")
    run(tools_py, agent_func, agent_data)
def parse_raw_data(data):
    """Parse *data* as JSON, raising ValueError for empty or invalid input.

    The original cause is chained so tracebacks show where parsing failed.
    """
    if not data:
        raise ValueError("No JSON data")
    try:
        return json.loads(data)
    except (json.JSONDecodeError, TypeError) as exc:
        raise ValueError("Invalid JSON data") from exc
def parse_argv():
    """Return (agent_func, agent_data) from argv, exiting with usage on missing args.

    Pads sys.argv so a missing argument prints the usage message instead of
    raising IndexError (matching the sibling tool-runner script's behavior).
    """
    argv = sys.argv[:] + [None] * max(0, 3 - len(sys.argv))
    agent_func = argv[1]
    agent_data = argv[2]
    if (not agent_data) or (not agent_func):
        print("Usage: ./{agent_name}.py <agent-func> <agent-data>", file=sys.stderr)
        sys.exit(1)
    return agent_func, agent_data
def setup_env(root_dir, agent_func):
    """Load .env and export the LLM_* variables describing this agent run."""
    load_env(os.path.join(root_dir, ".env"))
    os.environ.update({
        "LLM_ROOT_DIR": root_dir,
        "LLM_AGENT_NAME": "{agent_name}",
        "LLM_AGENT_FUNC": agent_func,
        "LLM_AGENT_ROOT_DIR": os.path.join(root_dir, "agents", "{agent_name}"),
        "LLM_AGENT_CACHE_DIR": os.path.join(root_dir, "cache", "{agent_name}"),
    })
def load_env(file_path):
    """Merge KEY=VALUE pairs from *file_path* into os.environ.

    Already-set environment variables win. A missing or unreadable file is
    silently ignored. Comment lines, blank lines, and malformed lines with
    no '=' (or an empty key) are skipped — previously a line like "FOO"
    would silently export FOO="".
    """
    try:
        with open(file_path, "r") as f:
            lines = f.readlines()
    except OSError:  # narrow: only file-access errors are expected here
        return
    env_vars = {}
    for line in lines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        env_name, sep, env_value = line.partition("=")
        env_name = env_name.strip()
        if not sep or not env_name or env_name in os.environ:
            continue
        env_value = env_value.strip()
        # Strip one pair of matching surrounding quotes, if present.
        if (env_value.startswith('"') and env_value.endswith('"')) or (env_value.startswith("'") and env_value.endswith("'")):
            env_value = env_value[1:-1]
        env_vars[env_name] = env_value
    os.environ.update(env_vars)
def run(agent_path, agent_func, agent_data):
    """Load the tools module from *agent_path* and invoke *agent_func*.

    *agent_data* is a dict of keyword arguments for the function. The return
    value is forwarded to the LLM and optionally echoed to the terminal.
    """
    spec = importlib.util.spec_from_file_location(
        os.path.basename(agent_path), agent_path
    )
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    if not hasattr(mod, agent_func):
        # Message fixed ("Not" -> "No") and kept consistent with the
        # sibling function-tool runner script.
        raise Exception(f"No module function '{agent_func}' at '{agent_path}'")
    value = getattr(mod, agent_func)(**agent_data)
    return_to_llm(value)
    dump_result('{agent_name}' + f':{agent_func}')
def return_to_llm(value):
    """Write *value* to the $LLM_OUTPUT file (or stdout) for the LLM to read.

    Scalars are written verbatim; dicts/lists as pretty-printed JSON. None
    and unsupported types produce no output (unchanged behavior).
    """
    if value is None:
        return
    value_type = type(value).__name__
    if value_type in ("str", "int", "float", "bool"):
        text = str(value)
    elif value_type in ("dict", "list"):
        text = json.dumps(value, indent=2)
    else:
        return
    if "LLM_OUTPUT" in os.environ:
        # Use a context manager so the file is closed (and flushed) before
        # dump_result() reads it back — previously the handle was leaked.
        with open(os.environ["LLM_OUTPUT"], "w") as writer:
            writer.write(text)
    else:
        sys.stdout.write(text)
def dump_result(name):
    """Echo the captured $LLM_OUTPUT to the terminal when *name* matches the
    $LLM_DUMP_RESULTS regex and stdout is a TTY.

    Best-effort: an invalid user pattern or an unreadable output file means
    nothing is printed.
    """
    if (not os.getenv("LLM_DUMP_RESULTS")) or (not os.getenv("LLM_OUTPUT")) or (not os.isatty(1)):
        return
    try:
        show_result = bool(re.search(rf'\b({os.environ["LLM_DUMP_RESULTS"]})\b', name))
    except re.error:  # user-supplied pattern may be an invalid regex
        show_result = False
    if not show_result:
        return
    try:
        with open(os.environ["LLM_OUTPUT"], "r", encoding="utf-8") as f:
            data = f.read()
    except OSError:  # output file may be missing or unreadable
        return
    print(f"\x1b[2m----------------------\n{data}\n----------------------\x1b[0m")
if __name__ == "__main__":
main()
+120
View File
@@ -0,0 +1,120 @@
#!/usr/bin/env bash
# Usage: ./{agent_name}.sh <agent-func> <agent-data>
set -e
# Entry point. Usage: ./{agent_name}.sh <agent-func> <agent-data>
main() {
    root_dir="{config_dir}"
    parse_argv "$@"
    setup_env
    # Global consumed by run(): path to this agent's tool implementations.
    tools_path="$root_dir/agents/{agent_name}/tools.sh"
    run
}
# Capture the two required positional arguments into globals.
parse_argv() {
    agent_func="$1"
    agent_data="$2"
    [[ -n "$agent_func" && -n "$agent_data" ]] || die "usage: ./{agent_name}.sh <agent-func> <agent-data>"
}
# Export the LLM_* variables that describe this agent invocation.
setup_env() {
    load_env "$root_dir/.env"
    export LLM_ROOT_DIR="$root_dir"
    export LLM_AGENT_NAME="{agent_name}"
    export LLM_AGENT_FUNC="$agent_func"
    export LLM_AGENT_ROOT_DIR="$LLM_ROOT_DIR/agents/{agent_name}"
    export LLM_AGENT_CACHE_DIR="$LLM_ROOT_DIR/cache/{agent_name}"
}
# Export KEY=VALUE pairs from an env file, never overriding variables that
# are already set in the environment.
load_env() {
    local env_file="$1" env_vars
    if [[ -f "$env_file" ]]; then
        while IFS='=' read -r key value; do
            # Skip comment lines and blank lines.
            if [[ "$key" == $'#'* ]] || [[ -z "$key" ]]; then
                continue
            fi
            # ${!key+x} expands only when $key is already set — existing wins.
            if [[ -z "${!key+x}" ]]; then
                env_vars="$env_vars $key=$value"
            fi
        done < <(cat "$env_file"; echo "")  # trailing echo: read the last line even without newline
        if [[ -n "$env_vars" ]]; then
            eval "export $env_vars"
        fi
    fi
}
# Validate the JSON payload, convert it to CLI flags with jq, and execute
# the agent tool, capturing its output in $LLM_OUTPUT.
run() {
    if [[ -z "$agent_data" ]]; then
        die "error: no JSON data"
    fi
    if [[ "$OS" == "Windows_NT" ]]; then
        set -o igncr
        tools_path="$(cygpath -w "$tools_path")"
        # Fix: escape backslashes in $agent_data — this previously operated
        # on $tool_data, a variable that is never set in this script.
        agent_data="$(echo "$agent_data" | sed 's/\\/\\\\/g')"
    fi
    # jq program: map {"some_key": v} to "--some-key 'v'", handling arrays,
    # booleans (bare flag), and single-quote/newline shell escaping.
    jq_script="$(cat <<-'EOF'
def escape_shell_word:
    tostring
    | gsub("'"; "'\"'\"'")
    | gsub("\n"; "'$'\\n''")
    | "'\(.)'";
def to_args:
    to_entries | .[] |
    (.key | split("_") | join("-")) as $key |
    if .value | type == "array" then
        .value | .[] | "--\($key) \(. | escape_shell_word)"
    elif .value | type == "boolean" then
        if .value then "--\($key)" else "" end
    else
        "--\($key) \(.value | escape_shell_word)"
    end;
[ to_args ] | join(" ")
EOF
    )"
    args="$(echo "$agent_data" | jq -r "$jq_script" 2>/dev/null)" || {
        die "error: invalid JSON data"
    }
    if [[ -z "$LLM_OUTPUT" ]]; then
        # No output file given: capture to a temp file and echo it afterwards.
        is_temp_llm_output=1
        # shellcheck disable=SC2155
        export LLM_OUTPUT="$(mktemp)"
    fi
    eval "'$tools_path' '$agent_func' $args"
    if [[ "$is_temp_llm_output" -eq 1 ]]; then
        cat "$LLM_OUTPUT"
    else
        dump_result "{agent_name}:${LLM_AGENT_FUNC}"
    fi
}
# Print the captured tool output to the terminal when the agent:function
# name matches the $LLM_DUMP_RESULTS pattern and stdout is a TTY.
dump_result() {
    if [[ "$LLM_OUTPUT" == "/dev/stdout" ]] || [[ -z "$LLM_DUMP_RESULTS" ]] || [[ ! -t 1 ]]; then
        return;
    fi
    # grep -w -E: whole-word match against a user-supplied extended regex.
    if grep -q -w -E "$LLM_DUMP_RESULTS" <<<"$1"; then
        cat <<EOF
$(echo -e "\e[2m")----------------------
$(cat "$LLM_OUTPUT")
----------------------$(echo -e "\e[0m")
EOF
    fi
}
# Print an error message to stderr and abort the script.
die() {
    printf '%s\n' "$*" >&2
    exit 1
}
main "$@"
+153
View File
@@ -0,0 +1,153 @@
#!/usr/bin/env python
# Usage: ./{function_name}.py <tool-data>
import os
import re
import json
import sys
import importlib.util
from pathlib import Path
def _ensure_cwd_venv():
cwd = Path.cwd()
venv_dir = cwd / ".venv"
if not venv_dir.is_dir():
return
py = venv_dir / ("Scripts/python.exe" if os.name == "nt" else "bin/python")
if not py.exists():
return
if Path(sys.prefix).resolve() == venv_dir.resolve():
return
os.execv(str(py), [str(py)] + sys.argv)
_ensure_cwd_venv()
def main():
    """Parse argv, prepare the environment, and invoke the tool's `run`."""
    tool_data = parse_raw_data(parse_argv())
    root_dir = "{config_dir}/functions"
    setup_env(root_dir)
    tool_py = os.path.join(root_dir, "tools/{function_name}.py")
    run(tool_py, "run", tool_data)
def parse_raw_data(data):
    """Parse *data* as JSON, raising ValueError for empty or invalid input.

    The original cause is chained so tracebacks show where parsing failed.
    """
    if not data:
        raise ValueError("No JSON data")
    try:
        return json.loads(data)
    except (json.JSONDecodeError, TypeError) as exc:
        raise ValueError("Invalid JSON data") from exc
def parse_argv():
    """Return the raw JSON argument, exiting with a usage message when absent."""
    tool_data = sys.argv[1] if len(sys.argv) > 1 else None
    if not tool_data:
        print("Usage: ./{function_name}.py <tool-data>", file=sys.stderr)
        sys.exit(1)
    return tool_data
def setup_env(root_dir):
    """Load .env and export the LLM_* variables for this tool invocation."""
    load_env(os.path.join(root_dir, ".env"))
    os.environ.update({
        "LLM_ROOT_DIR": root_dir,
        "LLM_TOOL_NAME": "{function_name}",
        "LLM_TOOL_CACHE_DIR": os.path.join(root_dir, "cache", "{function_name}"),
    })
def load_env(file_path):
    """Merge KEY=VALUE pairs from *file_path* into os.environ.

    Already-set environment variables win. A missing or unreadable file is
    silently ignored. Comment lines, blank lines, and malformed lines with
    no '=' (or an empty key) are skipped — previously a line like "FOO"
    would silently export FOO="".
    """
    try:
        with open(file_path, "r") as f:
            lines = f.readlines()
    except OSError:  # narrow: only file-access errors are expected here
        return
    env_vars = {}
    for line in lines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        env_name, sep, env_value = line.partition("=")
        env_name = env_name.strip()
        if not sep or not env_name or env_name in os.environ:
            continue
        env_value = env_value.strip()
        # Strip one pair of matching surrounding quotes, if present.
        if (env_value.startswith('"') and env_value.endswith('"')) or (env_value.startswith("'") and env_value.endswith("'")):
            env_value = env_value[1:-1]
        env_vars[env_name] = env_value
    os.environ.update(env_vars)
def run(tool_path, tool_func, tool_data):
    """Import the tool module from *tool_path* and call *tool_func*.

    *tool_data* is a dict of keyword arguments for the function; the return
    value is forwarded to the LLM and optionally echoed to the terminal.
    """
    module_name = os.path.basename(tool_path)
    spec = importlib.util.spec_from_file_location(module_name, tool_path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    if not hasattr(mod, tool_func):
        raise Exception(f"No module function '{tool_func}' at '{tool_path}'")
    result = getattr(mod, tool_func)(**tool_data)
    return_to_llm(result)
    dump_result("{function_name}")
def return_to_llm(value):
    """Write *value* to the $LLM_OUTPUT file (or stdout) for the LLM to read.

    Scalars are written verbatim; dicts/lists as pretty-printed JSON. None
    and unsupported types produce no output (unchanged behavior).
    """
    if value is None:
        return
    value_type = type(value).__name__
    if value_type in ("str", "int", "float", "bool"):
        text = str(value)
    elif value_type in ("dict", "list"):
        text = json.dumps(value, indent=2)
    else:
        return
    if "LLM_OUTPUT" in os.environ:
        # Use a context manager so the file is closed (and flushed) before
        # dump_result() reads it back — previously the handle was leaked.
        with open(os.environ["LLM_OUTPUT"], "w") as writer:
            writer.write(text)
    else:
        sys.stdout.write(text)
def dump_result(name):
    """Echo the captured $LLM_OUTPUT to the terminal when *name* matches the
    $LLM_DUMP_RESULTS regex and stdout is a TTY.

    Best-effort: an invalid user pattern or an unreadable output file means
    nothing is printed.
    """
    if (not os.getenv("LLM_DUMP_RESULTS")) or (not os.getenv("LLM_OUTPUT")) or (not os.isatty(1)):
        return
    try:
        show_result = bool(re.search(rf'\b({os.environ["LLM_DUMP_RESULTS"]})\b', name))
    except re.error:  # user-supplied pattern may be an invalid regex
        show_result = False
    if not show_result:
        return
    try:
        with open(os.environ["LLM_OUTPUT"], "r", encoding="utf-8") as f:
            data = f.read()
    except OSError:  # output file may be missing or unreadable
        return
    print(f"\x1b[2m----------------------\n{data}\n----------------------\x1b[0m")
if __name__ == "__main__":
main()
+116
View File
@@ -0,0 +1,116 @@
#!/usr/bin/env bash
# Usage: ./{function_name}.sh <tool-data>
set -e
# Entry point. Usage: ./{function_name}.sh <tool-data>
main() {
    root_dir="{config_dir}/functions"
    parse_argv "$@"
    setup_env
    # Global consumed by run(): path to this tool's implementation script.
    tool_path="$root_dir/tools/{function_name}.sh"
    run
}
# Capture the single required positional argument into a global.
parse_argv() {
    tool_data="$1"
    [[ -n "$tool_data" ]] || die "usage: ./{function_name}.sh <tool-data>"
}
# Export the LLM_* variables that describe this tool invocation.
setup_env() {
    load_env "$root_dir/.env"
    export LLM_ROOT_DIR="$root_dir"
    export LLM_TOOL_NAME="{function_name}"
    export LLM_TOOL_CACHE_DIR="$LLM_ROOT_DIR/cache/{function_name}"
}
# Export KEY=VALUE pairs from an env file, never overriding variables that
# are already set in the environment.
load_env() {
    local env_file="$1" env_vars
    if [[ -f "$env_file" ]]; then
        while IFS='=' read -r key value; do
            # Skip comment lines and blank lines.
            if [[ "$key" == $'#'* ]] || [[ -z "$key" ]]; then
                continue
            fi
            # ${!key+x} expands only when $key is already set — existing wins.
            if [[ -z "${!key+x}" ]]; then
                env_vars="$env_vars $key=$value"
            fi
        done < <(cat "$env_file"; echo "")  # trailing echo: read the last line even without newline
        if [[ -n "$env_vars" ]]; then
            eval "export $env_vars"
        fi
    fi
}
# Validate the JSON payload, convert it to CLI flags with jq, and execute
# the tool script, capturing its output in $LLM_OUTPUT.
run() {
    if [[ -z "$tool_data" ]]; then
        die "error: no JSON data"
    fi
    if [[ "$OS" == "Windows_NT" ]]; then
        # Cygwin/MSYS: ignore CRs, use a Windows path, escape backslashes.
        set -o igncr
        tool_path="$(cygpath -w "$tool_path")"
        tool_data="$(echo "$tool_data" | sed 's/\\/\\\\/g')"
    fi
    # jq program: map {"some_key": v} to "--some-key 'v'", handling arrays,
    # booleans (bare flag), and single-quote/newline shell escaping.
    jq_script="$(cat <<-'EOF'
def escape_shell_word:
    tostring
    | gsub("'"; "'\"'\"'")
    | gsub("\n"; "'$'\\n''")
    | "'\(.)'";
def to_args:
    to_entries | .[] |
    (.key | split("_") | join("-")) as $key |
    if .value | type == "array" then
        .value | .[] | "--\($key) \(. | escape_shell_word)"
    elif .value | type == "boolean" then
        if .value then "--\($key)" else "" end
    else
        "--\($key) \(.value | escape_shell_word)"
    end;
[ to_args ] | join(" ")
EOF
    )"
    args="$(echo "$tool_data" | jq -r "$jq_script" 2>/dev/null)" || {
        die "error: invalid JSON data"
    }
    if [[ -z "$LLM_OUTPUT" ]]; then
        # No output file given: capture to a temp file and echo it afterwards.
        is_temp_llm_output=1
        # shellcheck disable=SC2155
        export LLM_OUTPUT="$(mktemp)"
    fi
    eval "'$tool_path' $args"
    if [[ "$is_temp_llm_output" -eq 1 ]]; then
        cat "$LLM_OUTPUT"
    else
        dump_result "{function_name}"
    fi
}
# Print the captured tool output to the terminal when the tool name matches
# the $LLM_DUMP_RESULTS pattern and stdout is a TTY.
dump_result() {
    if [[ "$LLM_OUTPUT" == "/dev/stdout" ]] || [[ -z "$LLM_DUMP_RESULTS" ]] || [[ ! -t 1 ]]; then
        return;
    fi
    # grep -w -E: whole-word match against a user-supplied extended regex.
    if grep -q -w -E "$LLM_DUMP_RESULTS" <<<"$1"; then
        cat <<EOF
$(echo -e "\e[2m")----------------------
$(cat "$LLM_OUTPUT")
----------------------$(echo -e "\e[0m")
EOF
    fi
}
# Print an error message to stderr and abort the script.
die() {
    printf '%s\n' "$*" >&2
    exit 1
}
main "$@"
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large Load Diff
+9
View File
@@ -0,0 +1,9 @@
Provide only code without comments or explanations.
### INPUT:
async sleep in js
### OUTPUT:
```javascript
async function timeout(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
```
+25
View File
@@ -0,0 +1,25 @@
As a professional Prompt Engineer, your role is to create effective and innovative prompts for interacting with AI models.
Your core skills include:
1. **CO-STAR Framework Application**: Utilize the CO-STAR framework to build efficient prompts, ensuring effective communication with large language models.
2. **Contextual Awareness**: Construct prompts that adapt to complex conversation contexts, ensuring relevant and coherent responses.
3. **Chain-of-Thought Prompting**: Create prompts that elicit AI models to demonstrate their reasoning process, enhancing the transparency and accuracy of answers.
4. **Zero-shot Learning**: Design prompts that enable AI models to perform specific tasks without requiring examples, reducing dependence on training data.
5. **Few-shot Learning**: Guide AI models to quickly learn and execute new tasks through a few examples.
Your output format should include:
- **Context**: Provide comprehensive background information for the task to ensure the AI understands the specific scenario and offers relevant feedback.
- **Objective**: Clearly define the task objective, guiding the AI to focus on achieving specific goals.
- **Style**: Specify writing styles according to requirements, such as imitating a particular person or industry expert.
- **Tone**: Set an appropriate emotional tone to ensure the AI's response aligns with the expected emotional context.
- **Audience**: Tailor AI responses for a specific audience, ensuring content appropriateness and ease of understanding.
- **Response**: Specify output formats for easy execution of downstream tasks, such as lists, JSON, or professional reports.
- **Workflow**: Instruct the AI on how to step-by-step complete tasks, clarifying inputs, outputs, and specific actions for each step.
- **Examples**: Show a case of input and output that fits the scenario.
Your workflow should be:
1. Extract key information from user requests to determine design objectives.
2. Based on user needs, create prompts that meet requirements, with each part being professional and detailed.
3. Output only the newly generated and optimized prompt, without any explanation and without wrapping it in a markdown code block.
My first request is: __INPUT__
+11
View File
@@ -0,0 +1,11 @@
Create a concise, 3-6 word title.
**Notes**:
- Avoid quotation marks or emojis
- RESPOND ONLY WITH TITLE SLUG TEXT
**Examples**:
stock-market-trends
perfect-chocolate-chip-recipe
remote-work-productivity-tips
video-game-development-insights
+4
View File
@@ -0,0 +1,4 @@
Provide a terse, single sentence description of the given shell command.
Describe each argument and option of the command.
Provide short responses in about 80 words.
APPLY MARKDOWN formatting when possible.
+3
View File
@@ -0,0 +1,3 @@
---
use_tools: all
---
+3
View File
@@ -0,0 +1,3 @@
---
use_mcp_servers: all
---
+5
View File
@@ -0,0 +1,5 @@
Provide only {{__shell__}} commands for {{__os_distro__}} without any description.
Ensure the output is a valid {{__shell__}} command.
If details are missing, provide the most logical solution.
If multiple steps are required, try to combine them using '&&' (For PowerShell, use ';' instead).
Output only plain text without any markdown formatting or <think> or </think> tokens.
Binary file not shown.