Add agent template for Forgejo

Lukas Parsons 2026-03-22 22:33:39 -04:00
parent b8edf40010
commit 3dce79e818
6 changed files with 255 additions and 0 deletions

template/.env.example (Normal file, 9 additions)

@@ -0,0 +1,9 @@
# API URL of the skills API (usually helm:8675 on your network)
API_URL=http://helm:8675
# API Key (only required if auth is enabled on the skills API)
# Get this from your skills API config
API_KEY=
# Optional: Project path for context
PROJECT=/home/user/myproject

template/Dockerfile (Normal file, 10 additions)

@@ -0,0 +1,10 @@
FROM python:3.11-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
CMD ["python", "agent.py"]

template/README.md (Normal file, 105 additions)

@@ -0,0 +1,105 @@
# Agent Template
This template provides everything needed to connect an AI agent to the AI Skills API on your home network (`helm:8675`).
## Structure
```
.
├── docker-compose.yml    # Bring up your agent + skills API integration
├── Dockerfile            # Container image for the agent
├── agent.py              # Example agent implementation
├── .env.example          # Environment variables template
├── requirements.txt      # Python dependencies
└── README.md             # This file
```
## Quick Start
1. Copy `.env.example` to `.env` and customize if needed
2. Run `docker compose up -d` (or run `agent.py` directly, as shown below)
3. Your agent now has access to skills, conventions, and memory
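To run the agent outside Docker instead, a minimal sequence looks like this (assuming Python 3.11+, matching the Dockerfile base image):
```bash
# Install dependencies, create your .env, then start the example agent
pip install -r requirements.txt
cp .env.example .env
python agent.py
```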
## How It Works
The agent uses the AI Skills API at `http://helm:8675` to:
- Fetch relevant context (`/context/rag`) before each query
- Store learnings in memory (`/memory`) after interactions
- Compress conversation history (`/compress`) periodically
In practice this reduces token usage by roughly 60-70% compared to sending every skill, convention, and the full conversation history with each request.
## Integration Pattern
```python
import os
import httpx
from typing import Dict, Optional

API_URL = os.getenv("API_URL", "http://helm:8675")
API_KEY = os.getenv("API_KEY")  # Only required if auth is enabled on the skills API

async def get_context(query: str, project: Optional[str] = None) -> Dict:
    """Fetch relevant skills and conventions for the query."""
    params = {"query": query}
    if project:
        params["project"] = project
    headers = {"X-API-Key": API_KEY} if API_KEY else {}
    async with httpx.AsyncClient() as client:
        resp = await client.get(f"{API_URL}/context/rag", params=params, headers=headers)
        resp.raise_for_status()
        return resp.json()

async def store_memory(project: str, key: str, content: str) -> Dict:
    """Save a decision or learning for future reference."""
    headers = {"X-API-Key": API_KEY} if API_KEY else {}
    async with httpx.AsyncClient() as client:
        resp = await client.post(
            f"{API_URL}/memory",
            json={"id": key[:8], "project": project, "key": key, "content": content},
            headers=headers,
        )
        resp.raise_for_status()
        return resp.json()
```
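The `/compress` endpoint follows the same pattern. This helper mirrors `compress_messages` in `agent.py`; the response carries the compressed `messages` plus a `tokens_saved` count:
```python
from typing import List

async def compress_messages(messages: List[Dict]) -> Dict:
    """Compress conversation history via the skills API."""
    headers = {"X-API-Key": API_KEY} if API_KEY else {}
    async with httpx.AsyncClient() as client:
        resp = await client.post(f"{API_URL}/compress", json={"messages": messages}, headers=headers)
        resp.raise_for_status()
        return resp.json()
```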
## Docker Setup
The provided `docker-compose.yml` runs the agent in a container and links it to the skills API. Ensure the skills API is running on `helm:8675` first.
```bash
# Start the skills API on helm (if not already running)
docker compose -f /path/to/ai-skills-api/docker-compose.yml up -d
# Start your agent
docker compose up -d
```
## Configuration
Edit `config.yaml` on the skills API side to adjust:
- RAG limits (`max_skills`, `max_conventions`, `max_snippets`)
- Compression strategy (`extractive` or `ollama`)
- Authentication toggle
## Adding Your Own Skills
Use the skills API to add custom skills:
```bash
# If auth is enabled on the skills API, also pass: -H "X-API-Key: $API_KEY"
curl -X POST http://helm:8675/skills \
  -H "Content-Type: application/json" \
  -d '{
    "id": "my-custom-skill",
    "name": "My Skill",
    "category": "custom",
    "content": "Your instructions here...",
    "tags": ["custom", "mytag"]
  }'
```
Or use the MCP tools if you're in Claude Desktop:
- `skills/create_skill` tool
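For scripted setups, the same call can be made from Python with httpx. This is a small sketch sending the same payload as the curl example above:
```python
import asyncio
import os
from typing import Dict

import httpx

API_URL = os.getenv("API_URL", "http://helm:8675")
API_KEY = os.getenv("API_KEY")

async def create_skill() -> Dict:
    """Create a custom skill (same payload as the curl example)."""
    skill = {
        "id": "my-custom-skill",
        "name": "My Skill",
        "category": "custom",
        "content": "Your instructions here...",
        "tags": ["custom", "mytag"],
    }
    headers = {"X-API-Key": API_KEY} if API_KEY else {}
    async with httpx.AsyncClient() as client:
        resp = await client.post(f"{API_URL}/skills", json=skill, headers=headers)
        resp.raise_for_status()
        return resp.json()

asyncio.run(create_skill())
```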
## Resources
- Skills API docs: http://helm:8675/docs
- AI Skills API repo: https://git.bouncypixel.com/helm/ai-skills-api

template/agent.py (Normal file, 116 additions)

@@ -0,0 +1,116 @@
# Example agent implementation
# This demonstrates the integration pattern with the AI Skills API.
import os
import asyncio
import httpx
from typing import List, Dict, Optional
from dotenv import load_dotenv

# Load variables from .env when running outside Docker (python-dotenv is in requirements.txt)
load_dotenv()

API_URL = os.getenv("API_URL", "http://helm:8675")
API_KEY = os.getenv("API_KEY")


async def get_context(query: str, project: Optional[str] = None) -> Dict:
    """Fetch relevant context from the skills API."""
    params = {"query": query}
    if project:
        params["project"] = project
    headers = {"X-API-Key": API_KEY} if API_KEY else {}
    async with httpx.AsyncClient() as client:
        resp = await client.get(f"{API_URL}/context/rag", params=params, headers=headers)
        resp.raise_for_status()
        return resp.json()


async def compress_messages(messages: List[Dict]) -> Dict:
    """Compress conversation history."""
    headers = {"X-API-Key": API_KEY} if API_KEY else {}
    async with httpx.AsyncClient() as client:
        resp = await client.post(f"{API_URL}/compress", json={"messages": messages}, headers=headers)
        resp.raise_for_status()
        return resp.json()


async def store_memory(project: str, key: str, content: str) -> Dict:
    """Store a memory for future reference."""
    headers = {"X-API-Key": API_KEY} if API_KEY else {}
    async with httpx.AsyncClient() as client:
        resp = await client.post(
            f"{API_URL}/memory",
            json={"id": key[:8], "project": project, "key": key, "content": content},
            headers=headers,
        )
        resp.raise_for_status()
        return resp.json()


async def count_tokens(text: str) -> int:
    """Count tokens using the skills API."""
    headers = {"X-API-Key": API_KEY} if API_KEY else {}
    async with httpx.AsyncClient() as client:
        resp = await client.get(f"{API_URL}/tokens/count", params={"text": text}, headers=headers)
        resp.raise_for_status()
        return resp.json()["tokens"]


async def chat_loop():
    """Main chat loop - integrate with your LLM of choice."""
    conversation = []
    print("Agent ready! Type 'quit' to exit.")
    while True:
        user_input = input("\nYou: ")
        if user_input.lower() == 'quit':
            break

        # 1. Get relevant context
        context = await get_context(user_input, project="/home/user/projects/myapp")
        context_str = format_context(context)

        # 2. Build prompt with context
        system_msg = f"{context_str}\n\nYou are a helpful assistant."
        messages = [{"role": "system", "content": system_msg}]
        messages.extend(conversation[-4:])  # Keep the last few turns
        messages.append({"role": "user", "content": user_input})

        # 3. Call your LLM here (not included - use OpenAI, Claude, Ollama, etc.)
        # response = await call_llm(messages)
        # For the demo, we just echo the input back.
        response = f"Echo: {user_input}"

        # 4. Update conversation
        conversation.append({"role": "user", "content": user_input})
        conversation.append({"role": "assistant", "content": response})

        # 5. Compress if the history is getting long
        if len(conversation) > 10:
            compression = await compress_messages(conversation)
            conversation = compression["messages"]
            print(f"\n[Compressed: saved {compression['tokens_saved']} tokens]")

        print(f"\nAssistant: {response}")


def format_context(context: Dict) -> str:
    """Format RAG context for inclusion in the prompt."""
    parts = []
    if context.get("skills"):
        parts.append("## Relevant Skills\n")
        for skill in context["skills"]:
            parts.append(f"### {skill['name']} (relevance: {skill['relevance_score']:.2f})\n{skill['content']}\n")
    if context.get("conventions"):
        parts.append("## Project Conventions\n")
        for conv in context["conventions"]:
            parts.append(f"### {conv['name']}\n{conv['content']}\n")
    if context.get("snippets"):
        parts.append("## Code Snippets\n")
        for snippet in context["snippets"]:
            parts.append(f"### {snippet['name']} ({snippet['language']})\n```{snippet['language']}\n{snippet['content']}\n```\n")
    return "\n".join(parts) if parts else "No relevant context found."


if __name__ == "__main__":
    asyncio.run(chat_loop())

template/docker-compose.yml (Normal file, 12 additions)

@@ -0,0 +1,12 @@
version: '3.8'

services:
  agent:
    build: .
    environment:
      - API_URL=http://helm:8675
      - API_KEY=${API_KEY:-}
    volumes:
      - ./logs:/app/logs
    restart: unless-stopped
    # Add your agent's specific configuration here

template/requirements.txt (Normal file, 3 additions)

@@ -0,0 +1,3 @@
httpx==0.27.0
python-dotenv==1.0.0
# Add your agent's dependencies here