Change API port from 8080 to 8675 across all configs and docs

This commit is contained in:
Lukas Parsons 2026-03-22 21:54:51 -04:00
parent 82fd963577
commit 62c875c9a6
8 changed files with 21 additions and 21 deletions

View file

@@ -34,7 +34,7 @@ Add to your Claude Desktop config (`~/Library/Application Support/Claude/claude_
"command": "python", "command": "python",
"args": ["/path/to/ai-skills-api/mcp/skills.py"], "args": ["/path/to/ai-skills-api/mcp/skills.py"],
"env": { "env": {
"SKILLS_API_URL": "http://localhost:8080" "SKILLS_API_URL": "http://helm:8675"
} }
} }
} }

View file

@@ -16,8 +16,8 @@ pip install -r requirements.txt
uvicorn main:app --reload uvicorn main:app --reload
``` ```
API available at `http://localhost:8080` API available at `http://helm:8675`
Docs at `http://localhost:8080/docs` Docs at `http://helm:8675/docs`
## Endpoints ## Endpoints
@@ -52,7 +52,7 @@ Docs at `http://localhost:8080/docs`
### Create a skill ### Create a skill
```bash ```bash
curl -X POST http://localhost:8080/skills \ curl -X POST http://helm:8675/skills \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d '{ -d '{
"id": "homelab-docker-compose", "id": "homelab-docker-compose",
@@ -65,12 +65,12 @@ curl -X POST http://localhost:8080/skills \
### Get context bundle ### Get context bundle
```bash ```bash
curl "http://localhost:8080/context?project=/home/server/apps/media-server&skills=homelab-docker-compose,react-v2" curl "http://helm:8675/context?project=/home/server/apps/media-server&skills=homelab-docker-compose,react-v2"
``` ```
### Check cache ### Check cache
```bash ```bash
curl -X POST http://localhost:8080/cache/lookup \ curl -X POST http://helm:8675/cache/lookup \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d '{ -d '{
"prompt": "How do I configure traefik?", "prompt": "How do I configure traefik?",

View file

@@ -11,14 +11,14 @@ This is what actually reduces API consumption.
```bash ```bash
# First ask (miss - hits API) # First ask (miss - hits API)
curl -X POST http://localhost:8080/cache/semantic-lookup \ curl -X POST http://helm:8675/cache/semantic-lookup \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d '{"prompt": "How do I setup Traefik?", "model": "claude-3-opus"}' -d '{"prompt": "How do I setup Traefik?", "model": "claude-3-opus"}'
# Response: {"hit": false} # Response: {"hit": false}
# -> Call LLM, get response # -> Call LLM, get response
# -> Store response: # -> Store response:
curl -X POST http://localhost:8080/cache/semantic-store \ curl -X POST http://helm:8675/cache/semantic-store \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d '{ -d '{
"prompt": "How do I setup Traefik?", "prompt": "How do I setup Traefik?",
@@ -29,7 +29,7 @@ curl -X POST http://localhost:8080/cache/semantic-store \
}' }'
# Second ask, slightly different (HIT - no API call) # Second ask, slightly different (HIT - no API call)
curl -X POST http://localhost:8080/cache/semantic-lookup \ curl -X POST http://helm:8675/cache/semantic-lookup \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d '{"prompt": "Traefik setup help", "model": "claude-3-opus"}' -d '{"prompt": "Traefik setup help", "model": "claude-3-opus"}'
@@ -51,7 +51,7 @@ curl "http://localhost:8080/context?project=/opt/home-server"
# Returns: 50 skills, 10 conventions = ~3000 tokens # Returns: 50 skills, 10 conventions = ~3000 tokens
# RAG endpoint - returns only relevant # RAG endpoint - returns only relevant
curl "http://localhost:8080/context/rag?query=How+do+I+setup+Docker+Compose&project=/opt/home-server" curl "http://helm:8675/context/rag?query=How+do+I+setup+Docker+Compose&project=/opt/home-server"
# Returns: 3 skills about Docker, 2 conventions = ~600 tokens # Returns: 3 skills about Docker, 2 conventions = ~600 tokens
``` ```
@@ -66,7 +66,7 @@ curl "http://localhost:8080/context/rag?query=How+do+I+setup+Docker+Compose&proj
```bash ```bash
# Compress a long conversation # Compress a long conversation
curl -X POST http://localhost:8080/compress \ curl -X POST http://helm:8675/compress \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d '{ -d '{
"messages": [...], # Your conversation history "messages": [...], # Your conversation history
@@ -95,7 +95,7 @@ curl -X POST http://localhost:8080/compress \
async def query_llm(prompt, conversation_history, project=None): async def query_llm(prompt, conversation_history, project=None):
# 1. Check semantic cache FIRST # 1. Check semantic cache FIRST
cache_result = await httpx.post( cache_result = await httpx.post(
"http://localhost:8080/cache/semantic-lookup", "http://helm:8675/cache/semantic-lookup",
json={"prompt": prompt, "model": "claude-3-opus"} json={"prompt": prompt, "model": "claude-3-opus"}
) )
@@ -105,13 +105,13 @@ async def query_llm(prompt, conversation_history, project=None):
# 2. Get ONLY relevant context (not everything) # 2. Get ONLY relevant context (not everything)
context = await httpx.get( context = await httpx.get(
"http://localhost:8080/context/rag", "http://helm:8675/context/rag",
params={"query": prompt, "project": project} params={"query": prompt, "project": project}
) )
# 3. Compress conversation history # 3. Compress conversation history
compressed = await httpx.post( compressed = await httpx.post(
"http://localhost:8080/compress", "http://helm:8675/compress",
json={"messages": conversation_history, "keep_last_n": 3} json={"messages": conversation_history, "keep_last_n": 3}
) )
@@ -130,7 +130,7 @@ async def query_llm(prompt, conversation_history, project=None):
# 6. Store in semantic cache # 6. Store in semantic cache
await httpx.post( await httpx.post(
"http://localhost:8080/cache/semantic-store", "http://helm:8675/cache/semantic-store",
json={ json={
"prompt": prompt, "prompt": prompt,
"response": response, "response": response,

View file

@@ -2,7 +2,7 @@ services:
api: api:
build: . build: .
ports: ports:
- "8080:8080" - "8675:8080"
environment: environment:
- DATABASE_URL=sqlite+aiosqlite:///./ai.db - DATABASE_URL=sqlite+aiosqlite:///./ai.db
volumes: volumes:

View file

@@ -3,7 +3,7 @@
import httpx import httpx
BASE_URL = "http://localhost:8080" BASE_URL = "http://helm:8675"
SKILLS = [ SKILLS = [
{ {
@@ -214,7 +214,7 @@ def seed():
except Exception as e: except Exception as e:
print(f"{snippet['id']}: {e}") print(f"{snippet['id']}: {e}")
print("\nDone! Check http://localhost:8080/docs") print("\nDone! Check http://helm:8675/docs")
if __name__ == "__main__": if __name__ == "__main__":

View file

@@ -1,2 +1,2 @@
SKILLS_API_URL=http://localhost:8080 SKILLS_API_URL=http://helm:8675
GAME_SERVERS_DIR=/opt/game-servers GAME_SERVERS_DIR=/opt/game-servers

View file

@@ -29,7 +29,7 @@ services:
dockerfile: mcp/Dockerfile dockerfile: mcp/Dockerfile
command: python skills.py command: python skills.py
environment: environment:
- SKILLS_API_URL=http://host.docker.internal:8080 - SKILLS_API_URL=http://host.docker.internal:8675
extra_hosts: extra_hosts:
- "host.docker.internal:host-gateway" - "host.docker.internal:host-gateway"
network_mode: host network_mode: host

View file

@@ -4,7 +4,7 @@ import os
mcp = FastMCP("skills") mcp = FastMCP("skills")
SKILLS_API_URL = os.getenv("SKILLS_API_URL", "http://localhost:8080") SKILLS_API_URL = os.getenv("SKILLS_API_URL", "http://helm:8675")
@mcp.tool() @mcp.tool()