Configuration Reference
The following is a complete reference of the plano_config.yml that controls the behavior of a single instance of
the Arch gateway. This is where you enable capabilities such as routing to upstream LLM providers, defining
prompt_targets where prompts get routed, applying guardrails, and enabling critical agent observability features.
# Arch Gateway configuration version
version: v0.3.0


# External HTTP agents - API type is controlled by request path (/v1/responses, /v1/messages, /v1/chat/completions)
agents:
  - id: weather_agent  # Example agent for weather
    url: http://host.docker.internal:10510

  - id: flight_agent  # Example agent for flights
    url: http://host.docker.internal:10520


# MCP filters applied to requests/responses (e.g., input validation, query rewriting)
filters:
  - id: input_guards  # Example filter for input validation
    url: http://host.docker.internal:10500
    # type: mcp (default)
    # transport: streamable-http (default)
    # tool: input_guards (default - same as filter id)


# LLM provider configurations with API keys and model routing
model_providers:
  - model: openai/gpt-4o
    access_key: $OPENAI_API_KEY  # resolved from the environment; keep secrets out of the file
    default: true  # used when a request does not name a model

  - model: openai/gpt-4o-mini
    access_key: $OPENAI_API_KEY

  - model: anthropic/claude-sonnet-4-0
    access_key: $ANTHROPIC_API_KEY

  - model: mistral/ministral-3b-latest
    access_key: $MISTRAL_API_KEY


# Model aliases - use friendly names instead of full provider model names
model_aliases:
  fast-llm:
    target: gpt-4o-mini

  smart-llm:
    target: gpt-4o


# HTTP listeners - entry points for agent routing, prompt targets, and direct LLM access
listeners:
  # Agent listener for routing requests to multiple agents
  - type: agent
    name: travel_booking_service
    port: 8001
    router: plano_orchestrator_v1
    address: 0.0.0.0
    agents:
      - id: rag_agent
        description: virtual assistant for retrieval augmented generation tasks
    # NOTE(review): the source listing was garbled, so whether filter_chain sits at
    # the listener level (as written here) or under each agent entry could not be
    # recovered - confirm against the Arch gateway config schema
    filter_chain:
      - input_guards

  # Model listener for direct LLM access
  - type: model
    name: model_1
    address: 0.0.0.0
    port: 12000

  # Prompt listener for function calling (for prompt_targets)
  - type: prompt
    name: prompt_function_listener
    address: 0.0.0.0
    port: 10000
    # This listener is used for prompt_targets and function calling


# Reusable service endpoints
endpoints:
  app_server:
    endpoint: 127.0.0.1:80
    connect_timeout: 0.005s

  mistral_local:
    endpoint: 127.0.0.1:8001


# Prompt targets for function calling and API orchestration
prompt_targets:
  - name: get_current_weather
    description: Get current weather at a location.
    parameters:
      - name: location
        description: The location to get the weather for
        required: true
        type: string
        format: City, State
      - name: days
        description: the number of days for the request
        required: true
        type: int
    # Resolved against the matching entry under the top-level `endpoints` section
    endpoint:
      name: app_server
      path: /weather
      http_method: POST


# OpenTelemetry tracing configuration
tracing:
  # Random sampling percentage (1-100)
  random_sampling: 100