-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path.env.example
More file actions
391 lines (329 loc) · 18.7 KB
/
.env.example
File metadata and controls
391 lines (329 loc) · 18.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
# ── Agent42 Configuration ──────────────────────────────────────────────────────
# ── LLM Provider API Keys ────────────────────────────────────────────────────
# OpenRouter — single key, 200+ models, free tier included (recommended)
OPENROUTER_API_KEY=sk-or-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Direct providers (optional — only needed if you prefer direct API access)
# OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# ANTHROPIC_API_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# DEEPSEEK_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# GEMINI_API_KEY=AIzaxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Cerebras — free inference (1M tokens/day, ~3000 tok/s)
# Sign up: https://cloud.cerebras.ai/
# CEREBRAS_API_KEY=csk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Groq — free inference (rate-limited on free plan, no credit card required)
# Get key: https://console.groq.com/ (free account)
# GROQ_API_KEY=gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Mistral La Plateforme — CHEAP tier (credits required, 2 RPM on free experiment plan)
# Get key: https://console.mistral.ai/ (experiment plan: free, no credit card required)
# MISTRAL_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Mistral Codestral — FREE code specialist endpoint (separate key from La Plateforme)
# Get key: https://console.mistral.ai/ (Codestral tab — separate from main API key)
# CODESTRAL_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# SambaNova — CHEAP tier (credits required, funded account needed)
# Get key: https://cloud.sambanova.ai/ (free trial credits available at signup)
# SAMBANOVA_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Together AI — CHEAP tier (credits required, funded account needed)
# Get key: https://api.together.xyz/settings/api-keys ($1 free credit at signup)
# TOGETHER_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Local model hosting
# VLLM_API_KEY=not-needed
# VLLM_BASE_URL=http://localhost:8000/v1
# ── Dashboard Auth ────────────────────────────────────────────────────────────
# NOTE: On first run, a setup wizard in the browser will guide you through
# setting a password and generating a secure hash. You can skip editing this
# section if you prefer the wizard.
DASHBOARD_USERNAME=admin
# Option A: Plaintext password (initial setup only — switch to hash for production)
DASHBOARD_PASSWORD=changeme-right-now
# Option B: Bcrypt hash (RECOMMENDED for production)
# Generate: python -c "import bcrypt; print(bcrypt.hashpw(b'yourpassword', bcrypt.gensalt()).decode())"
# NOTE: wrap the hash in single quotes if your env loader expands $VARIABLES,
# otherwise the $2b$12$ prefix may be silently mangled.
# DASHBOARD_PASSWORD_HASH=$2b$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# JWT secret — REQUIRED for persistent sessions
# Generate: python -c "import secrets; print(secrets.token_hex(32))"
# If not set, a random secret is generated per-run (sessions lost on restart)
JWT_SECRET=
# ── Dashboard Network ────────────────────────────────────────────────────────
# Host to bind dashboard to (default: 127.0.0.1 — localhost only)
# Set to 0.0.0.0 to expose externally (ensure firewall + auth are configured)
DASHBOARD_HOST=127.0.0.1
# CORS allowed origins (comma-separated). Empty = same-origin only (safest).
# Example: http://localhost:3000,https://yourdomain.com
CORS_ALLOWED_ORIGINS=
# Max login attempts per minute per IP (brute force protection)
LOGIN_RATE_LIMIT=5
# Max concurrent WebSocket connections
MAX_WEBSOCKET_CONNECTIONS=50
# ── Orchestrator ──────────────────────────────────────────────────────────────
# Git repo to create worktrees from (optional — configure via dashboard)
# DEFAULT_REPO_PATH=/home/youruser/projects/myproject
# Max agents running simultaneously (0 = auto: dynamic based on CPU/memory)
MAX_CONCURRENT_AGENTS=0
# Task queue file
TASKS_JSON_PATH=tasks.json
# ── Security ──────────────────────────────────────────────────────────────────
# Enable workspace sandboxing (restricts agent file operations)
SANDBOX_ENABLED=true
WORKSPACE_RESTRICT=true
# Daily API spending limit in USD (0 = unlimited)
MAX_DAILY_API_SPEND_USD=0
# Worktree directory (default: ~/.agent42/worktrees)
# AGENT42_WORKTREE_DIR=/path/to/worktrees
# ── Security Scanning (Automated) ────────────────────────────────────────────
# Scheduled security scans with GitHub issue reporting
SECURITY_SCAN_ENABLED=true
# Scan interval (e.g., 8h = 3x/day, 6h = 4x/day, 12h = 2x/day)
SECURITY_SCAN_INTERVAL=8h
# Minimum severity to include in reports: low, medium, high, critical
SECURITY_SCAN_MIN_SEVERITY=medium
# Create/update GitHub issues with scan findings (requires gh CLI)
SECURITY_SCAN_GITHUB_ISSUES=true
# ── Channels (Phase 2) ───────────────────────────────────────────────────────
# Discord bot — https://discord.com/developers/applications
DISCORD_BOT_TOKEN=
# Comma-separated guild IDs (empty = all guilds)
DISCORD_GUILD_IDS=
# Slack bot — https://api.slack.com/apps (Socket Mode)
SLACK_BOT_TOKEN=xoxb-xxxxxxxxxxxx
SLACK_APP_TOKEN=xapp-xxxxxxxxxxxx
# Telegram bot — https://t.me/BotFather
TELEGRAM_BOT_TOKEN=
# Email (IMAP/SMTP)
EMAIL_IMAP_HOST=imap.gmail.com
EMAIL_IMAP_PORT=993
EMAIL_IMAP_USER=you@gmail.com
EMAIL_IMAP_PASSWORD=your-app-password
EMAIL_SMTP_HOST=smtp.gmail.com
EMAIL_SMTP_PORT=587
EMAIL_SMTP_USER=you@gmail.com
EMAIL_SMTP_PASSWORD=your-app-password
# ── Skills (Phase 3) ─────────────────────────────────────────────────────────
# Extra skill directories (comma-separated, in addition to builtins + repo/skills/)
# SKILLS_DIRS=/path/to/custom/skills,/another/path
# ── Tools (Phase 4) ──────────────────────────────────────────────────────────
# Web search (Brave Search API)
BRAVE_API_KEY=BSAxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# MCP servers config file (JSON format)
# MCP_SERVERS_JSON=mcp_servers.json
# Cron jobs persistence
CRON_JOBS_PATH=cron_jobs.json
# Custom tools directory — drop .py files with Tool subclasses here
# They will be auto-discovered and registered at startup
# CUSTOM_TOOLS_DIR=custom_tools
# ── Dynamic Model Routing ────────────────────────────────────────────────────
# File where dynamic routing rankings are stored
# MODEL_ROUTING_FILE=data/dynamic_routing.json
# How often to sync OpenRouter model catalog (hours)
# MODEL_CATALOG_REFRESH_HOURS=24
# Percentage of tasks to assign unproven models for evaluation (0-100)
# MODEL_TRIAL_PERCENTAGE=10
# Minimum task completions before a model gets ranked
# MODEL_MIN_TRIALS=5
# Enable web benchmark research from authoritative sources
# MODEL_RESEARCH_ENABLED=true
# How often to research benchmarks (hours, 168 = weekly)
# MODEL_RESEARCH_INTERVAL_HOURS=168
# Model routing policy: free_only | balanced (default) | performance
# free_only - only free OR models + configured native providers (Gemini, etc.)
# balanced - free by default; upgrades complex tasks when OR credits available
# performance - best model regardless of cost (daily spend limit is only constraint)
# MODEL_ROUTING_POLICY=balanced
# How often to re-check OpenRouter account balance (hours, default: 1)
# OPENROUTER_BALANCE_CHECK_HOURS=1.0
# Gemini free tier inclusion (default: true)
# When false, Gemini is excluded from FREE_ROUTING and fallback chain.
# Useful to reduce Google dependency when Cerebras/Groq/Mistral are configured.
# GEMINI_FREE_TIER=true
# OpenRouter free-only mode (default: false)
# When true, only routes to OpenRouter models with :free suffix.
# Prevents incurring OpenRouter paid charges even with a funded account.
# OPENROUTER_FREE_ONLY=false
# ── Memory (Phase 6) ─────────────────────────────────────────────────────────
MEMORY_DIR=.agent42/memory
SESSIONS_DIR=.agent42/sessions
# ── Scope Change Detection ──────────────────────────────────────────────────
# Detect when a user's message is about a different topic than the current
# conversation scope. Creates a new branch/task for out-of-scope requests.
SCOPE_DETECTION_ENABLED=true
# Confidence threshold below which the user is asked to confirm scope change
SCOPE_DETECTION_CONFIDENCE_THRESHOLD=0.5
# Semantic memory (embeddings) — auto-detects from available API keys
# Override with explicit model/provider if needed:
# EMBEDDING_MODEL=text-embedding-3-small
# EMBEDDING_PROVIDER=openai # openai or openrouter
# ── Qdrant Vector Database (optional — enhances semantic search) ─────
# Server mode: connect to a Qdrant server (Docker or Cloud)
# QDRANT_URL=http://localhost:6333
# QDRANT_API_KEY= # Required for Qdrant Cloud
# QDRANT_COLLECTION_PREFIX=agent42 # Prefix for collection names
#
# Embedded mode: local file storage (no server needed)
# Set QDRANT_ENABLED=true without QDRANT_URL to use embedded mode
# QDRANT_ENABLED=true
# QDRANT_LOCAL_PATH=.agent42/qdrant # Path for embedded Qdrant storage
#
# Docker quickstart:
# docker run -p 6333:6333 qdrant/qdrant
# pip install qdrant-client
# ── Redis (optional — fast session cache + embedding cache) ──────────
# REDIS_URL=redis://localhost:6379/0
# REDIS_PASSWORD=
# SESSION_TTL_DAYS=7 # Auto-expire old sessions
# EMBEDDING_CACHE_TTL_HOURS=24 # Cache embedding API responses
#
# Docker quickstart:
# docker run -p 6379:6379 redis:alpine
# pip install redis[hiredis]
# ── SSH Remote Shell ────────────────────────────────────────────────────
# Enable SSH tool for remote server management (disabled by default)
SSH_ENABLED=false
# Allowed host patterns (comma-separated, e.g., "*.mycompany.com,192.168.1.*")
# Empty = all hosts allowed (approval gate still required)
# SSH_ALLOWED_HOSTS=
# Default SSH private key path
# SSH_DEFAULT_KEY_PATH=~/.ssh/id_rsa
# Max file transfer size in MB (upload/download)
SSH_MAX_UPLOAD_MB=50
# Per-command execution timeout in seconds
SSH_COMMAND_TIMEOUT=120
# ── Tunnel Manager ──────────────────────────────────────────────────────
# Expose local services to the internet for testing/preview
TUNNEL_ENABLED=false
# Provider: auto (detect best), cloudflared, serveo, localhost.run
TUNNEL_PROVIDER=auto
# Allowed ports for tunneling (comma-separated, empty = all ports)
# TUNNEL_ALLOWED_PORTS=8000,3000,80,443
# Auto-shutdown tunnels after N minutes
TUNNEL_TTL_MINUTES=60
# ── Knowledge Base / RAG ────────────────────────────────────────────────
# Document import directory
KNOWLEDGE_DIR=.agent42/knowledge
# Chunking parameters (tokens)
KNOWLEDGE_CHUNK_SIZE=500
KNOWLEDGE_CHUNK_OVERLAP=50
# Max search results per query
KNOWLEDGE_MAX_RESULTS=10
# ── Vision / Image Analysis ─────────────────────────────────────────────
# Max image file size in MB
VISION_MAX_IMAGE_MB=10
# Override vision model (empty = auto-detect from available API keys)
# VISION_MODEL=gpt-4o-mini
# ── Apps Platform ──────────────────────────────────────────────────────
# Enable the apps platform (build and serve user applications)
APPS_ENABLED=true
# Base directory for all apps
APPS_DIR=apps
# Dynamic port range for running apps
APPS_PORT_RANGE_START=9100
APPS_PORT_RANGE_END=9199
# Max number of simultaneously running apps
APPS_MAX_RUNNING=5
# Automatically restart crashed apps
APPS_AUTO_RESTART=true
# Seconds between background health-check polls for running apps
APPS_MONITOR_INTERVAL=15
# Default runtime for new apps (static, python, node, docker)
APPS_DEFAULT_RUNTIME=python
# Enable git version control for new apps by default
APPS_GIT_ENABLED_DEFAULT=false
# GitHub Personal Access Token for app repo creation and push (optional)
# Create at: https://github.com/settings/tokens (repo scope)
APPS_GITHUB_TOKEN=
# Default app mode for new apps ("internal" = Agent42 system tool, "external" = public release)
APPS_DEFAULT_MODE=internal
# Require dashboard auth by default for new apps' proxy access
APPS_REQUIRE_AUTH_DEFAULT=false
# ── Chat Sessions ──────────────────────────────────────────────
# Directory for persistent chat session storage (JSONL per session)
CHAT_SESSIONS_DIR=.agent42/chat_sessions
# ── Projects ───────────────────────────────────────────────────
# Directory for project data persistence
PROJECTS_DIR=.agent42/projects
# ── GitHub OAuth (Device Flow) ─────────────────────────────────
# GitHub OAuth App Client ID for device flow authentication
# Create at: https://github.com/settings/applications/new
GITHUB_CLIENT_ID=
# GitHub OAuth token (populated automatically after device flow auth)
GITHUB_OAUTH_TOKEN=
# ── Project Interview Settings ──────────────────────────────────────────
# Enable structured project discovery interviews for complex tasks
PROJECT_INTERVIEW_ENABLED=true
# Interview gating mode: "auto" (complexity-based), "always", or "never"
PROJECT_INTERVIEW_MODE=auto
# Maximum number of interview rounds (themes: overview, requirements, technical, constraints)
PROJECT_INTERVIEW_MAX_ROUNDS=4
# Minimum complexity level to trigger interview: "moderate" or "complex"
PROJECT_INTERVIEW_MIN_COMPLEXITY=moderate
# ── Project-Scoped Memory ────────────────────────────────────────────────
# When enabled, each project gets its own MEMORY.md / HISTORY.md so agent
# learnings, conventions, and reviewer feedback are scoped per-project.
# Standalone tasks (no project_id) still use global memory.
PROJECT_MEMORY_ENABLED=true
# ── Conversational Mode ──────────────────────────────────────────────────
# When enabled, simple messages (greetings, quick questions, status checks)
# get direct LLM responses without creating a task. Substantial work
# (coding, research, etc.) still creates tasks as usual.
CONVERSATIONAL_ENABLED=true
# Model for direct conversational responses (empty = use primary free model)
# CONVERSATIONAL_MODEL=
# ── L1/L2 Agent Tier System ─────────────────────────────────────────────
# L1 = Standard tier (free/admin-chosen models — the worker team)
# L2 = Premium tier (premium models — senior reviewer team)
# L1 workers complete tasks → Approvals → user approves or escalates to L2
# Override primary model for all L1 tasks (empty = per-task-type free defaults)
# L1_DEFAULT_MODEL=
# Override critic model for all L1 tasks
# L1_CRITIC_MODEL=
# Enable L2 premium tier (auto-disabled if no premium API keys are set)
L2_ENABLED=true
# Override model for all L2 tasks (empty = per-task-type premium defaults)
# L2_DEFAULT_MODEL=
# Override L2 agent profile (empty = auto-select based on task type)
# L2_DEFAULT_PROFILE=
# Auto-send all L1 output to L2 without manual approval
L2_AUTO_ESCALATE=false
# Comma-separated task types to auto-escalate (empty = all, when L2_AUTO_ESCALATE=true)
# L2_AUTO_ESCALATE_TASK_TYPES=coding,debugging,app_create
# Comma-separated task types eligible for L2 escalation (empty = all)
# L2_TASK_TYPES=
# Per-task-type L2 model overrides (follows existing admin override pattern)
# AGENT42_L2_MODEL=claude-sonnet-4-6
# AGENT42_L2_CODING_MODEL=claude-sonnet-4-6
# AGENT42_L2_RESEARCH_MODEL=gpt-4o
# AGENT42_L2_STRATEGY_MODEL=gpt-4o
# ── Recursive Language Models (RLM — MIT CSAIL) ─────────────────────────
# RLM processes inputs 100x beyond model context windows by treating the
# prompt as an external variable in a REPL environment.
# Paper: https://arxiv.org/abs/2512.24601
# Package: pip install rlms
RLM_ENABLED=true
# Context size threshold (in tokens) to trigger RLM processing.
# Below this threshold, standard LLM completion is used.
RLM_THRESHOLD_TOKENS=50000
# REPL environment: local (default), docker (safer), modal (cloud), prime
RLM_ENVIRONMENT=local
# Max recursion depth for RLM sub-calls (diminishing returns beyond 3)
RLM_MAX_DEPTH=3
# Max REPL iterations before forcing output (prevents infinite loops)
RLM_MAX_ITERATIONS=20
# Override root model for RLM (empty = use task's primary model)
# RLM_ROOT_MODEL=
# Override sub-call model for RLM recursive calls (empty = same as root)
# RLM_SUB_MODEL=
# Directory for RLM trajectory/execution logs
RLM_LOG_DIR=.agent42/rlm_logs
# Enable rich console output for RLM debugging
RLM_VERBOSE=false
# Max cost per RLM query in USD (guard against runaway recursion)
RLM_COST_LIMIT=1.00
# Timeout per RLM query in seconds
RLM_TIMEOUT_SECONDS=300
# Docker image for isolated REPL (if RLM_ENVIRONMENT=docker)
RLM_DOCKER_IMAGE=python:3.11-slim
# ── Multi-Repository Management ─────────────────────────────────────────
# GitHub Personal Access Token for listing and cloning repos (optional)
# Also used by Apps platform if APPS_GITHUB_TOKEN is not set separately.
# Create at: https://github.com/settings/tokens (repo scope)
GITHUB_TOKEN=
# Path to JSON registry of connected repositories
REPOS_JSON_PATH=.agent42/repos.json
# Base directory where GitHub repos are cloned to
REPOS_CLONE_DIR=.agent42/repos