# config: default agent settings and provider override
# g3 Configuration Example
#
# This file demonstrates the new provider configuration format.
# Provider references use the format: "<provider_type>.<config_name>"
# Most settings have sensible defaults. A minimal config only needs:
#
# [providers]
# default_provider = "anthropic.default"
#
# [providers.anthropic.default]
# api_key = "your-api-key"
# model = "claude-sonnet-4-5"
#
# Everything else below is optional.

[providers]
# Default provider used when no specific provider is specified
default_provider = "anthropic.default"

# Optional: Specify different providers for each mode.
# If not specified, these fall back to default_provider.
# planner = "anthropic.planner" # Provider for planning mode
# coach = "anthropic.default"   # Provider for coach (code reviewer) in autonomous mode
# player = "anthropic.default"  # Provider for player (code implementer) in autonomous mode
# Named Anthropic configurations
[providers.anthropic.default]
api_key = "your-anthropic-api-key"
model = "claude-sonnet-4-5"
max_tokens = 64000  # Optional (default: provider's max)
temperature = 0.3   # Optional
# cache_config = "ephemeral"      # Optional: Enable prompt caching
# enable_1m_context = true        # Optional: Enable 1M context (costs extra)
# thinking_budget_tokens = 10000  # Optional: Enable extended thinking mode

# Example: A separate config for planning mode with a more capable model
# [providers.anthropic.planner]
# api_key = "your-anthropic-api-key"
# model = "claude-opus-4-5"
# max_tokens = 64000
# thinking_budget_tokens = 16000
# Named Databricks configurations
[providers.databricks.default]
host = "https://your-workspace.cloud.databricks.com"
# token = "your-databricks-token" # Optional - will use OAuth if not provided
model = "databricks-claude-sonnet-4"
max_tokens = 4096
temperature = 0.1
use_oauth = true
# Named OpenAI configurations (example)
# [providers.openai.default]
# api_key = "your-openai-api-key"
# model = "gpt-4-turbo"
# max_tokens = 4096
# temperature = 0.1

# Multiple OpenAI-compatible providers can be configured
# (OpenRouter, Groq, and other services exposing the OpenAI API)
# [providers.openai_compatible.openrouter]
# api_key = "your-openrouter-api-key"
# model = "anthropic/claude-3.5-sonnet"
# base_url = "https://openrouter.ai/api/v1"
# max_tokens = 4096
# temperature = 0.1

# [providers.openai_compatible.groq]
# api_key = "your-groq-api-key"
# model = "llama-3.3-70b-versatile"
# base_url = "https://api.groq.com/openai/v1"
# max_tokens = 4096
# temperature = 0.1
# =============================================================================
# Agent settings (all optional - these are the defaults)
# =============================================================================
[agent]
fallback_default_max_tokens = 8192
enable_streaming = true
timeout_seconds = 60
max_retry_attempts = 3
autonomous_max_retry_attempts = 6
allow_multiple_tool_calls = true
# auto_compact = true # Optional
# max_context_length: Override the context window size for all providers.
# This is the total size of conversation history, not a per-request output limit.
# max_context_length = 200000
# =============================================================================
# Computer control (all optional) — configured in the [computer_control]
# section below
# =============================================================================
# Retry Configuration for Planning/Autonomous Mode
#
# The retry infrastructure handles transient errors during LLM API calls:
# - Rate limits (HTTP 429)
# - Network errors (connection failures)
# - Server errors (HTTP 5xx)
# - Request timeouts
# - Model capacity issues (model busy)
#
# Default retry behavior:
# - max_retry_attempts: used by default in interactive mode (3 retries)
# - autonomous_max_retry_attempts: used by planning/autonomous mode (6 retries)
#
# Note: The retry logic uses exponential backoff with longer delays in
# autonomous mode to handle rate limits gracefully.
#
# Example player retry config (in code):
# RetryConfig::planning("player") # Creates: max_retries=3, is_autonomous=true
# RetryConfig::planning("player").with_max_retries(6) # Override max retries
#
# Example coach retry config (in code):
# RetryConfig::planning("coach") # Creates: max_retries=3, is_autonomous=true
# RetryConfig::planning("coach").with_max_retries(6) # Override max retries

[computer_control]
enabled = false  # Set to true to enable computer control (requires OS accessibility permissions)
require_confirmation = true
max_actions_per_second = 5
[webdriver]
enabled = false
safari_port = 4444
chrome_port = 9515
# Browser to use: "safari" (default) or "chrome-headless".
# Safari opens a visible browser window.
# Chrome headless runs in the background without a visible window.
browser = "safari"
# Optional: Path to Chrome binary (e.g., Chrome for Testing).
# If not set, ChromeDriver will use the default Chrome installation.
# Use this to avoid version mismatch issues between Chrome and ChromeDriver.
# Run: ./scripts/setup-chrome-for-testing.sh to install matching versions.
# chrome_binary = "/Users/yourname/.chrome-for-testing/chrome-mac-arm64/Google Chrome for Testing.app/Contents/MacOS/Google Chrome for Testing"
# chrome_binary = "/Users/yourname/.chrome-for-testing/chrome-mac-x64/Google Chrome for Testing.app/Contents/MacOS/Google Chrome for Testing"
# Optional: Path to ChromeDriver binary.
# If not set, looks for 'chromedriver' in PATH.
# The setup script creates a symlink at ~/.local/bin/chromedriver.
# chromedriver_binary = "/Users/yourname/.local/bin/chromedriver"
# macOS Accessibility (AX) automation
[macax]
enabled = false