Implement planning mode
@@ -1,35 +1,52 @@
+# G3 Configuration Example
+#
+# This file demonstrates the new provider configuration format.
+# Provider references use the format: "<provider_type>.<config_name>"
+
 [providers]
-default_provider = "databricks"
-# Optional: Specify different providers for coach and player in autonomous mode
-# If not specified, will use default_provider for both
-# coach = "databricks"    # Provider for coach (code reviewer)
-# player = "anthropic"    # Provider for player (code implementer)
-# Note: Make sure the specified providers are configured below
+# Default provider used when no specific provider is specified
+default_provider = "anthropic.default"
+
+# Optional: Specify different providers for each mode
+# If not specified, these fall back to default_provider
+# planner = "anthropic.planner"    # Provider for planning mode
+# coach = "anthropic.default"      # Provider for coach (code reviewer) in autonomous mode
+# player = "anthropic.default"     # Provider for player (code implementer) in autonomous mode
 
-[providers.databricks]
+# Named Anthropic configurations
+[providers.anthropic.default]
+api_key = "your-anthropic-api-key"
+model = "claude-sonnet-4-5"
+max_tokens = 64000
+temperature = 0.3
+# cache_config = "ephemeral"          # Optional: Enable prompt caching
+# enable_1m_context = true            # Optional: Enable 1M context (costs extra)
+# thinking_budget_tokens = 10000      # Optional: Enable extended thinking mode
+
+# Example: A separate config for planning mode with a more capable model
+# [providers.anthropic.planner]
+# api_key = "your-anthropic-api-key"
+# model = "claude-opus-4-5"
+# max_tokens = 64000
+# thinking_budget_tokens = 16000
+
+# Named Databricks configurations
+[providers.databricks.default]
 host = "https://your-workspace.cloud.databricks.com"
 # token = "your-databricks-token"     # Optional - will use OAuth if not provided
 model = "databricks-claude-sonnet-4"
-max_tokens = 4096
+max_tokens = 4096    # Per-request output limit (how many tokens the model can generate per response)
+# Note: This is different from max_context_length (total conversation history size)
 temperature = 0.1
 use_oauth = true
 
-[providers.anthropic]
-api_key = "your-anthropic-api-key"
-model = "claude-sonnet-4-5"
-max_tokens = 4096
-temperature = 0.3    # Slightly higher temperature for more creative implementations
-# cache_config = "ephemeral"          # Optional: Enable prompt caching
-#   Options: "ephemeral", "5minute", "1hour"
-#   Reduces costs and latency for repeated prompts. Uses Anthropic's prompt caching with different TTLs.
-# enable_1m_context = true            # optional, more expensive
-# thinking_budget_tokens = 10000      # Optional: Enable extended thinking mode with token budget
-#   Allows the model to "think" before responding. Useful for complex reasoning tasks.
+# Named OpenAI configurations
+# [providers.openai.default]
+# api_key = "your-openai-api-key"
+# model = "gpt-4-turbo"
+# max_tokens = 4096
+# temperature = 0.1
 
-# Multiple OpenAI-compatible providers can be configured
+# Multiple OpenAI-compatible providers can be configured with custom names
+# Each provider gets its own section under [providers.openai_compatible.<name>]
 # [providers.openai_compatible.openrouter]
 # api_key = "your-openrouter-api-key"
 # model = "anthropic/claude-3.5-sonnet"
@@ -44,24 +61,25 @@ temperature = 0.3 # Slightly higher temperature for more creative implementatio
 # max_tokens = 4096
 # temperature = 0.1
 
 # To use one of these providers, set default_provider to the name you chose:
 # default_provider = "openrouter"
 
 [agent]
 fallback_default_max_tokens = 8192
+# max_context_length: Override the context window size for all providers
+# This is the total size of conversation history, not per-request output limit
+# Useful for models with large context windows (e.g., Claude with 200k tokens)
+# If not set, uses provider-specific defaults based on model capabilities
+# max_context_length = 200000
 enable_streaming = true
 timeout_seconds = 60
-max_retry_attempts = 3
-autonomous_max_retry_attempts = 6
-allow_multiple_tool_calls = true
+# Retry configuration for recoverable errors (timeouts, rate limits, etc.)
+max_retry_attempts = 3                 # Default mode retry attempts
+autonomous_max_retry_attempts = 6      # Autonomous mode retry attempts (higher for long-running tasks)
+allow_multiple_tool_calls = true       # Enable multiple tool calls
 
 [computer_control]
 enabled = false    # Set to true to enable computer control (requires OS permissions)
 require_confirmation = true
 max_actions_per_second = 5
 
 [webdriver]
 enabled = false
 safari_port = 4444
 
 [macax]
 enabled = false
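
Putting the commented pieces above together: enabling the new planning mode appears to come down to uncommenting the planner reference and defining the named config it points to. A minimal sketch, assuming the section names and keys from the example file in this diff (the API key and model values are placeholders):

[providers]
default_provider = "anthropic.default"
planner = "anthropic.planner"    # planning mode uses the more capable config below

[providers.anthropic.default]
api_key = "your-anthropic-api-key"    # placeholder
model = "claude-sonnet-4-5"
max_tokens = 64000
temperature = 0.3

[providers.anthropic.planner]
api_key = "your-anthropic-api-key"    # placeholder
model = "claude-opus-4-5"
max_tokens = 64000
thinking_budget_tokens = 16000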