[providers]
default_provider = "databricks"
# Optional: specify different providers for coach and player in autonomous mode.
# If not specified, default_provider is used for both.
# Note: make sure the specified providers are configured below.
# coach = "databricks"   # Provider for coach (code reviewer)
# player = "anthropic"   # Provider for player (code implementer)

[providers.databricks]
host = "https://your-workspace.cloud.databricks.com"
# token = "your-databricks-token"  # Optional - will use OAuth if not provided
model = "databricks-claude-sonnet-4"
# Per-request output limit (how many tokens the model can generate per response).
# Note: this is different from max_context_length (total conversation history size).
max_tokens = 4096
temperature = 0.1
use_oauth = true

[agent]
fallback_default_max_tokens = 8192
# max_context_length: override the context window size for all providers.
# This is the total size of conversation history, not the per-request output limit.
# Useful for models with large context windows (e.g., Claude with 200k tokens).
# If not set, provider-specific defaults based on model capabilities are used.
# max_context_length = 200000
enable_streaming = true
timeout_seconds = 60
# Retry configuration for recoverable errors (timeouts, rate limits, etc.)
max_retry_attempts = 3             # Default mode retry attempts
autonomous_max_retry_attempts = 6  # Autonomous mode retry attempts (higher for long-running tasks)

[computer_control]
enabled = false  # Set to true to enable computer control (requires OS permissions)
require_confirmation = true
max_actions_per_second = 5