# Source: g3/config.ollama.example.toml
# Commit: 217df2f2af "ollama support" — Michael Neale, 2025-11-05 12:17:01 +11:00
# Example G3 configuration using Ollama provider
# Copy this to ~/.config/g3/config.toml or ./g3.toml to use it

[providers]
default_provider = "ollama"

# Ollama configuration (local LLM)
[providers.ollama]
model = "llama3.2"  # or qwen2.5, mistral, etc.
# base_url = "http://localhost:11434"  # Optional, defaults to localhost
# max_tokens = 2048                    # Optional
# temperature = 0.7                    # Optional

# Optional: Specify different providers for coach and player in autonomous mode
# coach = "ollama"   # Provider for coach (code reviewer)
# player = "ollama"  # Provider for player (code implementer)

[agent]
max_context_length = 8192
enable_streaming = true
timeout_seconds = 60

[computer_control]
# Set to true to enable computer control (requires OS permissions)
enabled = false
require_confirmation = true
max_actions_per_second = 5