diff --git a/Cargo.lock b/Cargo.lock
index 6e41156..93c3eee 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -720,6 +720,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "config",
+ "dirs 5.0.1",
  "serde",
  "shellexpand",
  "thiserror 1.0.69",
diff --git a/crates/g3-config/Cargo.toml b/crates/g3-config/Cargo.toml
index bc9e7c1..d818d23 100644
--- a/crates/g3-config/Cargo.toml
+++ b/crates/g3-config/Cargo.toml
@@ -11,3 +11,4 @@ anyhow = { workspace = true }
 thiserror = { workspace = true }
 toml = "0.8"
 shellexpand = "3.0"
+dirs = "5.0"
diff --git a/crates/g3-config/src/lib.rs b/crates/g3-config/src/lib.rs
index 5b3596f..46856f9 100644
--- a/crates/g3-config/src/lib.rs
+++ b/crates/g3-config/src/lib.rs
@@ -71,6 +71,50 @@ impl Default for Config {
 impl Config {
     pub fn load(config_path: Option<&str>) -> Result {
+        // Check if any config file exists
+        let config_exists = if let Some(path) = config_path {
+            Path::new(path).exists()
+        } else {
+            // Check default locations
+            let default_paths = [
+                "./g3.toml",
+                "~/.config/g3/config.toml",
+                "~/.g3.toml",
+            ];
+
+            default_paths.iter().any(|path| {
+                let expanded_path = shellexpand::tilde(path);
+                Path::new(expanded_path.as_ref()).exists()
+            })
+        };
+
+        // If no config exists, create and save a default Qwen config
+        if !config_exists {
+            let qwen_config = Self::default_qwen_config();
+
+            // Save to default location
+            let config_dir = dirs::home_dir()
+                .map(|mut path| {
+                    path.push(".config");
+                    path.push("g3");
+                    path
+                })
+                .unwrap_or_else(|| std::path::PathBuf::from("."));
+
+            // Create directory if it doesn't exist
+            std::fs::create_dir_all(&config_dir).ok();
+
+            let config_file = config_dir.join("config.toml");
+            if let Err(e) = qwen_config.save(config_file.to_str().unwrap()) {
+                eprintln!("Warning: Could not save default config: {}", e);
+            } else {
+                println!("Created default Qwen configuration at: {}", config_file.display());
+            }
+
+            return Ok(qwen_config);
+        }
+
+        // Existing config loading logic
         let mut settings =
             config::Config::builder();
 
         // Load default configuration
@@ -108,6 +152,30 @@ impl Config {
         Ok(config)
     }
 
+    fn default_qwen_config() -> Self {
+        Self {
+            providers: ProvidersConfig {
+                openai: None,
+                anthropic: None,
+                embedded: Some(EmbeddedConfig {
+                    model_path: "~/.cache/g3/models/qwen2.5-7b-instruct-q3_k_m.gguf".to_string(),
+                    model_type: "qwen".to_string(),
+                    context_length: Some(32768), // Qwen2.5 supports 32k context
+                    max_tokens: Some(2048),
+                    temperature: Some(0.1),
+                    gpu_layers: Some(32),
+                    threads: Some(8),
+                }),
+                default_provider: "embedded".to_string(),
+            },
+            agent: AgentConfig {
+                max_context_length: 8192,
+                enable_streaming: true,
+                timeout_seconds: 60,
+            },
+        }
+    }
+
     pub fn save(&self, path: &str) -> Result<()> {
         let toml_string = toml::to_string_pretty(self)?;
         std::fs::write(path, toml_string)?;
diff --git a/g3_session_hello_test_a63e133911916b31.json b/g3_session_hello_test_a63e133911916b31.json
new file mode 100644
index 0000000..d82cba3
--- /dev/null
+++ b/g3_session_hello_test_a63e133911916b31.json
@@ -0,0 +1,32 @@
+{
+  "context_window": {
+    "conversation_history": [
+      {
+        "content": "You are G3, a general-purpose AI agent. Your goal is to analyze and solve problems by writing code.\n\n# Tool Call Format\n\nWhen you need to execute a tool, write ONLY the JSON tool call on a new line:\n\n{\"tool\": \"tool_name\", \"args\": {\"param\": \"value\"}}\n\nThe tool will execute immediately and you'll receive the result (success or error) to continue with.\n\n# Available Tools\n\n- **shell**: Execute shell commands\n - Format: {\"tool\": \"shell\", \"args\": {\"command\": \"your_command_here\"}}\n - Example: {\"tool\": \"shell\", \"args\": {\"command\": \"ls ~/Downloads\"}}\n\n- **final_output**: Signal task completion with a detailed summary of work done in markdown format\n - Format: {\"tool\": \"final_output\", \"args\": {\"summary\": \"what_was_accomplished\"}}\n\n# Instructions\n\n1. Analyze the request and break down into smaller tasks if appropriate\n2. Execute ONE tool at a time\n3. STOP when the original request was satisfied\n4. Call the final_output tool when done\n\n# Response Guidelines\n\n- Use Markdown formatting for all responses except tool calls.\n- Whenever taking actions, use the pronoun 'I'\n\n",
+        "role": "system"
+      },
+      {
+        "content": "Task: hello test",
+        "role": "user"
+      },
+      {
+        "content": "I will start by executing a simple task as requested. Let's print \"Hello, Test\" to confirm the task is understood and can be executed.\n\n{\"tool\": \"shell\", \"args\": {\"command\":\"echo Hello, Test\"}}",
+        "role": "assistant"
+      },
+      {
+        "content": "Tool result: Hello, Test",
+        "role": "user"
+      },
+      {
+        "content": "I will start by executing a simple task as requested. Let's print \"Hello, Test\" to confirm the task is understood and can be executed.\n\nTool executed: shell -> Hello, Test\n\n{\"tool\": \"final_output\", \"args\": {\"summary\": \"# Task Accomplished\n\nThe task 'hello test' was successfully executed. The output was:\n\n```\nHello, Test\n```\n\nThis confirms that the request was understood and completed as intended.\"}}",
+        "role": "assistant"
+      }
+    ],
+    "percentage_used": 0.9185791015625,
+    "total_tokens": 32768,
+    "used_tokens": 301
+  },
+  "session_id": "hello_test_a63e133911916b31",
+  "status": "completed",
+  "timestamp": 1757966669
+}
\ No newline at end of file