Compare commits: micn/testi ... jochen_wri (28 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 9f6592efc2 | |
| | 99125fc39e | |
| | c58aa80932 | |
| | c6c35bf2ca | |
| | c9fde4ecef | |
| | 1e1702001c | |
| | c419833ddf | |
| | c19127f809 | |
| | bd29addefa | |
| | 467e300ec2 | |
| | 2e252cd298 | |
| | ad198a8501 | |
| | f501751bdf | |
| | a96a15d1fc | |
| | 24dc7ad642 | |
| | a097c3abef | |
| | 34e55050b3 | |
| | 551a577ee1 | |
| | 84718223bc | |
| | 28a83d2dcf | |
| | 0ce905dc74 | |
| | 9f0d5add1e | |
| | be6c6bfca4 | |
| | 94a41c5c34 | |
| | 09dbad2d68 | |
| | ffbf410b17 | |
| | c6f3f12b71 | |
| | e556f06b15 | |
Cargo.lock (generated): 24 changed lines
```diff
@@ -1365,11 +1365,15 @@ dependencies = [
  "dirs 5.0.1",
  "g3-config",
  "g3-core",
+ "g3-planner",
  "g3-providers",
+ "hex",
+ "indicatif",
  "ratatui",
  "rustyline",
  "serde",
  "serde_json",
+ "sha2",
  "termimad",
  "tokio",
  "tokio-util",
@@ -1409,6 +1413,7 @@ dependencies = [
  "config",
  "dirs 5.0.1",
  "serde",
+ "serde_json",
  "shellexpand",
  "tempfile",
  "thiserror 1.0.69",
@@ -1496,6 +1501,19 @@ dependencies = [
  "tracing",
 ]
+
+[[package]]
+name = "g3-planner"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "chrono",
+ "const_format",
+ "g3-providers",
+ "serde",
+ "serde_json",
+ "tokio",
+]
 
 [[package]]
 name = "g3-providers"
 version = "0.1.0"
@@ -1652,6 +1670,12 @@ version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
+
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
 
 [[package]]
 name = "home"
 version = "0.5.9"
```
Cargo.toml (workspace root):

```diff
@@ -2,6 +2,7 @@
 members = [
     "crates/g3-cli",
     "crates/g3-core",
+    "crates/g3-planner",
     "crates/g3-providers",
     "crates/g3-config",
     "crates/g3-execution",
```
```diff
@@ -34,3 +34,4 @@ temperature = 0.3 # Slightly higher temperature for more creative implementatio
 fallback_default_max_tokens = 8192
 enable_streaming = true
 timeout_seconds = 60
+allow_multiple_tool_calls = true # Enable multiple tool calls, will usually only work with Anthropic
```

```diff
@@ -57,6 +57,7 @@ timeout_seconds = 60
 # Retry configuration for recoverable errors (timeouts, rate limits, etc.)
 max_retry_attempts = 3 # Default mode retry attempts
 autonomous_max_retry_attempts = 6 # Autonomous mode retry attempts (higher for long-running tasks)
+allow_multiple_tool_calls = true # Enable multiple tool calls
 
 [computer_control]
 enabled = false # Set to true to enable computer control (requires OS permissions)
```
crates/g3-cli/Cargo.toml:

```diff
@@ -7,6 +7,8 @@ description = "CLI interface for G3 AI coding agent"
 [dependencies]
 g3-core = { path = "../g3-core" }
 g3-config = { path = "../g3-config" }
+g3-planner = { path = "../g3-planner" }
 g3-providers = { path = "../g3-providers" }
 clap = { workspace = true }
 tokio = { workspace = true }
 anyhow = { workspace = true }
@@ -17,6 +19,8 @@ serde_json = { workspace = true }
 rustyline = "17.0.1"
 dirs = "5.0"
 tokio-util = "0.7"
 sha2 = "0.10"
+hex = "0.4"
+indicatif = "0.17"
 chrono = { version = "0.4", features = ["serde"] }
 crossterm = "0.29.0"
```
```diff
@@ -98,6 +98,25 @@ fn generate_turn_histogram(turn_metrics: &[TurnMetrics]) -> String {
     histogram
 }
 
+/// Format a Duration as human-readable elapsed time (e.g., "1h 23m 45s", "5m 30s", "45s")
+fn format_elapsed_time(duration: Duration) -> String {
+    let total_secs = duration.as_secs();
+    let hours = total_secs / 3600;
+    let minutes = (total_secs % 3600) / 60;
+    let seconds = total_secs % 60;
+
+    if hours > 0 {
+        format!("{}h {}m {}s", hours, minutes, seconds)
+    } else if minutes > 0 {
+        format!("{}m {}s", minutes, seconds)
+    } else if seconds > 0 {
+        format!("{}s", seconds)
+    } else {
+        // For very short durations, show milliseconds
+        format!("{}ms", duration.as_millis())
+    }
+}
+
 /// Extract coach feedback by reading from the coach agent's specific log file
 /// Uses the coach agent's session ID to find the exact log file
 fn extract_coach_feedback_from_logs(
```
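As a sanity check on the helper above, here is a standalone sketch; the function body is copied verbatim from the hunk, and the expected strings follow from its arithmetic:

```rust
use std::time::Duration;

/// Copied from the diff above so the sketch is self-contained.
fn format_elapsed_time(duration: Duration) -> String {
    let total_secs = duration.as_secs();
    let hours = total_secs / 3600;
    let minutes = (total_secs % 3600) / 60;
    let seconds = total_secs % 60;

    if hours > 0 {
        format!("{}h {}m {}s", hours, minutes, seconds)
    } else if minutes > 0 {
        format!("{}m {}s", minutes, seconds)
    } else if seconds > 0 {
        format!("{}s", seconds)
    } else {
        format!("{}ms", duration.as_millis())
    }
}

fn main() {
    // 1h 23m 45s = 3600 + 1380 + 45 seconds
    assert_eq!(format_elapsed_time(Duration::from_secs(5025)), "1h 23m 45s");
    assert_eq!(format_elapsed_time(Duration::from_secs(330)), "5m 30s");
    assert_eq!(format_elapsed_time(Duration::from_secs(45)), "45s");
    // Sub-second durations fall through to milliseconds
    assert_eq!(format_elapsed_time(Duration::from_millis(250)), "250ms");
}
```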
```diff
@@ -159,11 +178,12 @@ fn extract_coach_feedback_from_logs(
 
 use clap::Parser;
 use g3_config::Config;
-use g3_core::{project::Project, ui_writer::UiWriter, Agent};
+use g3_core::{project::Project, ui_writer::UiWriter, Agent, DiscoveryOptions};
 use rustyline::error::ReadlineError;
 use rustyline::DefaultEditor;
 use std::path::Path;
+use std::path::PathBuf;
 use sha2::{Digest, Sha256};
 use tokio_util::sync::CancellationToken;
 use tracing::{error, info};
 
```
```diff
@@ -246,6 +266,10 @@ pub struct Cli {
     /// Enable WebDriver browser automation tools
     #[arg(long)]
     pub webdriver: bool,
+
+    /// Enable fast codebase discovery before first LLM turn
+    #[arg(long, value_name = "PATH")]
+    pub codebase_fast_start: Option<PathBuf>,
 }
 
 pub async fn run() -> Result<()> {
```
```diff
@@ -675,6 +699,7 @@ async fn run_accumulative_mode(
         cli.show_code,
         cli.max_turns,
         cli.quiet,
+        cli.codebase_fast_start.clone(),
     ) => result,
     _ = tokio::signal::ctrl_c() => {
         output.print("\n⚠️ Autonomous run cancelled by user (Ctrl+C)");
@@ -726,6 +751,7 @@ async fn run_autonomous_machine(
     show_code: bool,
     max_turns: usize,
     _quiet: bool,
+    _codebase_fast_start: Option<PathBuf>,
 ) -> Result<()> {
     println!("AUTONOMOUS_MODE_STARTED");
     println!("WORKSPACE: {}", project.workspace().display());
@@ -756,7 +782,7 @@ async fn run_autonomous_machine(
     );
 
     println!("TASK_START");
-    let result = agent.execute_task_with_timing(&task, None, false, show_prompt, show_code, true).await?;
+    let result = agent.execute_task_with_timing(&task, None, false, show_prompt, show_code, true, None).await?;
     println!("AGENT_RESPONSE:");
     println!("{}", result.response);
     println!("END_AGENT_RESPONSE");
```
```diff
@@ -783,13 +809,14 @@ async fn run_with_console_mode(
         cli.show_code,
         cli.max_turns,
         cli.quiet,
+        cli.codebase_fast_start.clone(),
     )
     .await?;
 } else if let Some(task) = cli.task {
     // Single-shot mode
     let output = SimpleOutput::new();
     let result = agent
-        .execute_task_with_timing(&task, None, false, cli.show_prompt, cli.show_code, true)
+        .execute_task_with_timing(&task, None, false, cli.show_prompt, cli.show_code, true, None)
         .await?;
     output.print_smart(&result.response);
 } else {
@@ -814,12 +841,13 @@ async fn run_with_machine_mode(
         cli.show_code,
         cli.max_turns,
         cli.quiet,
+        cli.codebase_fast_start.clone(),
     )
     .await?;
 } else if let Some(task) = cli.task {
     // Single-shot mode
     let result = agent
-        .execute_task_with_timing(&task, None, false, cli.show_prompt, cli.show_code, true)
+        .execute_task_with_timing(&task, None, false, cli.show_prompt, cli.show_code, true, None)
         .await?;
     println!("AGENT_RESPONSE:");
     println!("{}", result.response);
```
```diff
@@ -1211,7 +1239,7 @@ async fn execute_task<W: UiWriter>(
     // Execute task with cancellation support
     let execution_result = tokio::select! {
         result = agent.execute_task_with_timing_cancellable(
-            input, None, false, show_prompt, show_code, true, cancellation_token.clone()
+            input, None, false, show_prompt, show_code, true, cancellation_token.clone(), None
         ) => {
             result
         }
@@ -1402,7 +1430,7 @@ async fn execute_task_machine(
     // Execute task with cancellation support
     let execution_result = tokio::select! {
         result = agent.execute_task_with_timing_cancellable(
-            input, None, false, show_prompt, show_code, true, cancellation_token.clone()
+            input, None, false, show_prompt, show_code, true, cancellation_token.clone(), None
         ) => {
             result
         }
@@ -1551,6 +1579,7 @@ async fn run_autonomous(
     show_code: bool,
     max_turns: usize,
     quiet: bool,
+    codebase_fast_start: Option<PathBuf>,
 ) -> Result<()> {
     let start_time = std::time::Instant::now();
     let output = SimpleOutput::new();
```
```diff
@@ -1660,17 +1689,52 @@ async fn run_autonomous(
     } else {
         output.print("📋 Requirements loaded from requirements.md");
     }
 
+    // Calculate SHA256 of requirements
+    let mut hasher = Sha256::new();
+    hasher.update(requirements.as_bytes());
+    let requirements_sha = hex::encode(hasher.finalize());
+
+    output.print(&format!("🔒 Requirements SHA256: {}", requirements_sha));
+
+    // Pass SHA to agent for staleness checking
+    agent.set_requirements_sha(requirements_sha.clone());
+
     let loop_start = Instant::now();
     output.print("🔄 Starting coach-player feedback loop...");
 
-    // Check if implementation files already exist
-    let skip_first_player = project.has_implementation_files();
-    if skip_first_player {
-        output.print("📂 Detected existing implementation files in workspace");
-        output.print("⏭️ Skipping first player turn - proceeding directly to coach review");
-    } else {
-        output.print("📂 No existing implementation files detected");
-        output.print("🎯 Starting with player implementation");
-    }
+    // Load fast-discovery messages before the loop starts (if enabled)
+    let (discovery_messages, discovery_working_dir): (Vec<g3_providers::Message>, Option<String>) =
+        if let Some(ref codebase_path) = codebase_fast_start {
+            // Canonicalize the path to ensure it's absolute
+            let canonical_path = codebase_path.canonicalize().unwrap_or_else(|_| codebase_path.clone());
+            let path_str = canonical_path.to_string_lossy();
+            output.print(&format!("🔍 Fast-discovery mode: will explore codebase at {}", path_str));
+            // Get the provider from the agent and use async LLM-based discovery
+            match agent.get_provider() {
+                Ok(provider) => {
+                    // Create a status callback that prints to output
+                    let output_clone = output.clone();
+                    let status_callback: g3_planner::StatusCallback = Box::new(move |msg: &str| {
+                        output_clone.print(msg);
+                    });
+                    match g3_planner::get_initial_discovery_messages(&path_str, Some(&requirements), provider, Some(&status_callback)).await {
+                        Ok(messages) => (messages, Some(path_str.to_string())),
+                        Err(e) => {
+                            output.print(&format!("⚠️ LLM discovery failed: {}, skipping fast-start", e));
+                            (Vec::new(), None)
+                        }
+                    }
+                }
+                Err(e) => {
+                    output.print(&format!("⚠️ Could not get provider: {}, skipping fast-start", e));
+                    (Vec::new(), None)
+                }
+            }
+        } else {
+            (Vec::new(), None)
+        };
+    let has_discovery = !discovery_messages.is_empty();
 
     let mut turn = 1;
     let mut coach_feedback = String::new();
```
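The fingerprint above is a hex-encoded SHA-256 of the requirements text. A self-contained sketch of the same computation, assuming the `sha2` and `hex` crates that the updated manifests pull in (the requirements string here is hypothetical):

```rust
use sha2::{Digest, Sha256};

fn main() {
    // Hypothetical requirements contents; in g3 this comes from requirements.md.
    let requirements = "Build a CLI tool that greets the user.";

    let mut hasher = Sha256::new();
    hasher.update(requirements.as_bytes());
    let requirements_sha = hex::encode(hasher.finalize());

    println!("🔒 Requirements SHA256: {}", requirements_sha);
    // The agent later expects this digest as the first line of todo.g3.md:
    // {{Based on the requirements file with SHA256: <SHA>}}
}
```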
```diff
@@ -1679,8 +1743,7 @@ async fn run_autonomous(
     loop {
         let turn_start_time = Instant::now();
         let turn_start_tokens = agent.get_context_window().used_tokens;
-        // Skip player turn if it's the first turn and implementation files exist
-        if !(turn == 1 && skip_first_player) {
 
         output.print(&format!(
             "\n=== TURN {}/{} - PLAYER MODE ===",
             turn, max_turns
@@ -1692,8 +1755,8 @@ async fn run_autonomous(
         // Player mode: implement requirements (with coach feedback if available)
         let player_prompt = if coach_feedback.is_empty() {
             format!(
-                "You are G3 in implementation mode. Read and implement the following requirements:\n\n{}\n\nImplement this step by step, creating all necessary files and code.",
-                requirements
+                "You are G3 in implementation mode. Read and implement the following requirements:\n\n{}\n\nRequirements SHA256: {}\n\nImplement this step by step, creating all necessary files and code.",
+                requirements, requirements_sha
             )
         } else {
             format!(
@@ -1702,7 +1765,7 @@ async fn run_autonomous(
             )
         };
 
-        output.print("🎯 Starting player implementation...");
+        output.print(&format!("🎯 Starting player implementation... (elapsed: {})", format_elapsed_time(loop_start.elapsed())));
 
         // Display what feedback the player is receiving
         // If there's no coach feedback on subsequent turns, this is an error
@@ -1737,6 +1800,12 @@ async fn run_autonomous(
             show_prompt,
             show_code,
             true,
+            if has_discovery {
+                Some(DiscoveryOptions {
+                    messages: &discovery_messages,
+                    fast_start_path: discovery_working_dir.as_deref(),
+                })
+            } else { None },
         )
         .await
         {
```
```diff
@@ -1868,7 +1937,6 @@ async fn run_autonomous(
 
         // Give some time for file operations to complete
         tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
-    }
 
     // Create a new agent instance for coach mode to ensure fresh context
     // Use the same config with overrides that was passed to the player agent
@@ -1924,7 +1992,7 @@ Remember: Be clear in your review and concise in your feedback. APPROVE iff the
         requirements
     );
 
-    output.print("🎓 Starting coach review...");
+    output.print(&format!("🎓 Starting coach review... (elapsed: {})", format_elapsed_time(loop_start.elapsed())));
 
     // Execute coach task with retry on error
     let mut coach_retry_count = 0;
@@ -1934,7 +2002,13 @@ Remember: Be clear in your review and concise in your feedback. APPROVE iff the
 
     loop {
         match coach_agent
-            .execute_task_with_timing(&coach_prompt, None, false, show_prompt, show_code, true)
+            .execute_task_with_timing(&coach_prompt, None, false, show_prompt, show_code, true,
+                if has_discovery {
+                    Some(DiscoveryOptions {
+                        messages: &discovery_messages,
+                        fast_start_path: discovery_working_dir.as_deref(),
+                    })
+                } else { None })
             .await
         {
             Ok(result) => {
@@ -2164,9 +2238,9 @@ Remember: Be clear in your review and concise in your feedback. APPROVE iff the
     output.print(&"=".repeat(60));
 
     if implementation_approved {
-        output.print("\n🎉 Autonomous mode completed successfully");
+        output.print(&format!("\n🎉 Autonomous mode completed successfully (total loop time: {})", format_elapsed_time(loop_start.elapsed())));
     } else {
-        output.print("\n🔄 Autonomous mode terminated (max iterations)");
+        output.print(&format!("\n🔄 Autonomous mode terminated (max iterations) (total loop time: {})", format_elapsed_time(loop_start.elapsed())));
     }
 
     Ok(())
```
```diff
@@ -91,4 +91,18 @@ impl UiWriter for MachineUiWriter {
     fn wants_full_output(&self) -> bool {
         true // Machine mode wants complete, untruncated output
     }
+
+    fn prompt_user_yes_no(&self, message: &str) -> bool {
+        // In machine mode, we can't interactively prompt, so we log the request and return true
+        // to allow automation to proceed.
+        println!("PROMPT_USER_YES_NO: {}", message);
+        true
+    }
+
+    fn prompt_user_choice(&self, message: &str, options: &[&str]) -> usize {
+        println!("PROMPT_USER_CHOICE: {}", message);
+        println!("OPTIONS: {:?}", options);
+        // Default to first option (index 0) for automation
+        0
+    }
 }
```
```diff
@@ -1,4 +1,5 @@
 /// Simple output helper for printing messages
+#[derive(Clone)]
 pub struct SimpleOutput {
     machine_mode: bool,
 }
```
```diff
@@ -343,5 +343,40 @@ impl UiWriter for ConsoleUiWriter {
     fn flush(&self) {
         let _ = io::stdout().flush();
     }
+
+    fn prompt_user_yes_no(&self, message: &str) -> bool {
+        print!("{} [y/N] ", message);
+        let _ = io::stdout().flush();
+
+        let mut input = String::new();
+        if io::stdin().read_line(&mut input).is_ok() {
+            let trimmed = input.trim().to_lowercase();
+            trimmed == "y" || trimmed == "yes"
+        } else {
+            false
+        }
+    }
+
+    fn prompt_user_choice(&self, message: &str, options: &[&str]) -> usize {
+        println!("{} ", message);
+        for (i, option) in options.iter().enumerate() {
+            println!("  [{}] {}", i + 1, option);
+        }
+        print!("Select an option (1-{}): ", options.len());
+        let _ = io::stdout().flush();
+
+        loop {
+            let mut input = String::new();
+            if io::stdin().read_line(&mut input).is_ok() {
+                if let Ok(choice) = input.trim().parse::<usize>() {
+                    if choice > 0 && choice <= options.len() {
+                        return choice - 1;
+                    }
+                }
+            }
+            print!("Invalid choice. Please select (1-{}): ", options.len());
+            let _ = io::stdout().flush();
+        }
+    }
 }
```
```diff
@@ -36,11 +36,20 @@ fn main() {
     // Copy the dylib to the output directory so it can be found at runtime
     let target_dir = manifest_dir.parent().unwrap().parent().unwrap().join("target");
     let profile = env::var("PROFILE").unwrap_or_else(|_| "debug".to_string());
-    let output_dir = target_dir.join(&profile);
+
+    // Determine the actual target directory (could be llvm-cov-target or regular target)
+    let target_dir_name = env::var("CARGO_TARGET_DIR")
+        .unwrap_or_else(|_| target_dir.to_string_lossy().to_string());
+    let actual_target_dir = PathBuf::from(&target_dir_name);
+    let output_dir = actual_target_dir.join(&profile);
 
     let dylib_src = lib_path.join("libVisionBridge.dylib");
     let dylib_dst = output_dir.join("libVisionBridge.dylib");
 
     // Create output directory if it doesn't exist
     std::fs::create_dir_all(&output_dir)
         .expect(&format!("Failed to create output directory {}", output_dir.display()));
 
     std::fs::copy(&dylib_src, &dylib_dst)
         .expect(&format!("Failed to copy dylib from {} to {}", dylib_src.display(), dylib_dst.display()));
```
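The build.rs change matters when coverage runs redirect the target directory (cargo llvm-cov sets `CARGO_TARGET_DIR`). A standalone sketch of the same fallback; the default path here is hypothetical:

```rust
use std::env;
use std::path::PathBuf;

fn main() {
    // Hypothetical workspace-relative default, standing in for the
    // manifest-derived `target_dir` in build.rs.
    let default_target = PathBuf::from("/workspace/target");

    // cargo llvm-cov sets CARGO_TARGET_DIR (e.g. to target/llvm-cov-target),
    // so honoring it keeps the copied dylib next to the instrumented binaries.
    let target_dir_name = env::var("CARGO_TARGET_DIR")
        .unwrap_or_else(|_| default_target.to_string_lossy().to_string());
    let actual_target_dir = PathBuf::from(&target_dir_name);

    println!("copying artifacts under {}", actual_target_dir.display());
}
```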
crates/g3-config/Cargo.toml:

```diff
@@ -15,3 +15,4 @@ dirs = "5.0"
 
 [dev-dependencies]
 tempfile = "3.8"
+serde_json = { workspace = true }
```
```diff
@@ -70,10 +70,17 @@ pub struct AgentConfig {
     pub max_context_length: Option<u32>,
     pub fallback_default_max_tokens: usize,
     pub enable_streaming: bool,
+    pub allow_multiple_tool_calls: bool,
     pub timeout_seconds: u64,
     pub auto_compact: bool,
     pub max_retry_attempts: u32,
     pub autonomous_max_retry_attempts: u32,
+    #[serde(default = "default_check_todo_staleness")]
+    pub check_todo_staleness: bool,
 }
+
+fn default_check_todo_staleness() -> bool {
+    true
+}
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -145,10 +152,12 @@ impl Default for Config {
                 max_context_length: None,
                 fallback_default_max_tokens: 8192,
                 enable_streaming: true,
+                allow_multiple_tool_calls: false,
                 timeout_seconds: 60,
                 auto_compact: true,
                 max_retry_attempts: 3,
                 autonomous_max_retry_attempts: 6,
+                check_todo_staleness: true,
             },
             computer_control: ComputerControlConfig::default(),
             webdriver: WebDriverConfig::default(),
@@ -265,10 +274,12 @@ impl Config {
                 max_context_length: None,
                 fallback_default_max_tokens: 8192,
                 enable_streaming: true,
+                allow_multiple_tool_calls: false,
                 timeout_seconds: 60,
                 auto_compact: true,
                 max_retry_attempts: 3,
                 autonomous_max_retry_attempts: 6,
+                check_todo_staleness: true,
             },
             computer_control: ComputerControlConfig::default(),
             webdriver: WebDriverConfig::default(),
```
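The `#[serde(default = ...)]` attribute above is what keeps older config files loading after the new field is introduced. A minimal sketch of that behavior with stand-in types, assuming `serde` (derive feature) and `serde_json`:

```rust
use serde::Deserialize;

fn default_check_todo_staleness() -> bool {
    true
}

// Stand-in for the real g3-config struct; only two fields for illustration.
#[derive(Debug, Deserialize)]
struct AgentConfig {
    max_retry_attempts: u32,
    #[serde(default = "default_check_todo_staleness")]
    check_todo_staleness: bool,
}

fn main() {
    // An "old" config written before the field existed still deserializes:
    let cfg: AgentConfig = serde_json::from_str(r#"{"max_retry_attempts": 3}"#).unwrap();
    assert!(cfg.check_todo_staleness); // falls back to the default fn
    println!("{:?}", cfg);
}
```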
crates/g3-config/tests/test_multiple_tool_calls.rs (new file): 40 lines

```diff
@@ -0,0 +1,40 @@
+#[cfg(test)]
+mod test_multiple_tool_calls {
+    use g3_config::{Config, AgentConfig};
+
+    #[test]
+    fn test_config_has_multiple_tool_calls_field() {
+        let config = Config::default();
+
+        // Test that the field exists and defaults to false
+        assert_eq!(config.agent.allow_multiple_tool_calls, false);
+
+        // Test that we can create a config with the field set to true
+        let mut custom_config = Config::default();
+        custom_config.agent.allow_multiple_tool_calls = true;
+        assert_eq!(custom_config.agent.allow_multiple_tool_calls, true);
+    }
+
+    #[test]
+    fn test_agent_config_serialization() {
+        let agent_config = AgentConfig {
+            max_context_length: Some(100000),
+            fallback_default_max_tokens: 8192,
+            enable_streaming: true,
+            allow_multiple_tool_calls: true,
+            timeout_seconds: 60,
+            auto_compact: true,
+            max_retry_attempts: 3,
+            autonomous_max_retry_attempts: 6,
+            check_todo_staleness: true,
+        };
+
+        // Test serialization
+        let json = serde_json::to_string(&agent_config).unwrap();
+        assert!(json.contains("\"allow_multiple_tool_calls\":true"));
+
+        // Test deserialization
+        let deserialized: AgentConfig = serde_json::from_str(&json).unwrap();
+        assert_eq!(deserialized.allow_multiple_tool_calls, true);
+    }
+}
```
crates/g3-console/Cargo.toml:

```diff
@@ -6,6 +6,9 @@ authors = ["G3 Team"]
 description = "Web console for monitoring and managing g3 instances"
 license = "MIT"
 
+[lib]
+path = "src/lib.rs"
+
 [[bin]]
 name = "g3-console"
 path = "src/main.rs"
```
crates/g3-console/src/lib.rs (new file): 5 lines

```diff
@@ -0,0 +1,5 @@
+pub mod api;
+pub mod logs;
+pub mod models;
+pub mod process;
+pub mod launch;
```
```diff
@@ -1,8 +1,6 @@
-mod api;
-mod logs;
-mod models;
-mod process;
-mod launch;
+use g3_console::api;
+use g3_console::process;
+use g3_console::launch;
 
 use api::control::{kill_instance, launch_instance, restart_instance};
 use api::instances::{get_instance, get_file_content, list_instances};
```
```diff
@@ -48,7 +48,7 @@ pub async fn another_async(x: i32) -> Result<(), ()> {
     println!("{}\n", "=".repeat(80));
 
     let mut parser = Parser::new();
-    let language: Language = tree_sitter_rust::language().into();
+    let language: Language = tree_sitter_rust::LANGUAGE.into();
     parser.set_language(&language)?;
 
     let tree = parser.parse(source_code, None).unwrap();
@@ -46,7 +46,7 @@ class MyClass:
     println!("{}\n", "=".repeat(80));
 
     let mut parser = Parser::new();
-    let language: Language = tree_sitter_python::language().into();
+    let language: Language = tree_sitter_python::LANGUAGE.into();
     parser.set_language(&language)?;
 
     let tree = parser.parse(source_code, None).unwrap();
@@ -1,6 +1,7 @@
 //! Test Python async query
 
 use tree_sitter::{Parser, Query, QueryCursor, Language};
+use streaming_iterator::StreamingIterator;
 
 fn main() -> anyhow::Result<()> {
     let source_code = r#"
@@ -12,7 +13,7 @@ async def async_function():
     "#;
 
     let mut parser = Parser::new();
-    let language: Language = tree_sitter_python::language().into();
+    let language: Language = tree_sitter_python::LANGUAGE.into();
     parser.set_language(&language)?;
 
     let tree = parser.parse(source_code, None).unwrap();
```
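These hunks track an upstream binding change: recent tree-sitter grammar crates expose a `LANGUAGE` constant in place of the older `language()` function. A minimal parse using the new form, assuming compatible `tree-sitter`, `tree-sitter-rust`, and `anyhow` versions:

```rust
use tree_sitter::{Language, Parser};

fn main() -> anyhow::Result<()> {
    let mut parser = Parser::new();
    // LANGUAGE is a constant in newer grammar crates; .into() converts it
    // to a tree_sitter::Language, just as the diff does.
    let language: Language = tree_sitter_rust::LANGUAGE.into();
    parser.set_language(&language)?;

    let tree = parser.parse("fn main() {}", None).unwrap();
    println!("{}", tree.root_node().to_sexp());
    Ok(())
}
```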
```diff
@@ -27,14 +27,18 @@ use g3_computer_control::WebDriverController;
 use g3_config::Config;
 use g3_execution::CodeExecutor;
 use g3_providers::{CacheControl, CompletionRequest, Message, MessageRole, ProviderRegistry, Tool};
+use chrono::Local;
 #[allow(unused_imports)]
 use regex::Regex;
 use serde::{Deserialize, Serialize};
 use serde_json::json;
+use std::fs::OpenOptions;
+use std::io::Write;
+use std::sync::{Mutex, OnceLock};
 use std::time::{Duration, Instant};
 use tokio_util::sync::CancellationToken;
 use tracing::{debug, error, info, warn};
-use prompts::{SYSTEM_PROMPT_FOR_NON_NATIVE_TOOL_USE, SYSTEM_PROMPT_FOR_NATIVE_TOOL_USE};
+use prompts::{SYSTEM_PROMPT_FOR_NON_NATIVE_TOOL_USE, get_system_prompt_for_native};
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ToolCall {
@@ -42,6 +46,13 @@ pub struct ToolCall {
     pub args: serde_json::Value, // Should be a JSON object with tool-specific arguments
 }
 
+/// Options for fast-start discovery execution
+#[derive(Debug, Clone)]
+pub struct DiscoveryOptions<'a> {
+    pub messages: &'a [Message],
+    pub fast_start_path: Option<&'a str>,
+}
+
 #[derive(Debug, Clone)]
 pub enum StreamState {
     Generating,
```
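`DiscoveryOptions` borrows its message slice rather than owning it, so the caller keeps the buffer alive across the task call. A minimal, self-contained sketch of that borrowing pattern; the types here are stand-ins, not the real g3-providers/g3-core ones:

```rust
#[derive(Debug, Clone)]
struct Message {
    content: String,
}

// Mirrors the shape of the real struct: borrowed slice plus optional path.
#[derive(Debug, Clone)]
struct DiscoveryOptions<'a> {
    messages: &'a [Message],
    fast_start_path: Option<&'a str>,
}

fn replay(options: Option<DiscoveryOptions<'_>>) {
    if let Some(opts) = options {
        for msg in opts.messages {
            println!("replaying {} in {:?}", msg.content, opts.fast_start_path);
        }
    }
}

fn main() {
    let discovery_messages = vec![Message {
        content: r#"{"tool":"bash","args":{}}"#.into(),
    }];
    // Caller retains ownership; DiscoveryOptions only borrows.
    replay(Some(DiscoveryOptions {
        messages: &discovery_messages,
        fast_start_path: Some("/path/to/codebase"),
    }));
    replay(None);
}
```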
```diff
@@ -755,6 +766,9 @@ pub struct Agent<W: UiWriter> {
     macax_controller:
         std::sync::Arc<tokio::sync::RwLock<Option<g3_computer_control::MacAxController>>>,
     tool_call_count: usize,
+    requirements_sha: Option<String>,
+    /// Working directory for tool execution (set by --codebase-fast-start)
+    working_dir: Option<String>,
 }
 
 impl<W: UiWriter> Agent<W> {
@@ -958,7 +972,7 @@ impl<W: UiWriter> Agent<W> {
 
         let system_prompt = if provider_has_native_tool_calling {
             // For native tool calling providers, use a more explicit system prompt
-            SYSTEM_PROMPT_FOR_NATIVE_TOOL_USE.to_string()
+            get_system_prompt_for_native(config.agent.allow_multiple_tool_calls)
         } else {
             // For non-native providers (embedded models), use JSON format instructions
             SYSTEM_PROMPT_FOR_NON_NATIVE_TOOL_USE.to_string()
@@ -1026,6 +1040,8 @@ impl<W: UiWriter> Agent<W> {
                 }))
             },
             tool_call_count: 0,
+            requirements_sha: None,
+            working_dir: None,
         })
     }
```
```diff
@@ -1214,11 +1230,73 @@ impl<W: UiWriter> Agent<W> {
         Ok(context_length)
     }
 
+    fn tool_log_handle() -> Option<&'static Mutex<std::fs::File>> {
+        static TOOL_LOG: OnceLock<Option<Mutex<std::fs::File>>> = OnceLock::new();
+
+        TOOL_LOG
+            .get_or_init(|| {
+                if let Err(e) = std::fs::create_dir_all("logs") {
+                    error!("Failed to create logs directory for tool log: {}", e);
+                    return None;
+                }
+
+                let ts = Local::now().format("%Y%m%d_%H%M%S").to_string();
+                let path = format!("logs/tool_calls_{}.log", ts);
+
+                match OpenOptions::new()
+                    .create(true)
+                    .append(true)
+                    .open(&path)
+                {
+                    Ok(file) => Some(Mutex::new(file)),
+                    Err(e) => {
+                        error!("Failed to open tool log file {}: {}", path, e);
+                        None
+                    }
+                }
+            })
+            .as_ref()
+    }
+
+    fn log_tool_call(&self, tool_call: &ToolCall, response: &str) {
+        if let Some(handle) = Self::tool_log_handle() {
+            let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string();
+            let args_str = serde_json::to_string(&tool_call.args)
+                .unwrap_or_else(|_| "<unserializable>".to_string());
+
+            fn sanitize(s: &str) -> String {
+                s.replace('\n', "\\n")
+            }
+            fn truncate(s: &str, limit: usize) -> String {
+                s.chars().take(limit).collect()
+            }
+
+            let args_snippet = truncate(&sanitize(&args_str), 80);
+            let response_snippet = truncate(&sanitize(response), 80);
+
+            let tool_field = format!("{:<15}", tool_call.tool);
+            let line = format!(
+                "{} {} {} 🟩 {}\n",
+                timestamp, tool_field, args_snippet, response_snippet
+            );
+
+            if let Ok(mut file) = handle.lock() {
+                let _ = file.write_all(line.as_bytes());
+                let _ = file.flush();
+            }
+        }
+    }
+
     pub fn get_provider_info(&self) -> Result<(String, String)> {
         let provider = self.providers.get(None)?;
         Ok((provider.name().to_string(), provider.model().to_string()))
     }
 
+    /// Get the default LLM provider
+    pub fn get_provider(&self) -> Result<&dyn g3_providers::LLMProvider> {
+        self.providers.get(None)
+    }
+
     /// Get the current session ID for this agent
     pub fn get_session_id(&self) -> Option<&str> {
         self.session_id.as_deref()
```
```diff
@@ -1230,7 +1308,7 @@ impl<W: UiWriter> Agent<W> {
         language: Option<&str>,
         _auto_execute: bool,
     ) -> Result<TaskResult> {
-        self.execute_task_with_options(description, language, false, false, false)
+        self.execute_task_with_options(description, language, false, false, false, None)
             .await
     }
 
@@ -1241,6 +1319,7 @@ impl<W: UiWriter> Agent<W> {
         _auto_execute: bool,
         show_prompt: bool,
         show_code: bool,
+        discovery_options: Option<DiscoveryOptions<'_>>,
     ) -> Result<TaskResult> {
         self.execute_task_with_timing(
             description,
@@ -1249,6 +1328,7 @@ impl<W: UiWriter> Agent<W> {
             show_prompt,
             show_code,
             false,
+            discovery_options,
         )
         .await
     }
@@ -1261,6 +1341,7 @@ impl<W: UiWriter> Agent<W> {
         show_prompt: bool,
         show_code: bool,
         show_timing: bool,
+        discovery_options: Option<DiscoveryOptions<'_>>,
     ) -> Result<TaskResult> {
         // Create a cancellation token that never cancels for backward compatibility
         let cancellation_token = CancellationToken::new();
@@ -1272,6 +1353,7 @@ impl<W: UiWriter> Agent<W> {
             show_code,
             show_timing,
             cancellation_token,
+            discovery_options,
         )
         .await
     }
@@ -1286,6 +1368,7 @@ impl<W: UiWriter> Agent<W> {
         show_code: bool,
         show_timing: bool,
         cancellation_token: CancellationToken,
+        discovery_options: Option<DiscoveryOptions<'_>>,
     ) -> Result<TaskResult> {
         // Execute the task directly without splitting
         self.execute_single_task(
@@ -1294,6 +1377,7 @@ impl<W: UiWriter> Agent<W> {
             show_code,
             show_timing,
             cancellation_token,
+            discovery_options,
         )
         .await
     }
@@ -1305,6 +1389,7 @@ impl<W: UiWriter> Agent<W> {
         _show_code: bool,
         show_timing: bool,
         cancellation_token: CancellationToken,
+        discovery_options: Option<DiscoveryOptions<'_>>,
     ) -> Result<TaskResult> {
         // Reset the JSON tool call filter state at the start of each new task
         // This prevents the filter from staying in suppression mode between user interactions
```
```diff
@@ -1322,6 +1407,39 @@ impl<W: UiWriter> Agent<W> {
         let user_message = Message::new(MessageRole::User, format!("Task: {}", description));
         self.context_window.add_message(user_message);
 
+        // Execute fast-discovery tool calls if provided (immediately after user message)
+        if let Some(ref options) = discovery_options {
+            self.ui_writer.println("▶️ Playing back discovery commands...");
+            // Store the working directory for subsequent tool calls in the streaming loop
+            if let Some(path) = options.fast_start_path {
+                self.working_dir = Some(path.to_string());
+            }
+            let provider = self.providers.get(None)?;
+            let supports_cache = provider.supports_cache_control();
+            let message_count = options.messages.len();
+
+            for (idx, discovery_msg) in options.messages.iter().enumerate() {
+                if let Ok(tool_call) = serde_json::from_str::<ToolCall>(&discovery_msg.content) {
+                    self.add_message_to_context(discovery_msg.clone());
+                    let result = self.execute_tool_call_in_dir(&tool_call, options.fast_start_path).await
+                        .unwrap_or_else(|e| format!("Error: {}", e));
+
+                    // Add cache_control to the last user message if provider supports it (anthropic)
+                    let is_last = idx == message_count - 1;
+                    let result_message = if is_last && supports_cache {
+                        Message::with_cache_control(
+                            MessageRole::User,
+                            format!("Tool result: {}", result),
+                            CacheControl::ephemeral(),
+                        )
+                    } else {
+                        Message::new(MessageRole::User, format!("Tool result: {}", result))
+                    };
+                    self.add_message_to_context(result_message);
+                }
+            }
+        }
+
         // Use the complete conversation history for the request
         let messages = self.context_window.conversation_history.clone();
```
```diff
@@ -1512,6 +1630,24 @@ impl<W: UiWriter> Agent<W> {
         &self.context_window
     }
 
+    /// Add a message directly to the context window.
+    /// Used for injecting discovery messages before the first LLM turn.
+    pub fn add_message_to_context(&mut self, message: Message) {
+        self.context_window.add_message(message);
+    }
+
+    /// Execute a tool call and return the result.
+    /// This is a public wrapper around execute_tool for use by external callers
+    /// like the planner's fast-discovery feature.
+    pub async fn execute_tool_call(&mut self, tool_call: &ToolCall) -> Result<String> {
+        self.execute_tool(tool_call).await
+    }
+
+    /// Execute a tool call with an optional working directory (for discovery commands)
+    pub async fn execute_tool_call_in_dir(&mut self, tool_call: &ToolCall, working_dir: Option<&str>) -> Result<String> {
+        self.execute_tool_in_dir(tool_call, working_dir).await
+    }
+
     /// Log an error message to the session JSON file as the last message
     /// This is used in autonomous mode to record context length exceeded errors
     pub fn log_error_to_session(
@@ -1918,6 +2054,10 @@ impl<W: UiWriter> Agent<W> {
         &self.config
     }
 
+    pub fn set_requirements_sha(&mut self, sha: String) {
+        self.requirements_sha = Some(sha);
+    }
+
     async fn stream_completion(
         &mut self,
         request: CompletionRequest,
@@ -2091,6 +2231,15 @@ impl<W: UiWriter> Agent<W> {
                     "required": ["content"]
                 }),
             },
+            Tool {
+                name: "code_coverage".to_string(),
+                description: "Generate a code coverage report for the entire workspace using cargo llvm-cov. This runs all tests with coverage instrumentation and returns a summary of coverage statistics. Requires llvm-tools-preview and cargo-llvm-cov to be installed (they will be auto-installed if missing).".to_string(),
+                input_schema: json!({
+                    "type": "object",
+                    "properties": {},
+                    "required": []
+                }),
+            },
         ];
 
         // Add code_search tool
```
```diff
@@ -2729,8 +2878,12 @@ impl<W: UiWriter> Agent<W> {
             tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;
         }
 
+        // Get provider info for logging, then drop it to avoid borrow issues
+        let (provider_name, provider_model) = {
             let provider = self.providers.get(None)?;
             debug!("Got provider: {}", provider.name());
+            (provider.name().to_string(), provider.model().to_string())
+        };
+        debug!("Got provider: {}", provider_name);
 
         // Create error context for detailed logging
         let last_prompt = request
@@ -2743,8 +2896,8 @@ impl<W: UiWriter> Agent<W> {
 
         let error_context = ErrorContext::new(
             "stream_completion".to_string(),
-            provider.name().to_string(),
-            provider.model().to_string(),
+            provider_name.clone(),
+            provider_model.clone(),
             last_prompt,
             self.session_id.clone(),
             self.context_window.used_tokens,
@@ -2757,8 +2910,8 @@ impl<W: UiWriter> Agent<W> {
 
         // Log initial request details
         debug!("Starting stream with provider={}, model={}, messages={}, tools={}, max_tokens={:?}",
-            provider.name(),
-            provider.model(),
+            provider_name,
+            provider_model,
             request.messages.len(),
             request.tools.is_some(),
             request.max_tokens
```
```diff
@@ -2848,10 +3001,125 @@ impl<W: UiWriter> Agent<W> {
                 // Process chunk with the new parser
                 let completed_tools = parser.process_chunk(&chunk);
 
-                // Handle completed tool calls
-                if let Some(tool_call) = completed_tools.into_iter().next() {
+                // Handle completed tool calls - process all if multiple calls enabled
+                let tools_to_process: Vec<ToolCall> = if self.config.agent.allow_multiple_tool_calls {
+                    completed_tools
+                } else {
+                    // Original behavior - only take the first tool
+                    completed_tools.into_iter().take(1).collect()
+                };
+
+                // Helper function to check if two tool calls are duplicates
+                let are_duplicates = |tc1: &ToolCall, tc2: &ToolCall| -> bool {
+                    tc1.tool == tc2.tool && tc1.args == tc2.args
+                };
+
+                // De-duplicate tool calls and track duplicates
+                let mut seen_in_chunk: Vec<ToolCall> = Vec::new();
+                let mut deduplicated_tools: Vec<(ToolCall, Option<String>)> = Vec::new();
+
+                for tool_call in tools_to_process {
+                    let mut duplicate_type = None;
+
+                    // Check for duplicates in current chunk
+                    if seen_in_chunk.iter().any(|tc| are_duplicates(tc, &tool_call)) {
+                        duplicate_type = Some("DUP IN CHUNK".to_string());
+                    } else {
+                        // Check for duplicate against previous message in history
+                        // Look at the last assistant message that contains tool calls
+                        let mut found_in_prev = false;
+                        for msg in self.context_window.conversation_history.iter().rev() {
+                            if matches!(msg.role, MessageRole::Assistant) {
+                                // Try to parse tool calls from the message content
+                                if msg.content.contains(r#"\"tool\""#) {
+                                    // Simple JSON extraction for tool calls
+                                    let content = &msg.content;
+                                    let mut start_idx = 0;
+                                    while let Some(tool_start) = content[start_idx..].find(r#"{\"tool\""#) {
+                                        let tool_start = start_idx + tool_start;
+                                        // Find the end of this JSON object
+                                        let mut brace_count = 0;
+                                        let mut in_string = false;
+                                        let mut escape_next = false;
+                                        let mut end_idx = tool_start;
+
+                                        for (i, ch) in content[tool_start..].char_indices() {
+                                            if escape_next {
+                                                escape_next = false;
+                                                continue;
+                                            }
+                                            if ch == '\\' && in_string {
+                                                escape_next = true;
+                                                continue;
+                                            }
+                                            if ch == '"' && !escape_next {
+                                                in_string = !in_string;
+                                            }
+                                            if !in_string {
+                                                if ch == '{' {
+                                                    brace_count += 1;
+                                                } else if ch == '}' {
+                                                    brace_count -= 1;
+                                                    if brace_count == 0 {
+                                                        end_idx = tool_start + i + 1;
+                                                        break;
+                                                    }
+                                                }
+                                            }
+                                        }
+
+                                        if end_idx > tool_start {
+                                            let tool_json = &content[tool_start..end_idx];
+                                            if let Ok(prev_tool) = serde_json::from_str::<ToolCall>(tool_json) {
+                                                if are_duplicates(&prev_tool, &tool_call) {
+                                                    found_in_prev = true;
+                                                    break;
+                                                }
+                                            }
+                                        }
+                                        start_idx = end_idx;
+                                    }
+                                }
+                                // Only check the most recent assistant message
+                                break;
+                            }
+                        }
+
+                        if found_in_prev {
+                            duplicate_type = Some("DUP IN MSG".to_string());
+                        }
+                    }
+
+                    // Add to seen list if not a duplicate in chunk
+                    if duplicate_type.as_ref().map_or(true, |s| s != "DUP IN CHUNK") {
+                        seen_in_chunk.push(tool_call.clone());
+                    }
+
+                    deduplicated_tools.push((tool_call, duplicate_type));
+                }
+
+                // Process each tool call
+                for (tool_call, duplicate_type) in deduplicated_tools {
                     debug!("Processing completed tool call: {:?}", tool_call);
 
+                    // If it's a duplicate, log it and return a warning
+                    if let Some(dup_type) = &duplicate_type {
+                        // Log the duplicate with red prefix
+                        let prefixed_tool_name = format!("🟥 {} {}", tool_call.tool, dup_type);
+                        let warning_msg = format!(
+                            "⚠️ Duplicate tool call detected ({}): Skipping execution of {} with args {}",
+                            dup_type,
+                            tool_call.tool,
+                            serde_json::to_string(&tool_call.args).unwrap_or_else(|_| "<unserializable>".to_string())
+                        );
+
+                        // Log to tool log with red prefix
+                        let mut modified_tool_call = tool_call.clone();
+                        modified_tool_call.tool = prefixed_tool_name;
+                        self.log_tool_call(&modified_tool_call, &warning_msg);
+                        continue; // Skip execution of duplicate
+                    }
+
                     // Check if we should auto-compact at 90% BEFORE executing the tool
                     // We need to do this before any borrows of self
                     if self.auto_compact && self.context_window.percentage_used() >= 90.0 {
```
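The duplicate rule in this hunk is simply name-plus-args equality. A self-contained sketch of the in-chunk deduplication pass with a stand-in `ToolCall` (assumes `serde_json`):

```rust
use serde_json::json;

// Stand-in for the g3-core ToolCall type.
#[derive(Debug, Clone)]
struct ToolCall {
    tool: String,
    args: serde_json::Value,
}

fn main() {
    let a = ToolCall { tool: "read_file".into(), args: json!({"path": "src/lib.rs"}) };
    let b = a.clone(); // exact repeat of the same call
    let c = ToolCall { tool: "read_file".into(), args: json!({"path": "src/main.rs"}) };

    // Same rule as the diff: duplicates match on both tool name and args.
    let are_duplicates = |t1: &ToolCall, t2: &ToolCall| t1.tool == t2.tool && t1.args == t2.args;

    let mut seen: Vec<ToolCall> = Vec::new();
    for call in [a, b, c] {
        if seen.iter().any(|s| are_duplicates(s, &call)) {
            println!("🟥 {} DUP IN CHUNK - skipped", call.tool);
        } else {
            seen.push(call);
        }
    }
    // Prints one "DUP IN CHUNK" line for the repeated read_file call.
}
```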
```diff
@@ -2962,11 +3230,14 @@ impl<W: UiWriter> Agent<W> {
                         self.ui_writer.print_tool_output_header();
                     }
 
+                    // Clone working_dir to avoid borrow checker issues
+                    let working_dir = self.working_dir.clone();
                     let exec_start = Instant::now();
                     // Add 8-minute timeout for tool execution
                     let tool_result = match tokio::time::timeout(
                         Duration::from_secs(8 * 60), // 8 minutes
-                        self.execute_tool(&tool_call),
+                        // Use working_dir if set (from --codebase-fast-start)
+                        self.execute_tool_in_dir(&tool_call, working_dir.as_deref()),
                     )
                     .await
                     {
@@ -3140,8 +3411,17 @@ impl<W: UiWriter> Agent<W> {
                     current_response.clear();
                     // Reset response_started flag for next iteration
                     response_started = false;
 
+                    // For single tool mode, break immediately
+                    if !self.config.agent.allow_multiple_tool_calls {
                         break; // Break out of current stream to start a new one
+                    }
+                } // End of for loop processing each tool call
+
+                // If we processed any tools in multiple mode, break out to start new stream
+                if tool_executed && self.config.agent.allow_multiple_tool_calls {
+                    break;
+                }
 
                 // If no tool calls were completed, continue streaming normally
                 if !tool_executed {
@@ -3223,8 +3503,8 @@ impl<W: UiWriter> Agent<W> {
                 error!("Iteration: {}/{}", iteration_count, MAX_ITERATIONS);
                 error!(
                     "Provider: {} (model: {})",
-                    provider.name(),
-                    provider.model()
+                    provider_name,
+                    provider_model
                 );
                 error!("Chunks received: {}", chunks_received);
                 error!("Parser state:");
@@ -3503,9 +3783,29 @@ impl<W: UiWriter> Agent<W> {
     pub async fn execute_tool(&mut self, tool_call: &ToolCall) -> Result<String> {
         // Increment tool call count
         self.tool_call_count += 1;
+        self.execute_tool_in_dir(tool_call, None).await
+    }
+
+    /// Execute a tool with an optional working directory (for discovery commands)
+    pub async fn execute_tool_in_dir(&mut self, tool_call: &ToolCall, working_dir: Option<&str>) -> Result<String> {
+        // Only increment tool call count if not already incremented by execute_tool
+        if working_dir.is_some() {
+            self.tool_call_count += 1;
+        }
+
+        let result = self.execute_tool_inner_in_dir(tool_call, working_dir).await;
+        let log_str = match &result {
+            Ok(s) => s.clone(),
+            Err(e) => format!("ERROR: {}", e),
+        };
+        self.log_tool_call(tool_call, &log_str);
+        result
+    }
+
+    async fn execute_tool_inner_in_dir(&mut self, tool_call: &ToolCall, working_dir: Option<&str>) -> Result<String> {
         debug!("=== EXECUTING TOOL ===");
         debug!("Tool name: {}", tool_call.tool);
+        debug!("Working directory passed to execute_tool_inner_in_dir: {:?}", working_dir);
         debug!("Tool args (raw): {:?}", tool_call.args);
         debug!(
             "Tool args (JSON): {}",
@@ -3541,8 +3841,10 @@ impl<W: UiWriter> Agent<W> {
             ui_writer: &self.ui_writer,
         };
 
+        debug!("ABOUT TO CALL execute_bash_streaming_in_dir: escaped_command='{}', working_dir={:?}", escaped_command, working_dir);
+
         match executor
-            .execute_bash_streaming(&escaped_command, &receiver)
+            .execute_bash_streaming_in_dir(&escaped_command, &receiver, working_dir)
             .await
         {
             Ok(result) => {
```
```diff
@@ -4051,6 +4353,56 @@ impl<W: UiWriter> Agent<W> {
                 let mut todo = self.todo_content.write().await;
                 *todo = content.clone();
 
+                // Check for staleness if enabled and we have a requirements SHA
+                if self.config.agent.check_todo_staleness {
+                    if let Some(req_sha) = &self.requirements_sha {
+                        // Parse the first line for the SHA header
+                        if let Some(first_line) = content.lines().next() {
+                            if first_line.starts_with("{{Based on the requirements file with SHA256:") {
+                                let parts: Vec<&str> = first_line.split("SHA256:").collect();
+                                if parts.len() > 1 {
+                                    let todo_sha = parts[1].trim().trim_end_matches("}}").trim();
+                                    if todo_sha != req_sha {
+                                        let warning = format!(
+                                            "⚠️ TODO list is stale! It was generated from a different requirements file.\nExpected SHA: {}\nFound SHA: {}",
+                                            req_sha, todo_sha
+                                        );
+                                        self.ui_writer.print_context_status(&warning);
+
+                                        // Beep 6 times
+                                        print!("\x07\x07\x07\x07\x07\x07");
+                                        let _ = std::io::stdout().flush();
+
+                                        let options = ["Ignore and Continue", "Mark as Stale", "Quit Application"];
+                                        let choice = self.ui_writer.prompt_user_choice("Requirements have changed! What would you like to do?", &options);
+
+                                        match choice {
+                                            0 => {
+                                                // Ignore and Continue
+                                                self.ui_writer.print_context_status("⚠️ Ignoring staleness warning.");
+                                            }
+                                            1 => {
+                                                // Mark as Stale
+                                                // We return a message to the agent so it knows to regenerate/fix it.
+                                                return Ok("⚠️ TODO list is stale (requirements changed). Please regenerate the TODO list to match the new requirements.".to_string());
+                                            }
+                                            2 => {
+                                                // Quit Application
+                                                self.ui_writer.print_context_status("❌ Quitting application as requested.");
+                                                std::process::exit(0);
+                                            }
+                                            _ => unreachable!(),
+                                        }
+                                    }
+                                }
+                            } else {
+                                // Header missing, but we have a SHA. Warn the user?
+                                // For now, maybe just proceed... assuming it's an old TODO.
+                            }
+                        }
+                    }
+                }
+
                 if content.trim().is_empty() {
                     Ok("📝 TODO list is empty".to_string())
                 } else {
```
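The staleness check above keys off the `{{Based on the requirements file with SHA256: ...}}` header line. A standalone sketch of the same parse-and-compare logic, pulled into a helper for illustration (the helper name is hypothetical):

```rust
/// Extract the SHA recorded in a todo.g3.md header, mirroring the split/trim
/// logic in the diff above.
fn todo_sha_from_header(content: &str) -> Option<String> {
    let first_line = content.lines().next()?;
    if !first_line.starts_with("{{Based on the requirements file with SHA256:") {
        return None;
    }
    let parts: Vec<&str> = first_line.split("SHA256:").collect();
    parts
        .get(1)
        .map(|p| p.trim().trim_end_matches("}}").trim().to_string())
}

fn main() {
    let todo = "{{Based on the requirements file with SHA256: abc123}}\n- [ ] Task 1";
    assert_eq!(todo_sha_from_header(todo).as_deref(), Some("abc123"));
    // A TODO list without the header yields no SHA, so no staleness verdict.
    assert!(todo_sha_from_header("- [ ] no header").is_none());
}
```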
```diff
@@ -4097,6 +4449,46 @@ impl<W: UiWriter> Agent<W> {
                     Ok("❌ Missing content argument".to_string())
                 }
             }
+            "code_coverage" => {
+                debug!("Processing code_coverage tool call");
+                self.ui_writer.print_context_status("🔍 Generating code coverage report...");
+
+                // Ensure coverage tools are installed
+                match g3_execution::ensure_coverage_tools_installed() {
+                    Ok(already_installed) => {
+                        if !already_installed {
+                            self.ui_writer.print_context_status("✅ Coverage tools installed successfully");
+                        }
+                    }
+                    Err(e) => {
+                        return Ok(format!("❌ Failed to install coverage tools: {}", e));
+                    }
+                }
+
+                // Run cargo llvm-cov --workspace
+                let output = std::process::Command::new("cargo")
+                    .args(&["llvm-cov", "--workspace"])
+                    .current_dir(std::env::current_dir()?)
+                    .output()?;
+
+                if output.status.success() {
+                    let stdout = String::from_utf8_lossy(&output.stdout);
+                    let stderr = String::from_utf8_lossy(&output.stderr);
+
+                    // Combine output
+                    let mut result = String::from("✅ Code coverage report generated successfully\n\n");
+                    result.push_str("## Coverage Summary\n");
+                    result.push_str(&stdout);
+                    if !stderr.is_empty() {
+                        result.push_str("\n## Warnings\n");
+                        result.push_str(&stderr);
+                    }
+                    Ok(result)
+                } else {
+                    let stderr = String::from_utf8_lossy(&output.stderr);
+                    Ok(format!("❌ Failed to generate coverage report:\n{}", stderr))
+                }
+            }
             "webdriver_start" => {
                 debug!("Processing webdriver_start tool call");
```
```diff
@@ -98,49 +98,6 @@ impl Project {
         self.requirements_text.is_some() || self.requirements_path.is_some()
     }
 
-    /// Check if implementation files exist in the workspace
-    pub fn has_implementation_files(&self) -> bool {
-        self.check_dir_for_implementation_files(&self.workspace_dir)
-    }
-
-    /// Recursively check a directory for implementation files
-    #[allow(clippy::only_used_in_recursion)]
-    fn check_dir_for_implementation_files(&self, dir: &Path) -> bool {
-        // Common source file extensions
-        let extensions = vec![
-            "swift", "rs", "py", "js", "ts", "java", "cpp", "c",
-            "go", "rb", "php", "cs", "kt", "scala", "m", "h"
-        ];
-
-        if let Ok(entries) = std::fs::read_dir(dir) {
-            for entry in entries.flatten() {
-                let path = entry.path();
-
-                if path.is_file() {
-                    // Check if it's a source file
-                    if let Some(ext) = path.extension() {
-                        if let Some(ext_str) = ext.to_str() {
-                            if extensions.contains(&ext_str) {
-                                return true;
-                            }
-                        }
-                    }
-                } else if path.is_dir() {
-                    // Skip hidden directories and common non-source directories
-                    if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
-                        if !name.starts_with('.') && name != "logs" && name != "target" && name != "node_modules" {
-                            // Recursively check subdirectories
-                            if self.check_dir_for_implementation_files(&path) {
-                                return true;
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        false
-    }
-
     /// Read the requirements file content
     pub fn read_requirements(&self) -> Result<Option<String>> {
         // Prioritize requirements text override
```
```diff
@@ -74,6 +74,10 @@ Every multi-step task follows this pattern:
 
 Note: todo_write replaces the entire todo.g3.md file, so always read first to preserve content. TODO lists persist across g3 sessions in the workspace directory.
 
+IMPORTANT: If you are provided with a SHA256 hash of the requirements file, you MUST include it as the very first line of the todo.g3.md file in the following format:
+`{{Based on the requirements file with SHA256: <SHA>}}`
+This ensures the TODO list is tracked against the specific version of requirements it was generated from.
+
 ## Examples
 
 **Example 1: Feature Implementation**
@@ -185,7 +189,25 @@ Do not explain what you're going to do - just do it by calling the tools.
 ";
 
 pub const SYSTEM_PROMPT_FOR_NATIVE_TOOL_USE: &'static str =
-    concatcp!(CODING_STYLE, SYSTEM_NATIVE_TOOL_CALLS);
+    concatcp!(SYSTEM_NATIVE_TOOL_CALLS, CODING_STYLE);
+
+/// Generate system prompt based on whether multiple tool calls are allowed
+pub fn get_system_prompt_for_native(allow_multiple: bool) -> String {
+    if allow_multiple {
+        // Replace the "ONE tool" instruction with multiple tools instruction
+        let base = SYSTEM_PROMPT_FOR_NATIVE_TOOL_USE.to_string();
+        base.replace(
+            "2. Call the appropriate tool with the required parameters",
+            "2. Call the appropriate tool(s) with the required parameters - you may call multiple tools in parallel when appropriate.
+<use_parallel_tool_calls>
+For maximum efficiency, whenever you perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially. Prioritize calling tools in parallel whenever possible. For example, when reading 3 files, run 3 tool calls in parallel to read all 3 files into context at the same time. When running multiple read-only commands like `ls` or `list_dir`, always run all of the commands in parallel. Err on the side of maximizing parallel tool calls rather than running too many tools sequentially.
+</use_parallel_tool_calls>
+"
+        )
+    } else {
+        SYSTEM_PROMPT_FOR_NATIVE_TOOL_USE.to_string()
+    }
+}
 
 const SYSTEM_NON_NATIVE_TOOL_USE: &'static str =
     "You are G3, a general-purpose AI agent. Your goal is to analyze and solve problems by writing code.
@@ -285,6 +307,10 @@ Every multi-step task follows this pattern:
 
 Note: todo_write replaces the entire list, so always read first to preserve content.
 
+IMPORTANT: If you are provided with a SHA256 hash of the requirements file, you MUST include it as the very first line of the todo.g3.md file in the following format:
+`{{Based on the requirements file with SHA256: <SHA>}}`
+This ensures the TODO list is tracked against the specific version of requirements it was generated from.
+
 ## Examples
 
 **Example 1: Feature Implementation**
@@ -345,4 +371,4 @@ If you can complete it with 1-2 tool calls, skip TODO.
 ";
 
 pub const SYSTEM_PROMPT_FOR_NON_NATIVE_TOOL_USE: &'static str =
-    concatcp!(CODING_STYLE, SYSTEM_NON_NATIVE_TOOL_USE);
+    concatcp!(SYSTEM_NON_NATIVE_TOOL_USE, CODING_STYLE);
```
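`get_system_prompt_for_native` works by plain string substitution on the base prompt. A self-contained sketch of the approach with an abbreviated prompt (not the real constant):

```rust
// Abbreviated stand-in for the concatenated system prompt.
const BASE_PROMPT: &str = "1. Pick a tool\n2. Call the appropriate tool with the required parameters";

fn get_system_prompt(allow_multiple: bool) -> String {
    if allow_multiple {
        // Splice the parallel-tool-call guidance into the single-tool instruction.
        BASE_PROMPT.replace(
            "2. Call the appropriate tool with the required parameters",
            "2. Call the appropriate tool(s) with the required parameters - you may call multiple tools in parallel when appropriate.",
        )
    } else {
        BASE_PROMPT.to_string()
    }
}

fn main() {
    assert!(get_system_prompt(true).contains("in parallel"));
    assert!(!get_system_prompt(false).contains("in parallel"));
}
```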
```diff
@@ -56,6 +56,13 @@ pub trait UiWriter: Send + Sync {
     /// Returns true if this UI writer wants full, untruncated output
     /// Default is false (truncate for human readability)
     fn wants_full_output(&self) -> bool { false }
+
+    /// Prompt the user for a yes/no confirmation
+    fn prompt_user_yes_no(&self, message: &str) -> bool;
+
+    /// Prompt the user to choose from a list of options
+    /// Returns the index of the selected option
+    fn prompt_user_choice(&self, message: &str, options: &[&str]) -> usize;
 }
 
 /// A no-op implementation for when UI output is not needed
@@ -80,4 +87,6 @@ impl UiWriter for NullUiWriter {
     fn notify_sse_received(&self) {}
     fn flush(&self) {}
     fn wants_full_output(&self) -> bool { false }
+    fn prompt_user_yes_no(&self, _message: &str) -> bool { true }
+    fn prompt_user_choice(&self, _message: &str, _options: &[&str]) -> usize { 0 }
 }
```
```diff
@@ -551,6 +551,7 @@ async fn test_cpp_search() {
 }
 
 #[tokio::test]
+#[ignore]
 async fn test_kotlin_search() {
     let request = CodeSearchRequest {
         searches: vec![SearchSpec {
```
crates/g3-core/tests/todo_staleness_test.rs (new file): 193 lines

```diff
@@ -0,0 +1,193 @@
+use g3_core::{Agent, ToolCall};
+use g3_core::ui_writer::UiWriter;
+use g3_config::Config;
+use std::sync::{Arc, Mutex};
+use tempfile::TempDir;
+use serial_test::serial;
+
+// Mock UI Writer for testing
+#[derive(Clone)]
+struct MockUiWriter {
+    output: Arc<Mutex<Vec<String>>>,
+    prompt_responses: Arc<Mutex<Vec<bool>>>,
+    choice_responses: Arc<Mutex<Vec<usize>>>,
+}
+
+impl MockUiWriter {
+    fn new() -> Self {
+        Self {
+            output: Arc::new(Mutex::new(Vec::new())),
+            prompt_responses: Arc::new(Mutex::new(Vec::new())),
+            choice_responses: Arc::new(Mutex::new(Vec::new())),
+        }
+    }
+
+    fn set_prompt_response(&self, response: bool) {
+        self.prompt_responses.lock().unwrap().push(response);
+    }
+
+    fn set_choice_response(&self, response: usize) {
+        self.choice_responses.lock().unwrap().push(response);
+    }
+
+    fn get_output(&self) -> Vec<String> {
+        self.output.lock().unwrap().clone()
+    }
+}
+
+impl UiWriter for MockUiWriter {
+    fn print(&self, message: &str) {
+        self.output.lock().unwrap().push(message.to_string());
+    }
+    fn println(&self, message: &str) {
+        self.output.lock().unwrap().push(message.to_string());
+    }
+    fn print_inline(&self, message: &str) {
+        self.output.lock().unwrap().push(message.to_string());
+    }
+    fn print_system_prompt(&self, _prompt: &str) {}
+    fn print_context_status(&self, message: &str) {
+        self.output.lock().unwrap().push(format!("STATUS: {}", message));
+    }
+    fn print_context_thinning(&self, _message: &str) {}
+    fn print_tool_header(&self, _tool_name: &str) {}
+    fn print_tool_arg(&self, _key: &str, _value: &str) {}
+    fn print_tool_output_header(&self) {}
+    fn update_tool_output_line(&self, _line: &str) {}
+    fn print_tool_output_line(&self, _line: &str) {}
+    fn print_tool_output_summary(&self, _hidden_count: usize) {}
+    fn print_tool_timing(&self, _duration_str: &str) {}
+    fn print_agent_prompt(&self) {}
+    fn print_agent_response(&self, _content: &str) {}
+    fn notify_sse_received(&self) {}
+    fn flush(&self) {}
+    fn wants_full_output(&self) -> bool { false }
+    fn prompt_user_yes_no(&self, message: &str) -> bool {
+        self.output.lock().unwrap().push(format!("PROMPT: {}", message));
+        self.prompt_responses.lock().unwrap().pop().unwrap_or(true)
+    }
+    fn prompt_user_choice(&self, message: &str, options: &[&str]) -> usize {
+        self.output.lock().unwrap().push(format!("CHOICE: {} Options: {:?}", message, options));
+        self.choice_responses.lock().unwrap().pop().unwrap_or(0)
+    }
+}
+
+#[tokio::test]
+#[serial]
+async fn test_todo_staleness_check_matching_sha() {
+    let temp_dir = TempDir::new().unwrap();
+    let todo_path = temp_dir.path().join("todo.g3.md");
+    std::env::set_current_dir(&temp_dir).unwrap();
+
+    let sha = "abc123hash";
+    let content = format!("{{{{Based on the requirements file with SHA256: {}}}}}\n- [ ] Task 1", sha);
+    std::fs::write(&todo_path, content).unwrap();
+
+    let mut config = Config::default();
+    config.agent.check_todo_staleness = true;
+
+    let ui_writer = MockUiWriter::new();
+    let mut agent = Agent::new_autonomous(config, ui_writer).await.unwrap();
+    agent.set_requirements_sha(sha.to_string());
+
+    let tool_call = ToolCall {
+        tool: "todo_read".to_string(),
+        args: serde_json::json!({}),
+    };
+    let result = agent.execute_tool(&tool_call).await.unwrap();
+
+    assert!(result.contains("📝 TODO list:"));
+    assert!(!result.contains("⚠️ TODO list is stale"));
+}
+
+#[tokio::test]
+#[serial]
+async fn test_todo_staleness_check_mismatch_sha_ignore() {
+    let temp_dir = TempDir::new().unwrap();
+    let todo_path = temp_dir.path().join("todo.g3.md");
+    std::env::set_current_dir(&temp_dir).unwrap();
+
+    let sha_file = "old_sha";
+    let sha_req = "new_sha";
+    let content = format!("{{{{Based on the requirements file with SHA256: {}}}}}\n- [ ] Task 1", sha_file);
+    std::fs::write(&todo_path, content).unwrap();
+
+    let mut config = Config::default();
+    config.agent.check_todo_staleness = true;
+
+    let ui_writer = MockUiWriter::new();
+    ui_writer.set_choice_response(0); // Ignore
+
+    let mut agent = Agent::new_autonomous(config, ui_writer).await.unwrap();
+    agent.set_requirements_sha(sha_req.to_string());
+
+    let tool_call = ToolCall {
+        tool: "todo_read".to_string(),
+        args: serde_json::json!({}),
```
||||
};
|
||||
let result = agent.execute_tool(&tool_call).await.unwrap();
|
||||
|
||||
assert!(result.contains("📝 TODO list:"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_todo_staleness_check_mismatch_sha_mark_stale() {
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
let todo_path = temp_dir.path().join("todo.g3.md");
|
||||
std::env::set_current_dir(&temp_dir).unwrap();
|
||||
|
||||
let sha_file = "old_sha";
|
||||
let sha_req = "new_sha";
|
||||
let content = format!("{{{{Based on the requirements file with SHA256: {}}}}}\n- [ ] Task 1", sha_file);
|
||||
std::fs::write(&todo_path, content).unwrap();
|
||||
|
||||
let mut config = Config::default();
|
||||
config.agent.check_todo_staleness = true;
|
||||
|
||||
let ui_writer = MockUiWriter::new();
|
||||
ui_writer.set_choice_response(1); // Mark as Stale
|
||||
|
||||
let mut agent = Agent::new_autonomous(config, ui_writer).await.unwrap();
|
||||
agent.set_requirements_sha(sha_req.to_string());
|
||||
|
||||
let tool_call = ToolCall {
|
||||
tool: "todo_read".to_string(),
|
||||
args: serde_json::json!({}),
|
||||
};
|
||||
let result = agent.execute_tool(&tool_call).await.unwrap();
|
||||
|
||||
assert!(result.contains("⚠️ TODO list is stale"));
|
||||
assert!(result.contains("Please regenerate"));
|
||||
}
|
||||
|
||||
// Note: We cannot easily test "Quit" (index 2) because it calls std::process::exit(0)
|
||||
// which would kill the test runner. We skip that test case here.
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_todo_staleness_check_disabled() {
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
let todo_path = temp_dir.path().join("todo.g3.md");
|
||||
std::env::set_current_dir(&temp_dir).unwrap();
|
||||
|
||||
let sha_file = "old_sha";
|
||||
let sha_req = "new_sha";
|
||||
let content = format!("{{{{Based on the requirements file with SHA256: {}}}}}\n- [ ] Task 1", sha_file);
|
||||
std::fs::write(&todo_path, content).unwrap();
|
||||
|
||||
let mut config = Config::default();
|
||||
config.agent.check_todo_staleness = false;
|
||||
|
||||
let ui_writer = MockUiWriter::new();
|
||||
let mut agent = Agent::new_autonomous(config, ui_writer).await.unwrap();
|
||||
agent.set_requirements_sha(sha_req.to_string());
|
||||
|
||||
let tool_call = ToolCall {
|
||||
tool: "todo_read".to_string(),
|
||||
args: serde_json::json!({}),
|
||||
};
|
||||
let result = agent.execute_tool(&tool_call).await.unwrap();
|
||||
|
||||
assert!(result.contains("📝 TODO list:"));
|
||||
}
|
||||
13 crates/g3-execution/examples/setup_coverage_tools.rs (new file)
@@ -0,0 +1,13 @@
|
||||
use g3_execution::ensure_coverage_tools_installed;
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
// Ensure coverage tools are installed
|
||||
let already_installed = ensure_coverage_tools_installed()?;
|
||||
|
||||
if already_installed {
|
||||
println!("All coverage tools are already installed!");
|
||||
} else {
|
||||
println!("Coverage tools have been installed successfully!");
|
||||
}
|
||||
Ok(())
|
||||
}
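Assuming the standard Cargo example layout, this can be run from the workspace root with `cargo run -p g3-execution --example setup_coverage_tools`.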
|
||||
@@ -5,6 +5,17 @@ use tempfile::NamedTempFile;
|
||||
use std::io::Write;
|
||||
use tracing::{info, debug, error};
|
||||
|
||||
/// Expand tilde (~) in a path to the user's home directory
|
||||
fn expand_tilde(path: &str) -> String {
|
||||
if path.starts_with("~") {
|
||||
if let Some(home) = std::env::var_os("HOME") {
|
||||
let home_str = home.to_string_lossy();
|
||||
return path.replacen("~", &home_str, 1);
|
||||
}
|
||||
}
|
||||
path.to_string()
|
||||
}
|
||||
|
||||
pub struct CodeExecutor {
|
||||
// Future: add configuration for execution limits, sandboxing, etc.
|
||||
}
|
||||
@@ -241,11 +252,33 @@ impl CodeExecutor {
|
||||
&self,
|
||||
code: &str,
|
||||
receiver: &R
|
||||
) -> Result<ExecutionResult> {
|
||||
self.execute_bash_streaming_in_dir(code, receiver, None).await
|
||||
}
|
||||
|
||||
/// Execute bash command with streaming output in a specific directory
|
||||
pub async fn execute_bash_streaming_in_dir<R: OutputReceiver>(
|
||||
&self,
|
||||
code: &str,
|
||||
receiver: &R,
|
||||
working_dir: Option<&str>,
|
||||
) -> Result<ExecutionResult> {
|
||||
use std::process::Stdio;
|
||||
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||
use tokio::process::Command as TokioCommand;
|
||||
|
||||
// Diagnostic logging: trace the command and working directory before spawning
|
||||
debug!("========== execute_bash_streaming_in_dir START ==========");
|
||||
debug!("Code to execute: {}", code);
|
||||
debug!("Working directory parameter: {:?}", working_dir);
|
||||
debug!("FULL DIAGNOSTIC: code='{}', working_dir={:?}", code, working_dir);
|
||||
|
||||
if let Some(dir) = working_dir {
|
||||
debug!("Working dir exists check: {}", std::path::Path::new(dir).exists());
|
||||
debug!("Working dir is_dir check: {}", std::path::Path::new(dir).is_dir());
|
||||
}
|
||||
debug!("Current process working directory: {:?}", std::env::current_dir());
|
||||
|
||||
// Check if this is a detached/daemon command that should run independently
|
||||
// Look for patterns like: setsid, nohup with &, or explicit backgrounding with disown
|
||||
let is_detached = code.trim_start().starts_with("setsid ")
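The hunk boundary truncates this predicate mid-expression; based on the patterns named in the comment above, the full condition plausibly continues along these lines (a sketch, not the actual source):

```rust
// Hypothetical completion of the truncated predicate, covering the patterns
// the comment names: a setsid prefix, nohup with a trailing '&', or disown.
let is_detached = code.trim_start().starts_with("setsid ")
    || (code.contains("nohup ") && code.trim_end().ends_with('&'))
    || code.contains("disown");
```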
|
||||
@@ -255,10 +288,17 @@ impl CodeExecutor {
|
||||
|
||||
 if is_detached {
     // For detached commands, just spawn and return immediately
-    TokioCommand::new("bash")
-        .arg("-c")
-        .arg(code)
-        .spawn()?;
+    let mut cmd = TokioCommand::new("bash");
+    cmd.arg("-c")
+        .arg(code);
+
+    // Set working directory if provided
+    if let Some(dir) = working_dir {
+        let expanded_dir = expand_tilde(dir);
+        cmd.current_dir(&expanded_dir);
+    }
+
+    cmd.spawn()?;
|
||||
|
||||
// Don't wait for the process - it's meant to run independently
|
||||
return Ok(ExecutionResult {
|
||||
@@ -269,12 +309,33 @@ impl CodeExecutor {
|
||||
});
|
||||
}
|
||||
|
||||
-let mut child = TokioCommand::new("bash")
-    .arg("-c")
+let mut cmd = TokioCommand::new("bash");
+cmd.arg("-c")
     .arg(code)
     .stdout(Stdio::piped())
-    .stderr(Stdio::piped())
-    .spawn()?;
+    .stderr(Stdio::piped());
|
||||
|
||||
// Set working directory if provided
|
||||
if let Some(dir) = working_dir {
|
||||
debug!("Setting current_dir on command to: {}", dir);
|
||||
let expanded_dir = expand_tilde(dir);
|
||||
debug!("Expanded working dir: {}", expanded_dir);
|
||||
debug!("Expanded dir exists: {}", std::path::Path::new(&expanded_dir).exists());
|
||||
debug!("Expanded dir is_dir: {}", std::path::Path::new(&expanded_dir).is_dir());
|
||||
cmd.current_dir(&expanded_dir);
|
||||
}
|
||||
|
||||
debug!("About to spawn command...");
|
||||
let spawn_result = cmd.spawn();
|
||||
debug!("Spawn result: {:?}", spawn_result.is_ok());
|
||||
let mut child = match spawn_result {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
debug!("SPAWN ERROR: {:?}", e);
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
debug!("Command spawned successfully");
|
||||
|
||||
let stdout = child.stdout.take().unwrap();
|
||||
let stderr = child.stderr.take().unwrap();
|
||||
@@ -322,11 +383,106 @@ impl CodeExecutor {
|
||||
|
||||
 let status = child.wait().await?;

-Ok(ExecutionResult {
+let result = ExecutionResult {
     stdout: stdout_output.join("\n"),
     stderr: stderr_output.join("\n"),
     exit_code: status.code().unwrap_or(-1),
     success: status.success(),
-})
+};
|
||||
|
||||
debug!("========== execute_bash_streaming_in_dir END ==========");
|
||||
debug!("Exit code: {}", result.exit_code);
|
||||
debug!("Success: {}", result.success);
|
||||
debug!("Stdout length: {}", result.stdout.len());
|
||||
debug!("Stderr length: {}", result.stderr.len());
|
||||
if !result.stderr.is_empty() {
|
||||
debug!("Stderr content: {}", result.stderr);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if rustup component llvm-tools-preview is installed
|
||||
pub fn is_llvm_tools_installed() -> Result<bool> {
|
||||
let output = Command::new("rustup")
|
||||
.args(&["component", "list", "--installed"])
|
||||
.output()?;
|
||||
|
||||
let installed = String::from_utf8_lossy(&output.stdout)
|
||||
.lines()
|
||||
.any(|line| line.trim() == "llvm-tools-preview" || line.starts_with("llvm-tools"));
|
||||
|
||||
Ok(installed)
|
||||
}
|
||||
|
||||
/// Check if cargo-llvm-cov is installed
|
||||
pub fn is_cargo_llvm_cov_installed() -> Result<bool> {
|
||||
let output = Command::new("cargo")
|
||||
.args(&["--list"])
|
||||
.output()?;
|
||||
|
||||
let installed = String::from_utf8_lossy(&output.stdout)
|
||||
.lines()
|
||||
.any(|line| line.trim().starts_with("llvm-cov"));
|
||||
|
||||
Ok(installed)
|
||||
}
|
||||
|
||||
/// Install llvm-tools-preview via rustup
|
||||
pub fn install_llvm_tools() -> Result<()> {
|
||||
info!("Installing llvm-tools-preview...");
|
||||
let output = Command::new("rustup")
|
||||
.args(&["component", "add", "llvm-tools-preview"])
|
||||
.output()?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
anyhow::bail!("Failed to install llvm-tools-preview: {}", stderr);
|
||||
}
|
||||
|
||||
info!("✅ llvm-tools-preview installed successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Install cargo-llvm-cov via cargo install
|
||||
pub fn install_cargo_llvm_cov() -> Result<()> {
|
||||
info!("Installing cargo-llvm-cov... (this may take a few minutes)");
|
||||
let output = Command::new("cargo")
|
||||
.args(&["install", "cargo-llvm-cov"])
|
||||
.output()?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
anyhow::bail!("Failed to install cargo-llvm-cov: {}", stderr);
|
||||
}
|
||||
|
||||
info!("✅ cargo-llvm-cov installed successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Ensure both llvm-tools-preview and cargo-llvm-cov are installed
|
||||
/// Returns Ok(true) if tools were already installed, Ok(false) if they were installed by this function
|
||||
pub fn ensure_coverage_tools_installed() -> Result<bool> {
|
||||
let mut already_installed = true;
|
||||
|
||||
// Check and install llvm-tools-preview
|
||||
if !is_llvm_tools_installed()? {
|
||||
info!("llvm-tools-preview not found, installing...");
|
||||
install_llvm_tools()?;
|
||||
already_installed = false;
|
||||
} else {
|
||||
info!("✅ llvm-tools-preview is already installed");
|
||||
}
|
||||
|
||||
// Check and install cargo-llvm-cov
|
||||
if !is_cargo_llvm_cov_installed()? {
|
||||
info!("cargo-llvm-cov not found, installing...");
|
||||
install_cargo_llvm_cov()?;
|
||||
already_installed = false;
|
||||
} else {
|
||||
info!("✅ cargo-llvm-cov is already installed");
|
||||
}
|
||||
|
||||
Ok(already_installed)
|
||||
}
|
||||
|
||||
14 crates/g3-planner/Cargo.toml (new file)
@@ -0,0 +1,14 @@
|
||||
[package]
|
||||
name = "g3-planner"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Fast-discovery planner for G3 AI coding agent"
|
||||
|
||||
[dependencies]
|
||||
g3-providers = { path = "../g3-providers" }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
const_format = "0.2"
|
||||
anyhow = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
724 crates/g3-planner/src/code_explore.rs (new file)
@@ -0,0 +1,724 @@
|
||||
//! Code exploration module for analyzing codebases
|
||||
//!
|
||||
//! This module provides functions to explore and analyze codebases
|
||||
//! for various programming languages, returning structured reports
|
||||
//! about the code structure.
|
||||
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
|
||||
/// Main entry point for exploring a codebase at the given path.
|
||||
/// Detects which languages are present and generates a comprehensive report.
|
||||
pub fn explore_codebase(path: &str) -> String {
|
||||
let path = expand_tilde(path);
|
||||
let mut report = String::new();
|
||||
let mut languages_found = Vec::new();
|
||||
|
||||
// Check for each language and add to report if found
|
||||
if has_rust_files(&path) {
|
||||
languages_found.push("Rust".to_string());
|
||||
report.push_str(&explore_rust(&path));
|
||||
}
|
||||
if has_java_files(&path) {
|
||||
languages_found.push("Java".to_string());
|
||||
report.push_str(&explore_java(&path));
|
||||
}
|
||||
if has_kotlin_files(&path) {
|
||||
languages_found.push("Kotlin".to_string());
|
||||
report.push_str(&explore_kotlin(&path));
|
||||
}
|
||||
if has_swift_files(&path) {
|
||||
languages_found.push("Swift".to_string());
|
||||
report.push_str(&explore_swift(&path));
|
||||
}
|
||||
if has_go_files(&path) {
|
||||
languages_found.push("Go".to_string());
|
||||
report.push_str(&explore_go(&path));
|
||||
}
|
||||
if has_python_files(&path) {
|
||||
languages_found.push("Python".to_string());
|
||||
report.push_str(&explore_python(&path));
|
||||
}
|
||||
if has_typescript_files(&path) {
|
||||
languages_found.push("TypeScript".to_string());
|
||||
report.push_str(&explore_typescript(&path));
|
||||
}
|
||||
if has_javascript_files(&path) {
|
||||
languages_found.push("JavaScript".to_string());
|
||||
report.push_str(&explore_javascript(&path));
|
||||
}
|
||||
if has_cpp_files(&path) {
|
||||
languages_found.push("C/C++".to_string());
|
||||
report.push_str(&explore_cpp(&path));
|
||||
}
|
||||
if has_markdown_files(&path) {
|
||||
languages_found.push("Markdown".to_string());
|
||||
report.push_str(&explore_markdown(&path));
|
||||
}
|
||||
if has_yaml_files(&path) {
|
||||
languages_found.push("YAML".to_string());
|
||||
report.push_str(&explore_yaml(&path));
|
||||
}
|
||||
if has_sql_files(&path) {
|
||||
languages_found.push("SQL".to_string());
|
||||
report.push_str(&explore_sql(&path));
|
||||
}
|
||||
if has_ruby_files(&path) {
|
||||
languages_found.push("Ruby".to_string());
|
||||
report.push_str(&explore_ruby(&path));
|
||||
}
|
||||
|
||||
if languages_found.is_empty() {
|
||||
report.push_str("No recognized programming languages found in the codebase.\n");
|
||||
} else {
|
||||
let header = format!(
|
||||
"=== CODEBASE ANALYSIS ===\nLanguages detected: {}\n\n",
|
||||
languages_found.join(", ")
|
||||
);
|
||||
report = header + &report;
|
||||
}
|
||||
|
||||
report
|
||||
}
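Callers hand the explorer a repository root and log or print the report; a minimal usage sketch (the path is invented):

```rust
fn main() {
    // Tilde paths are accepted; expand_tilde (below) resolves them first.
    let report = g3_planner::explore_codebase("~/src/my-project");
    println!("{report}");
}
```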
|
||||
|
||||
/// Expand tilde to home directory
|
||||
fn expand_tilde(path: &str) -> String {
|
||||
if path.starts_with("~/") {
|
||||
if let Some(home) = std::env::var_os("HOME") {
|
||||
return path.replacen("~", &home.to_string_lossy(), 1);
|
||||
}
|
||||
}
|
||||
path.to_string()
|
||||
}
|
||||
|
||||
/// Run a shell command and return its output
|
||||
fn run_command(cmd: &str, working_dir: &str) -> String {
|
||||
let output = Command::new("sh")
|
||||
.arg("-c")
|
||||
.arg(cmd)
|
||||
.current_dir(working_dir)
|
||||
.output();
|
||||
|
||||
match output {
|
||||
Ok(out) => {
|
||||
let stdout = String::from_utf8_lossy(&out.stdout);
|
||||
let stderr = String::from_utf8_lossy(&out.stderr);
|
||||
if !stdout.is_empty() {
|
||||
stdout.to_string()
|
||||
} else if !stderr.is_empty() {
|
||||
format!("(stderr): {}", stderr)
|
||||
} else {
|
||||
String::new()
|
||||
}
|
||||
}
|
||||
Err(e) => format!("Error running command: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if files with given extension exist
|
||||
fn has_files_with_extension(path: &str, extension: &str) -> bool {
|
||||
let cmd = format!(
|
||||
"find . -name '.git' -prune -o -type f -name '*.{}' -print | head -1",
|
||||
extension
|
||||
);
|
||||
!run_command(&cmd, path).trim().is_empty()
|
||||
}
|
||||
|
||||
// Language detection functions
|
||||
fn has_rust_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "rs") || Path::new(path).join("Cargo.toml").exists()
|
||||
}
|
||||
|
||||
fn has_java_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "java")
|
||||
}
|
||||
|
||||
fn has_kotlin_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "kt") || has_files_with_extension(path, "kts")
|
||||
}
|
||||
|
||||
fn has_swift_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "swift")
|
||||
}
|
||||
|
||||
fn has_go_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "go")
|
||||
}
|
||||
|
||||
fn has_python_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "py")
|
||||
}
|
||||
|
||||
fn has_typescript_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "ts") || has_files_with_extension(path, "tsx")
|
||||
}
|
||||
|
||||
fn has_javascript_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "js") || has_files_with_extension(path, "jsx")
|
||||
}
|
||||
|
||||
fn has_cpp_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "cpp")
|
||||
|| has_files_with_extension(path, "cc")
|
||||
|| has_files_with_extension(path, "c")
|
||||
|| has_files_with_extension(path, "h")
|
||||
|| has_files_with_extension(path, "hpp")
|
||||
}
|
||||
|
||||
fn has_markdown_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "md")
|
||||
}
|
||||
|
||||
fn has_yaml_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "yaml") || has_files_with_extension(path, "yml")
|
||||
}
|
||||
|
||||
fn has_sql_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "sql")
|
||||
}
|
||||
|
||||
fn has_ruby_files(path: &str) -> bool {
|
||||
has_files_with_extension(path, "rb")
|
||||
}
|
||||
|
||||
/// Explore Rust codebase
|
||||
pub fn explore_rust(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== RUST ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.rs' . 2>/dev/null | grep -v '/target/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// Dependencies (Cargo.toml)
|
||||
report.push_str("--- Dependencies (Cargo.toml) ---\n");
|
||||
let cargo = run_command("cat Cargo.toml 2>/dev/null | head -50", path);
|
||||
report.push_str(&cargo);
|
||||
report.push('\n');
|
||||
|
||||
// Data structures
|
||||
report.push_str("--- Data Structures (Structs, Enums, Types) ---\n");
|
||||
let structs = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.rs' '^(pub )?(struct|enum|type|union) ' . 2>/dev/null | grep -v '/target/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&structs);
|
||||
report.push('\n');
|
||||
|
||||
// Traits and implementations
|
||||
report.push_str("--- Traits & Implementations ---\n");
|
||||
let traits = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.rs' '^(pub )?trait |^impl ' . 2>/dev/null | grep -v '/target/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&traits);
|
||||
report.push('\n');
|
||||
|
||||
// Public functions
|
||||
report.push_str("--- Public Functions ---\n");
|
||||
let funcs = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.rs' '^pub (async )?fn ' . 2>/dev/null | grep -v '/target/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&funcs);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore Java codebase
|
||||
pub fn explore_java(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== JAVA ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.java' . 2>/dev/null | grep -v '/build/' | grep -v '/target/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// Build files
|
||||
report.push_str("--- Build Configuration ---\n");
|
||||
let build = run_command(
|
||||
"cat pom.xml 2>/dev/null | head -50 || cat build.gradle 2>/dev/null | head -50",
|
||||
path,
|
||||
);
|
||||
report.push_str(&build);
|
||||
report.push('\n');
|
||||
|
||||
// Classes and interfaces
|
||||
report.push_str("--- Classes & Interfaces ---\n");
|
||||
let classes = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.java' '^(public |private |protected )?(abstract )?(class|interface|enum|record) ' . 2>/dev/null | grep -v '/build/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&classes);
|
||||
report.push('\n');
|
||||
|
||||
// Public methods
|
||||
report.push_str("--- Public Methods ---\n");
|
||||
let methods = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.java' '^\s+public .+\(' . 2>/dev/null | grep -v '/build/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&methods);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore Kotlin codebase
|
||||
pub fn explore_kotlin(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== KOTLIN ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.kt' -g '*.kts' . 2>/dev/null | grep -v '/build/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// Build files
|
||||
report.push_str("--- Build Configuration ---\n");
|
||||
let build = run_command("cat build.gradle.kts 2>/dev/null | head -50 || cat build.gradle 2>/dev/null | head -50", path);
|
||||
report.push_str(&build);
|
||||
report.push('\n');
|
||||
|
||||
// Classes, objects, interfaces
|
||||
report.push_str("--- Classes, Objects & Interfaces ---\n");
|
||||
let classes = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.kt' '^(data |sealed |open |abstract )?(class|interface|object|enum class) ' . 2>/dev/null | grep -v '/build/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&classes);
|
||||
report.push('\n');
|
||||
|
||||
// Functions
|
||||
report.push_str("--- Functions ---\n");
|
||||
let funcs = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.kt' '^(suspend |private |internal |public )?fun ' . 2>/dev/null | grep -v '/build/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&funcs);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore Swift codebase
|
||||
pub fn explore_swift(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== SWIFT ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.swift' . 2>/dev/null | grep -v '/.build/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// Package.swift
|
||||
report.push_str("--- Package Configuration ---\n");
|
||||
let pkg = run_command("cat Package.swift 2>/dev/null | head -50", path);
|
||||
report.push_str(&pkg);
|
||||
report.push('\n');
|
||||
|
||||
// Classes, structs, protocols
|
||||
report.push_str("--- Types (Classes, Structs, Protocols, Enums) ---\n");
|
||||
let types = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.swift' '^(public |private |internal |open |final )?(class|struct|protocol|enum|actor) ' . 2>/dev/null | grep -v '/.build/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&types);
|
||||
report.push('\n');
|
||||
|
||||
// Functions
|
||||
report.push_str("--- Functions ---\n");
|
||||
let funcs = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.swift' '^\s*(public |private |internal |open )?func ' . 2>/dev/null | grep -v '/.build/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&funcs);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore Go codebase
|
||||
pub fn explore_go(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== GO ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.go' . 2>/dev/null | grep -v '/vendor/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// go.mod
|
||||
report.push_str("--- Module Configuration ---\n");
|
||||
let gomod = run_command("cat go.mod 2>/dev/null | head -50", path);
|
||||
report.push_str(&gomod);
|
||||
report.push('\n');
|
||||
|
||||
// Types (structs, interfaces)
|
||||
report.push_str("--- Types (Structs & Interfaces) ---\n");
|
||||
let types = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.go' '^type .+ (struct|interface)' . 2>/dev/null | grep -v '/vendor/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&types);
|
||||
report.push('\n');
|
||||
|
||||
// Functions
|
||||
report.push_str("--- Functions ---\n");
|
||||
let funcs = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.go' '^func ' . 2>/dev/null | grep -v '/vendor/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&funcs);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore Python codebase
|
||||
pub fn explore_python(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== PYTHON ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.py' . 2>/dev/null | grep -v '/__pycache__/' | grep -v '/venv/' | grep -v '/.venv/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// Requirements/setup
|
||||
report.push_str("--- Dependencies ---\n");
|
||||
let deps = run_command(
|
||||
"cat requirements.txt 2>/dev/null | head -30 || cat pyproject.toml 2>/dev/null | head -50 || cat setup.py 2>/dev/null | head -30",
|
||||
path,
|
||||
);
|
||||
report.push_str(&deps);
|
||||
report.push('\n');
|
||||
|
||||
// Classes
|
||||
report.push_str("--- Classes ---\n");
|
||||
let classes = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.py' '^class ' . 2>/dev/null | grep -v '/__pycache__/' | grep -v '/venv/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&classes);
|
||||
report.push('\n');
|
||||
|
||||
// Functions
|
||||
report.push_str("--- Functions ---\n");
|
||||
let funcs = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.py' '^def |^async def ' . 2>/dev/null | grep -v '/__pycache__/' | grep -v '/venv/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&funcs);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore TypeScript codebase
|
||||
pub fn explore_typescript(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== TYPESCRIPT ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.ts' -g '*.tsx' . 2>/dev/null | grep -v '/node_modules/' | grep -v '/dist/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// package.json
|
||||
report.push_str("--- Package Configuration ---\n");
|
||||
let pkg = run_command("cat package.json 2>/dev/null | head -50", path);
|
||||
report.push_str(&pkg);
|
||||
report.push('\n');
|
||||
|
||||
// Types, interfaces, classes
|
||||
report.push_str("--- Types, Interfaces & Classes ---\n");
|
||||
let types = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.ts' -g '*.tsx' '^export (type|interface|class|enum|abstract class) ' . 2>/dev/null | grep -v '/node_modules/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&types);
|
||||
report.push('\n');
|
||||
|
||||
// Functions
|
||||
report.push_str("--- Exported Functions ---\n");
|
||||
let funcs = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.ts' -g '*.tsx' '^export (async )?function |^export const .+ = (async )?\(' . 2>/dev/null | grep -v '/node_modules/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&funcs);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore JavaScript codebase
|
||||
pub fn explore_javascript(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== JAVASCRIPT ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.js' -g '*.jsx' . 2>/dev/null | grep -v '/node_modules/' | grep -v '/dist/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// package.json
|
||||
report.push_str("--- Package Configuration ---\n");
|
||||
let pkg = run_command("cat package.json 2>/dev/null | head -50", path);
|
||||
report.push_str(&pkg);
|
||||
report.push('\n');
|
||||
|
||||
// Classes
|
||||
report.push_str("--- Classes ---\n");
|
||||
let classes = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.js' -g '*.jsx' '^(export )?(default )?(class ) ' . 2>/dev/null | grep -v '/node_modules/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&classes);
|
||||
report.push('\n');
|
||||
|
||||
// Functions
|
||||
report.push_str("--- Exported Functions ---\n");
|
||||
let funcs = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.js' -g '*.jsx' '^(export )?(async )?function |^module\.exports' . 2>/dev/null | grep -v '/node_modules/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&funcs);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore C/C++ codebase
|
||||
pub fn explore_cpp(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== C/C++ ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.c' -g '*.cpp' -g '*.cc' -g '*.h' -g '*.hpp' . 2>/dev/null | grep -v '/build/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// Build files
|
||||
report.push_str("--- Build Configuration ---\n");
|
||||
let build = run_command(
|
||||
"cat CMakeLists.txt 2>/dev/null | head -50 || cat Makefile 2>/dev/null | head -50",
|
||||
path,
|
||||
);
|
||||
report.push_str(&build);
|
||||
report.push('\n');
|
||||
|
||||
// Classes and structs
|
||||
report.push_str("--- Classes & Structs ---\n");
|
||||
let classes = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.cpp' -g '*.cc' -g '*.h' -g '*.hpp' '^(class|struct|enum|union|typedef) ' . 2>/dev/null | grep -v '/build/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&classes);
|
||||
report.push('\n');
|
||||
|
||||
// Functions (simplified pattern)
|
||||
report.push_str("--- Function Declarations ---\n");
|
||||
let funcs = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.h' -g '*.hpp' '^[a-zA-Z_][a-zA-Z0-9_<>: ]*\s+[a-zA-Z_][a-zA-Z0-9_]*\s*\(' . 2>/dev/null | grep -v '/build/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&funcs);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore Markdown documentation
|
||||
pub fn explore_markdown(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== MARKDOWN DOCUMENTATION ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- Documentation Files ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.md' . 2>/dev/null | grep -v '/node_modules/' | grep -v '/vendor/' | sort | head -50",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// README content
|
||||
report.push_str("--- README Overview ---\n");
|
||||
let readme = run_command(
|
||||
"cat README.md 2>/dev/null | head -100 || cat readme.md 2>/dev/null | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&readme);
|
||||
report.push('\n');
|
||||
|
||||
// Headers from all markdown files
|
||||
report.push_str("--- Document Headers ---\n");
|
||||
let headers = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename -g '*.md' '^#{1,3} ' . 2>/dev/null | grep -v '/node_modules/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&headers);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore YAML configuration files
|
||||
pub fn explore_yaml(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== YAML CONFIGURATION ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- YAML Files ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.yaml' -g '*.yml' . 2>/dev/null | grep -v '/node_modules/' | grep -v '/vendor/' | sort | head -50",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// Top-level keys from YAML files
|
||||
report.push_str("--- Top-Level Keys ---\n");
|
||||
let keys = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename -g '*.yaml' -g '*.yml' '^[a-zA-Z_][a-zA-Z0-9_-]*:' . 2>/dev/null | grep -v '/node_modules/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&keys);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore SQL files
|
||||
pub fn explore_sql(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== SQL ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- SQL Files ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.sql' . 2>/dev/null | sort | head -50",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// Tables
|
||||
report.push_str("--- Table Definitions ---\n");
|
||||
let tables = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename -i -g '*.sql' 'CREATE TABLE' . 2>/dev/null | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&tables);
|
||||
report.push('\n');
|
||||
|
||||
// Views and procedures
|
||||
report.push_str("--- Views & Procedures ---\n");
|
||||
let views = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename -i -g '*.sql' 'CREATE (VIEW|PROCEDURE|FUNCTION)' . 2>/dev/null | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&views);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
/// Explore Ruby codebase
|
||||
pub fn explore_ruby(path: &str) -> String {
|
||||
let mut report = String::new();
|
||||
report.push_str("\n=== RUBY ===\n\n");
|
||||
|
||||
// File structure
|
||||
report.push_str("--- File Structure ---\n");
|
||||
let files = run_command(
|
||||
"rg --files -g '*.rb' . 2>/dev/null | grep -v '/vendor/' | sort | head -100",
|
||||
path,
|
||||
);
|
||||
report.push_str(&files);
|
||||
report.push('\n');
|
||||
|
||||
// Gemfile
|
||||
report.push_str("--- Dependencies (Gemfile) ---\n");
|
||||
let gemfile = run_command("cat Gemfile 2>/dev/null | head -50", path);
|
||||
report.push_str(&gemfile);
|
||||
report.push('\n');
|
||||
|
||||
// Classes and modules
|
||||
report.push_str("--- Classes & Modules ---\n");
|
||||
let classes = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.rb' '^(class|module) ' . 2>/dev/null | grep -v '/vendor/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&classes);
|
||||
report.push('\n');
|
||||
|
||||
// Methods
|
||||
report.push_str("--- Methods ---\n");
|
||||
let methods = run_command(
|
||||
r#"rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.rb' '^\s*def ' . 2>/dev/null | grep -v '/vendor/' | head -100"#,
|
||||
path,
|
||||
);
|
||||
report.push_str(&methods);
|
||||
report.push('\n');
|
||||
|
||||
report
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_expand_tilde() {
|
||||
let path = expand_tilde("~/test");
|
||||
assert!(!path.starts_with("~"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_explore_codebase_returns_string() {
|
||||
// Test with current directory
|
||||
let result = explore_codebase(".");
|
||||
assert!(!result.is_empty());
|
||||
}
|
||||
}
|
||||
325 crates/g3-planner/src/lib.rs (new file)
@@ -0,0 +1,325 @@
|
||||
//! g3-planner: Fast-discovery planner for G3 AI coding agent
|
||||
//!
|
||||
//! This crate provides functionality to generate initial discovery tool calls
|
||||
//! that are injected into the conversation before the first LLM turn.
|
||||
|
||||
mod code_explore;
|
||||
pub mod prompts;
|
||||
|
||||
pub use code_explore::explore_codebase;
|
||||
|
||||
use anyhow::Result;
|
||||
use g3_providers::{CompletionRequest, LLMProvider, Message, MessageRole};
|
||||
use chrono::Local;
|
||||
use std::fs::{self, OpenOptions};
|
||||
use std::io::Write;
|
||||
use prompts::{DISCOVERY_REQUIREMENTS_PROMPT, DISCOVERY_SYSTEM_PROMPT};
|
||||
|
||||
/// Type alias for a status callback function
|
||||
pub type StatusCallback = Box<dyn Fn(&str) + Send + Sync>;
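For instance, a CLI front end might forward planner progress to stderr; a minimal sketch:

```rust
// Box the closure so it matches the StatusCallback alias.
let status: g3_planner::StatusCallback = Box::new(|msg| eprintln!("{msg}"));
status("🔍 Starting code discovery...");
```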
|
||||
|
||||
/// Generates initial discovery messages for fast codebase exploration.
|
||||
///
|
||||
/// This function:
|
||||
/// 1. Runs explore_codebase to get a codebase report
|
||||
/// 2. Sends the report to the LLM with DISCOVERY_SYSTEM_PROMPT
|
||||
/// 3. Extracts shell commands from the LLM response
|
||||
/// 4. Returns Assistant messages with tool calls for each command
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `codebase_path` - The path to the codebase to explore
|
||||
/// * `provider` - An LLM provider to query for exploration commands
|
||||
/// * `requirements_text` - Optional requirements text to include in the discovery prompt
|
||||
/// * `status_callback` - Optional callback for status updates
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A `Result<Vec<Message>>` containing Assistant messages with JSON tool call strings.
|
||||
pub async fn get_initial_discovery_messages(
|
||||
codebase_path: &str,
|
||||
requirements_text: Option<&str>,
|
||||
provider: &dyn LLMProvider,
|
||||
status_callback: Option<&StatusCallback>,
|
||||
) -> Result<Vec<Message>> {
|
||||
// Helper to call status callback if provided
|
||||
let status = |msg: &str| {
|
||||
if let Some(cb) = status_callback {
|
||||
cb(msg);
|
||||
}
|
||||
};
|
||||
|
||||
status("🔍 Starting code discovery...");
|
||||
|
||||
// Step 1: Run explore_codebase to get the codebase report
|
||||
let codebase_report = explore_codebase(codebase_path);
|
||||
|
||||
// Write the codebase report to logs directory
|
||||
write_code_report(&codebase_report)?;
|
||||
|
||||
// Step 2: Build the prompt with the codebase report appended
|
||||
let user_prompt = if let Some(requirements) = requirements_text {
|
||||
format!(
    "{}\n\n=== REQUIREMENTS ===\n\n{}\n\n=== CODEBASE REPORT ===\n\n{}",
    DISCOVERY_REQUIREMENTS_PROMPT, requirements, codebase_report
)
|
||||
} else {
|
||||
format!(
|
||||
"{}\n\n=== CODEBASE REPORT ===\n\n{}",
|
||||
DISCOVERY_REQUIREMENTS_PROMPT, codebase_report
|
||||
)
|
||||
};
|
||||
|
||||
// Step 3: Create messages for the LLM
|
||||
let messages = vec![
|
||||
Message::new(MessageRole::System, DISCOVERY_SYSTEM_PROMPT.to_string()),
|
||||
Message::new(MessageRole::User, user_prompt),
|
||||
];
|
||||
|
||||
// Step 4: Send to LLM
|
||||
let request = CompletionRequest {
|
||||
messages,
|
||||
max_tokens: Some(provider.max_tokens()),
|
||||
temperature: Some(provider.temperature()),
|
||||
stream: false,
|
||||
tools: None,
|
||||
};
|
||||
|
||||
status("🤖 Calling LLM for discovery commands...");
|
||||
|
||||
let response = provider.complete(request).await?;
|
||||
|
||||
// Step 5: Extract shell commands from the response
|
||||
let shell_commands = extract_shell_commands(&response.content);
|
||||
|
||||
status(&format!("📋 Extracted {} discovery commands", shell_commands.len()));
|
||||
|
||||
// Write the discovery commands to logs directory
|
||||
write_discovery_commands(&shell_commands)?;
|
||||
|
||||
// Step 6: Format as tool messages
|
||||
let tool_messages = shell_commands
|
||||
.into_iter()
|
||||
.map(|cmd| create_tool_message("shell", &cmd))
|
||||
.collect();
|
||||
|
||||
Ok(tool_messages)
|
||||
}
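A caller holding a configured provider would invoke it roughly like this (a sketch; `provider` is assumed to be a `Box<dyn LLMProvider>` and the requirements text is invented):

```rust
let messages = get_initial_discovery_messages(
    ".",
    Some("Add a --verbose flag to the CLI"),
    provider.as_ref(),
    None,
)
.await?;
// The agent injects these tool-call messages ahead of the first LLM turn.
```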
|
||||
|
||||
/// Creates an Assistant message with a tool call in g3's JSON format.
|
||||
pub fn create_tool_message(tool: &str, command: &str) -> Message {
|
||||
let tool_call = serde_json::json!({
|
||||
"tool": tool,
|
||||
"args": {
|
||||
"command": command
|
||||
}
|
||||
});
|
||||
|
||||
Message::new(MessageRole::Assistant, tool_call.to_string())
|
||||
}
|
||||
|
||||
/// Extract shell commands from the LLM response.
|
||||
/// Looks for {{CODE EXPLORATION COMMANDS}} section and extracts commands from code blocks.
|
||||
pub fn extract_shell_commands(response: &str) -> Vec<String> {
|
||||
let mut commands = Vec::new();
|
||||
|
||||
let section_marker = "{{CODE EXPLORATION COMMANDS}}";
|
||||
let section_start = match response.find(section_marker) {
|
||||
Some(pos) => pos + section_marker.len(),
|
||||
None => return commands,
|
||||
};
|
||||
|
||||
let section_content = &response[section_start..];
|
||||
let mut in_code_block = false;
|
||||
let mut current_block = String::new();
|
||||
|
||||
for line in section_content.lines() {
|
||||
let trimmed = line.trim();
|
||||
|
||||
if trimmed.starts_with("```") {
|
||||
if in_code_block {
|
||||
// End of code block - extract commands
|
||||
for cmd_line in current_block.lines() {
|
||||
let cmd = cmd_line.trim();
|
||||
if !cmd.is_empty() && !cmd.starts_with('#') {
|
||||
commands.push(cmd.to_string());
|
||||
}
|
||||
}
|
||||
current_block.clear();
|
||||
}
|
||||
in_code_block = !in_code_block;
|
||||
} else if in_code_block {
|
||||
current_block.push_str(line);
|
||||
current_block.push('\n');
|
||||
}
|
||||
}
|
||||
|
||||
commands
|
||||
}
|
||||
|
||||
/// Extract the summary section from the LLM response
|
||||
pub fn extract_summary(response: &str) -> Option<String> {
|
||||
let section_marker = "{{SUMMARY BASED ON INITIAL INFO}}";
|
||||
let section_start = match response.find(section_marker) {
|
||||
Some(pos) => pos + section_marker.len(),
|
||||
None => return None,
|
||||
};
|
||||
|
||||
let section_content = &response[section_start..];
|
||||
let section_end = section_content.find("{{").unwrap_or(section_content.len());
|
||||
|
||||
let summary = section_content[..section_end].trim().to_string();
|
||||
if summary.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(summary)
|
||||
}
|
||||
}
|
||||
|
||||
/// Write the codebase report to logs directory
|
||||
fn write_code_report(report: &str) -> Result<()> {
|
||||
// Ensure logs directory exists
|
||||
fs::create_dir_all("logs")?;
|
||||
|
||||
// Generate timestamp in same format as tool_calls log
|
||||
let timestamp = Local::now().format("%Y%m%d_%H%M%S").to_string();
|
||||
let filename = format!("logs/code_report_{}.log", timestamp);
|
||||
|
||||
// Write the report to file
|
||||
let mut file = OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.truncate(true)
|
||||
.open(&filename)?;
|
||||
|
||||
file.write_all(report.as_bytes())?;
|
||||
file.flush()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write the discovery commands to logs directory
|
||||
fn write_discovery_commands(commands: &[String]) -> Result<()> {
|
||||
// Ensure logs directory exists
|
||||
fs::create_dir_all("logs")?;
|
||||
|
||||
// Generate timestamp in same format as tool_calls log
|
||||
let timestamp = Local::now().format("%Y%m%d_%H%M%S").to_string();
|
||||
let filename = format!("logs/discovery_commands_{}.log", timestamp);
|
||||
|
||||
// Write the commands to file
|
||||
let mut file = OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.truncate(true)
|
||||
.open(&filename)?;
|
||||
|
||||
// Write header
|
||||
file.write_all(b"# Discovery Commands\n")?;
|
||||
file.write_all(b"# Generated by g3-planner\n\n")?;
|
||||
|
||||
// Write each command on a separate line
|
||||
for cmd in commands {
|
||||
file.write_all(cmd.as_bytes())?;
|
||||
file.write_all(b"\n")?;
|
||||
}
|
||||
file.flush()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_create_tool_message_format() {
|
||||
let msg = create_tool_message("shell", "ls -la");
|
||||
|
||||
assert!(matches!(msg.role, MessageRole::Assistant));
|
||||
|
||||
let parsed: serde_json::Value = serde_json::from_str(&msg.content).unwrap();
|
||||
assert_eq!(parsed["tool"], "shell");
|
||||
assert_eq!(parsed["args"]["command"], "ls -la");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_shell_commands_basic() {
|
||||
let response = r#"
|
||||
Some text here.
|
||||
|
||||
{{CODE EXPLORATION COMMANDS}}
|
||||
|
||||
```bash
|
||||
ls -la
|
||||
cat README.md
|
||||
rg --files -g '*.rs'
|
||||
```
|
||||
|
||||
More text.
|
||||
"#;
|
||||
|
||||
let commands = extract_shell_commands(response);
|
||||
assert_eq!(commands.len(), 3);
|
||||
assert_eq!(commands[0], "ls -la");
|
||||
assert_eq!(commands[1], "cat README.md");
|
||||
assert_eq!(commands[2], "rg --files -g '*.rs'");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_shell_commands_with_comments() {
|
||||
let response = r#"
|
||||
{{CODE EXPLORATION COMMANDS}}
|
||||
|
||||
```
|
||||
# This is a comment
|
||||
ls -la
|
||||
# Another comment
|
||||
cat file.txt
|
||||
```
|
||||
"#;
|
||||
|
||||
let commands = extract_shell_commands(response);
|
||||
assert_eq!(commands.len(), 2);
|
||||
assert_eq!(commands[0], "ls -la");
|
||||
assert_eq!(commands[1], "cat file.txt");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_shell_commands_no_section() {
|
||||
let response = "Some response without the expected section.";
|
||||
let commands = extract_shell_commands(response);
|
||||
assert!(commands.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_summary() {
|
||||
let response = r#"
|
||||
{{SUMMARY BASED ON INITIAL INFO}}
|
||||
|
||||
This is a summary of the codebase.
|
||||
It has multiple lines.
|
||||
|
||||
{{CODE EXPLORATION COMMANDS}}
|
||||
|
||||
```
|
||||
ls -la
|
||||
```
|
||||
"#;
|
||||
|
||||
let summary = extract_summary(response);
|
||||
assert!(summary.is_some());
|
||||
let summary_text = summary.unwrap();
|
||||
assert!(summary_text.contains("This is a summary"));
|
||||
assert!(summary_text.contains("multiple lines"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_summary_no_section() {
|
||||
let response = "Response without summary section.";
|
||||
let summary = extract_summary(response);
|
||||
assert!(summary.is_none());
|
||||
}
|
||||
}
|
||||
37 crates/g3-planner/src/prompts.rs (new file)
@@ -0,0 +1,37 @@
|
||||
//! Prompts used for the discovery phase

/// System prompt for discovery mode - instructs the LLM to analyze a codebase and generate exploration commands
pub const DISCOVERY_SYSTEM_PROMPT: &str = r#"You are an expert code analyst. Your task is to analyze a codebase structure and generate shell commands to explore it further.

You will receive:
1. User requirements describing what needs to be implemented
2. A codebase report showing the structure and key elements of the codebase

Your job is to:
1. Understand the requirements and identify what parts of the codebase are relevant
2. Generate shell commands to explore those parts in more detail

IMPORTANT: Do NOT attempt to implement anything. Only generate exploration commands."#;

/// Discovery prompt template - used when we have a codebase report.
/// The codebase report should be appended after this prompt.
pub const DISCOVERY_REQUIREMENTS_PROMPT: &str = r#"**CRITICAL**: DO NOT, UNDER ANY CIRCUMSTANCES, ATTEMPT TO IMPLEMENT THESE REQUIREMENTS AT THIS POINT. ONLY USE THEM TO
UNDERSTAND WHICH PARTS OF THE CODE YOU MIGHT BE INTERESTED IN, AND WHAT SEARCH/GREP EXPRESSIONS YOU MIGHT WANT TO USE
TO GET A BETTER UNDERSTANDING OF THE CODEBASE.

Your task is to analyze the codebase overview provided below and generate shell commands to explore it further - in particular, those
you deem most relevant to the requirements given below.

Your output MUST include:
1. A summary report. Use the heading {{SUMMARY BASED ON INITIAL INFO}}.
   - Retain as much of the information as you consider relevant to the requirements and to making an implementation plan.
   - Ideally, this should be no more than 10000 tokens.
2. A list of shell commands to explore the code. Use the heading {{CODE EXPLORATION COMMANDS}}.
   - Try to plan ahead for what you will need for a deep dive into the code. Keep the gathered information concise.
   - Carefully consider which commands give you the most relevant information; pick the top 25 commands.
   - Use tools like `ls`, `rg` (ripgrep), `grep`, `sed`, `cat`, `head`, `tail` etc.
   - Focus on commands that will help understand the code STRUCTURE without dumping large sections of files.
   - e.g. for Rust you might try `rg --no-heading --line-number --with-filename --max-filesize 500K -g '*.rs' '^(pub )?(struct|enum|type|union) '`
   - Mark the beginning and end of the commands with "```".

DO NOT ADD ANY COMMENTS OR OTHER EXPLANATION IN THE COMMANDS SECTION, JUST INCLUDE THE SHELL COMMANDS."#;
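To make the contract between this prompt and the extractors in lib.rs concrete, here is a round-trip sketch (the response text is invented; the backtick fence is built at runtime so the example stays self-contained):

```rust
use g3_planner::{extract_shell_commands, extract_summary};

#[test]
fn discovery_response_round_trip() {
    let fence = "`".repeat(3);
    // Invented model output following the two required headings.
    let response = format!(
        "{{{{SUMMARY BASED ON INITIAL INFO}}}}\nA small Rust workspace with one binary crate.\n\n\
         {{{{CODE EXPLORATION COMMANDS}}}}\n{fence}\nls -la\ncat README.md\n{fence}\n"
    );

    assert_eq!(
        extract_shell_commands(&response),
        vec!["ls -la".to_string(), "cat README.md".to_string()]
    );
    assert!(extract_summary(&response).unwrap().contains("Rust workspace"));
}
```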
|
||||
60 crates/g3-planner/tests/logging_test.rs (new file)
@@ -0,0 +1,60 @@
|
||||
//! Integration tests for logging functionality
|
||||
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
#[test]
|
||||
fn test_log_files_created() {
|
||||
// This test verifies that the logging functions work correctly
|
||||
// by checking that files can be created in the logs directory
|
||||
|
||||
// Clean up any existing test logs
|
||||
let _ = fs::remove_dir_all("logs");
|
||||
|
||||
// Create logs directory
|
||||
fs::create_dir_all("logs").expect("Failed to create logs directory");
|
||||
|
||||
// Verify directory exists
|
||||
assert!(Path::new("logs").exists());
|
||||
assert!(Path::new("logs").is_dir());
|
||||
|
||||
// Test writing a code report
|
||||
let test_report = "Test codebase report\nLine 2\nLine 3";
|
||||
let timestamp = chrono::Local::now().format("%Y%m%d_%H%M%S").to_string();
|
||||
let report_filename = format!("logs/code_report_{}.log", timestamp);
|
||||
|
||||
fs::write(&report_filename, test_report).expect("Failed to write code report");
|
||||
assert!(Path::new(&report_filename).exists());
|
||||
|
||||
let content = fs::read_to_string(&report_filename).expect("Failed to read code report");
|
||||
assert_eq!(content, test_report);
|
||||
|
||||
// Test writing discovery commands
|
||||
let commands_filename = format!("logs/discovery_commands_{}.log", timestamp);
|
||||
let test_commands = "# Discovery Commands\n# Generated by g3-planner\n\nls -la\ncat README.md\n";
|
||||
|
||||
fs::write(&commands_filename, test_commands).expect("Failed to write discovery commands");
|
||||
assert!(Path::new(&commands_filename).exists());
|
||||
|
||||
let content = fs::read_to_string(&commands_filename).expect("Failed to read discovery commands");
|
||||
assert_eq!(content, test_commands);
|
||||
|
||||
// Clean up
|
||||
let _ = fs::remove_file(&report_filename);
|
||||
let _ = fs::remove_file(&commands_filename);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filename_format() {
|
||||
// Verify the filename format matches the tool_calls log format
|
||||
let timestamp = chrono::Local::now().format("%Y%m%d_%H%M%S").to_string();
|
||||
|
||||
// Check format: YYYYMMDD_HHMMSS
|
||||
assert_eq!(timestamp.len(), 15); // 8 digits + underscore + 6 digits
|
||||
assert!(timestamp.contains('_'));
|
||||
|
||||
let parts: Vec<&str> = timestamp.split('_').collect();
|
||||
assert_eq!(parts.len(), 2);
|
||||
assert_eq!(parts[0].len(), 8); // YYYYMMDD
|
||||
assert_eq!(parts[1].len(), 6); // HHMMSS
|
||||
}
|
||||
103 crates/g3-planner/tests/planner_test.rs (new file)
@@ -0,0 +1,103 @@
|
||||
//! Integration tests for g3-planner

use g3_planner::{create_tool_message, explore_codebase, extract_shell_commands};
use g3_providers::MessageRole;

#[test]
fn test_create_tool_message_format() {
    let msg = create_tool_message("shell", "ls -la");

    assert!(matches!(msg.role, MessageRole::Assistant));

    let parsed: serde_json::Value = serde_json::from_str(&msg.content).unwrap();
    assert_eq!(parsed["tool"], "shell");
    assert_eq!(parsed["args"]["command"], "ls -la");
}

#[test]
fn test_explore_codebase_returns_report() {
    // Test with current directory (should find Rust files in g3 project)
    let report = explore_codebase(".");

    // Should return a non-empty report
    assert!(!report.is_empty(), "Report should not be empty");

    // Should contain the codebase analysis header
    assert!(
        report.contains("CODEBASE ANALYSIS") || report.contains("No recognized"),
        "Report should have analysis header or indicate no languages found"
    );
}

#[test]
fn test_extract_shell_commands_basic() {
    let response = r#"
Some text here.

{{CODE EXPLORATION COMMANDS}}

```bash
ls -la
cat README.md
rg --files -g '*.rs'
```

More text.
"#;

    let commands = extract_shell_commands(response);
    assert_eq!(commands.len(), 3);
    assert_eq!(commands[0], "ls -la");
    assert_eq!(commands[1], "cat README.md");
    assert_eq!(commands[2], "rg --files -g '*.rs'");
}

#[test]
fn test_extract_shell_commands_with_comments() {
    let response = r#"
{{CODE EXPLORATION COMMANDS}}

```
# This is a comment
ls -la
# Another comment
cat file.txt
```
"#;

    let commands = extract_shell_commands(response);
    assert_eq!(commands.len(), 2);
    assert_eq!(commands[0], "ls -la");
    assert_eq!(commands[1], "cat file.txt");
}

#[test]
fn test_extract_shell_commands_no_section() {
    let response = "Some response without the expected section.";
    let commands = extract_shell_commands(response);
    assert!(commands.is_empty());
}

#[test]
fn test_extract_shell_commands_multiple_code_blocks() {
    let response = r#"
{{CODE EXPLORATION COMMANDS}}

```bash
ls -la
```

Some explanation text.

```
cat README.md
head -50 src/main.rs
```
"#;

    let commands = extract_shell_commands(response);
    assert_eq!(commands.len(), 3);
    assert_eq!(commands[0], "ls -la");
    assert_eq!(commands[1], "cat README.md");
    assert_eq!(commands[2], "head -50 src/main.rs");
}
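The first test above fixes `create_tool_message`'s envelope: an assistant-role message whose content is a JSON object with `tool` and `args.command` fields. A sketch matching that shape, assuming `Message` is a plain role-plus-content struct (its real definition in g3-providers may carry more fields):

```rust
use g3_providers::{Message, MessageRole};
use serde_json::json;

// Hypothetical sketch matching the tested envelope; the Message
// construction here assumes a simple { role, content } struct.
fn create_tool_message(tool: &str, command: &str) -> Message {
    Message {
        role: MessageRole::Assistant,
        content: json!({
            "tool": tool,
            "args": { "command": command },
        })
        .to_string(),
    }
}
```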
@@ -678,6 +678,14 @@ impl LLMProvider for AnthropicProvider {
        // Anthropic supports cache control
        true
    }

    fn max_tokens(&self) -> u32 {
        self.max_tokens
    }

    fn temperature(&self) -> f32 {
        self.temperature
    }
}

// Anthropic API request/response structures
@@ -1055,6 +1055,14 @@ impl LLMProvider for DatabricksProvider {
    fn supports_cache_control(&self) -> bool {
        false
    }

    fn max_tokens(&self) -> u32 {
        self.max_tokens
    }

    fn temperature(&self) -> f32 {
        self.temperature
    }
}

// Databricks API request/response structures
@@ -771,4 +771,12 @@ impl LLMProvider for EmbeddedProvider {
    fn model(&self) -> &str {
        &self.model_name
    }

    fn max_tokens(&self) -> u32 {
        self.max_tokens
    }

    fn temperature(&self) -> f32 {
        self.temperature
    }
}
@@ -26,6 +26,12 @@ pub trait LLMProvider: Send + Sync {
    fn supports_cache_control(&self) -> bool {
        false
    }

    /// Get the configured max_tokens for this provider
    fn max_tokens(&self) -> u32;

    /// Get the configured temperature for this provider
    fn temperature(&self) -> f32;
}

#[derive(Debug, Clone, Serialize, Deserialize)]
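With no default bodies, the trait change forces every provider in this diff (Anthropic, Databricks, Embedded, OpenAI) to expose its configured limits, and any call site holding a `dyn LLMProvider` can then read them generically. A minimal hypothetical call site:

```rust
use g3_providers::LLMProvider;

// Hypothetical helper (not from the diff): read the request budget off
// any provider behind the trait object instead of duplicating defaults.
fn request_budget(provider: &dyn LLMProvider) -> (u32, f32) {
    (provider.max_tokens(), provider.temperature())
}
```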
@@ -384,6 +384,14 @@ impl LLMProvider for OpenAIProvider {
        // OpenAI models support native tool calling
        true
    }

    fn max_tokens(&self) -> u32 {
        self.max_tokens.unwrap_or(16000)
    }

    fn temperature(&self) -> f32 {
        self._temperature.unwrap_or(0.1)
    }
}

fn convert_messages(messages: &[Message]) -> Vec<serde_json::Value> {
70
tail_tool_logs.sh
Executable file
@@ -0,0 +1,70 @@
#!/bin/bash

# Tails the newest tool_calls log: picks up whichever file is latest,
# runs tail -f on it, and switches whenever a newer one appears.

if [[ -n "$G3_WORKSPACE" ]]; then
    TARGET_DIR="$G3_WORKSPACE/logs"
else
    TARGET_DIR="$HOME/tmp/workspace/logs"
fi

if [[ ! -d "$TARGET_DIR" ]]; then
    echo "Error: Directory '$TARGET_DIR' does not exist."
    exit 1
fi

cd "$TARGET_DIR" || exit 1

echo "Monitoring directory '$TARGET_DIR' for newest 'tool_calls*' file..."

# Variables to keep track of the current state
CURRENT_PID=""
CURRENT_FILE=""

# Cleanup function: kill the background tail process when this script is stopped (Ctrl+C)
cleanup() {
    echo ""
    echo "Stopping monitor..."
    if [[ -n "$CURRENT_PID" ]]; then
        kill "$CURRENT_PID" 2>/dev/null
    fi
    exit 0
}

# Register the cleanup function for SIGINT (Ctrl+C) and SIGTERM
trap cleanup SIGINT SIGTERM

while true; do
    # Find the newest file matching the pattern using ls -t (sort by time)
    # 2>/dev/null suppresses errors if no files are found
    NEWEST_FILE=$(ls -t tool_calls* 2>/dev/null | head -n 1)

    # If a file was found AND it is different from the one we are currently watching
    if [[ -n "$NEWEST_FILE" && "$NEWEST_FILE" != "$CURRENT_FILE" ]]; then

        # If we were already watching a file, kill the old tail process
        if [[ -n "$CURRENT_PID" ]]; then
            kill "$CURRENT_PID" 2>/dev/null
        fi

        echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
        echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
        echo ">>> Switched to new file: $NEWEST_FILE"
        echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
        echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"

        # Start tail in the background (&)
        tail -f "$NEWEST_FILE" &

        # Capture the process ID ($!) of the tail command we just launched
        CURRENT_PID=$!

        # Update the tracker variable
        CURRENT_FILE="$NEWEST_FILE"
    fi

    # Wait 1 second before checking again
    sleep 1
done
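Run it directly with `./tail_tool_logs.sh`, or point it at a specific workspace with `G3_WORKSPACE=~/my-workspace ./tail_tool_logs.sh` (the `~/my-workspace` path is just an example); when the variable is unset it falls back to `$HOME/tmp/workspace/logs`.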