Compare commits
1 Commits
jochen-ast
...
anthropic-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9218ba2ab4 |
32
Cargo.lock
generated
32
Cargo.lock
generated
@@ -990,7 +990,7 @@ dependencies = [
|
||||
"libc",
|
||||
"option-ext",
|
||||
"redox_users 0.5.2",
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1062,7 +1062,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1391,7 +1391,6 @@ dependencies = [
|
||||
"reqwest",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_yaml",
|
||||
"shellexpand",
|
||||
"thiserror 1.0.69",
|
||||
"tokio",
|
||||
@@ -2334,7 +2333,7 @@ version = "0.50.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
|
||||
dependencies = [
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2905,7 +2904,7 @@ dependencies = [
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys 0.11.0",
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3079,19 +3078,6 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_yaml"
|
||||
version = "0.9.34+deprecated"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
|
||||
dependencies = [
|
||||
"indexmap",
|
||||
"itoa",
|
||||
"ryu",
|
||||
"serde",
|
||||
"unsafe-libyaml",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sha2"
|
||||
version = "0.10.9"
|
||||
@@ -3306,7 +3292,7 @@ dependencies = [
|
||||
"getrandom 0.3.4",
|
||||
"once_cell",
|
||||
"rustix 1.1.2",
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3681,12 +3667,6 @@ version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd"
|
||||
|
||||
[[package]]
|
||||
name = "unsafe-libyaml"
|
||||
version = "0.2.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
|
||||
|
||||
[[package]]
|
||||
name = "url"
|
||||
version = "2.5.7"
|
||||
@@ -3955,7 +3935,7 @@ version = "0.1.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
|
||||
dependencies = [
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
34
README.md
34
README.md
@@ -132,40 +132,6 @@ G3 is designed for:
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Default Mode: Accumulative Autonomous
|
||||
|
||||
The default interactive mode now uses **accumulative autonomous mode**, which combines the best of interactive and autonomous workflows:
|
||||
|
||||
```bash
|
||||
# Simply run g3 in any directory
|
||||
g3
|
||||
|
||||
# You'll be prompted to describe what you want to build
|
||||
# Each input you provide:
|
||||
# 1. Gets added to accumulated requirements
|
||||
# 2. Automatically triggers autonomous mode (coach-player loop)
|
||||
# 3. Implements your requirements iteratively
|
||||
|
||||
# Example session:
|
||||
requirement> create a simple web server in Python with Flask
|
||||
# ... autonomous mode runs and implements it ...
|
||||
requirement> add a /health endpoint that returns JSON
|
||||
# ... autonomous mode runs again with both requirements ...
|
||||
```
|
||||
|
||||
### Other Modes
|
||||
|
||||
```bash
|
||||
# Single-shot mode (one task, then exit)
|
||||
g3 "implement a function to calculate fibonacci numbers"
|
||||
|
||||
# Traditional autonomous mode (reads requirements.md)
|
||||
g3 --autonomous
|
||||
|
||||
# Traditional chat mode (simple interactive chat without autonomous runs)
|
||||
g3 --chat
|
||||
```
|
||||
|
||||
```bash
|
||||
# Build the project
|
||||
cargo build --release
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::Result;
|
||||
use crossterm::style::{Color, SetForegroundColor, ResetColor};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -175,7 +174,7 @@ mod machine_ui_writer;
|
||||
use machine_ui_writer::MachineUiWriter;
|
||||
use ui_writer_impl::ConsoleUiWriter;
|
||||
|
||||
#[derive(Parser, Clone)]
|
||||
#[derive(Parser)]
|
||||
#[command(name = "g3")]
|
||||
#[command(about = "A modular, composable AI coding agent")]
|
||||
#[command(version)]
|
||||
@@ -215,9 +214,9 @@ pub struct Cli {
|
||||
#[arg(long, value_name = "TEXT")]
|
||||
pub requirements: Option<String>,
|
||||
|
||||
/// Enable accumulative autonomous mode (default is chat mode)
|
||||
/// Interactive mode: prompt for requirements and save to requirements.md before starting autonomous mode
|
||||
#[arg(long)]
|
||||
pub auto: bool,
|
||||
pub interactive_requirements: bool,
|
||||
|
||||
/// Enable machine-friendly output mode with JSON markers and stats
|
||||
#[arg(long)]
|
||||
@@ -310,6 +309,112 @@ pub async fn run() -> Result<()> {
|
||||
|
||||
// Create project model
|
||||
let project = if cli.autonomous {
|
||||
// Handle interactive requirements mode with AI enhancement
|
||||
if cli.interactive_requirements {
|
||||
println!("\n📝 Interactive Requirements Mode");
|
||||
println!("================================\n");
|
||||
println!("Describe what you want to build (can be brief):");
|
||||
println!("Press Ctrl+D (Unix) or Ctrl+Z (Windows) when done.\n");
|
||||
|
||||
use std::io::{self, Read, Write};
|
||||
let mut requirements_input = String::new();
|
||||
io::stdin().read_to_string(&mut requirements_input)?;
|
||||
|
||||
if requirements_input.trim().is_empty() {
|
||||
anyhow::bail!("No requirements provided. Exiting.");
|
||||
}
|
||||
|
||||
println!("\n🤖 Enhancing your requirements with AI...\n");
|
||||
|
||||
// Create a temporary agent to enhance the requirements
|
||||
let temp_config = Config::load_with_overrides(
|
||||
cli.config.as_deref(),
|
||||
cli.provider.clone(),
|
||||
cli.model.clone(),
|
||||
)?;
|
||||
|
||||
let ui_writer = ConsoleUiWriter::new();
|
||||
let mut temp_agent = Agent::new_with_readme_and_quiet(
|
||||
temp_config,
|
||||
ui_writer,
|
||||
None,
|
||||
true, // quiet mode
|
||||
).await?;
|
||||
|
||||
// Craft the enhancement prompt
|
||||
let enhancement_prompt = format!(
|
||||
r#"You are a requirements analyst. Take this brief user input and expand it into a structured requirements document.
|
||||
|
||||
USER INPUT:
|
||||
{}
|
||||
|
||||
Create a professional requirements document with:
|
||||
1. A clear project title (# heading)
|
||||
2. An overview section explaining what will be built
|
||||
3. Organized requirements (functional, technical, quality)
|
||||
4. Acceptance criteria
|
||||
5. Any technical constraints or preferences mentioned
|
||||
|
||||
Format as proper markdown. Be specific and actionable. If the user's input is vague, make reasonable assumptions but keep it focused on what they described.
|
||||
|
||||
Output ONLY the markdown content, no explanations or meta-commentary."#,
|
||||
requirements_input.trim()
|
||||
);
|
||||
|
||||
// Execute enhancement task
|
||||
let result = temp_agent
|
||||
.execute_task_with_timing(&enhancement_prompt, None, false, false, false, false)
|
||||
.await?;
|
||||
|
||||
let enhanced_requirements = result.response.trim().to_string();
|
||||
|
||||
// Show the enhanced requirements
|
||||
println!("\n📋 Enhanced Requirements Document:");
|
||||
println!("{}\n", "=".repeat(60));
|
||||
println!("{}", enhanced_requirements);
|
||||
println!("{}\n", "=".repeat(60));
|
||||
|
||||
// Ask for confirmation
|
||||
println!("\n❓ Is this requirements document acceptable?");
|
||||
println!(" [y] Yes, proceed with autonomous mode");
|
||||
println!(" [e] Edit and save manually");
|
||||
println!(" [n] No, cancel\n");
|
||||
|
||||
print!("Your choice (y/e/n): ");
|
||||
io::stdout().flush()?;
|
||||
|
||||
let mut choice = String::new();
|
||||
io::stdin().read_line(&mut choice)?;
|
||||
let choice = choice.trim().to_lowercase();
|
||||
|
||||
let requirements_path = workspace_dir.join("requirements.md");
|
||||
|
||||
match choice.as_str() {
|
||||
"y" | "yes" => {
|
||||
// Save enhanced requirements
|
||||
std::fs::write(&requirements_path, &enhanced_requirements)?;
|
||||
println!("\n✅ Requirements saved to: {}", requirements_path.display());
|
||||
println!("🚀 Starting autonomous mode...\n");
|
||||
}
|
||||
"e" | "edit" => {
|
||||
// Save enhanced requirements for manual editing
|
||||
std::fs::write(&requirements_path, &enhanced_requirements)?;
|
||||
println!("\n✅ Requirements saved to: {}", requirements_path.display());
|
||||
println!("📝 Please edit the file and run: g3 --autonomous");
|
||||
println!(" Exiting for now.\n");
|
||||
return Ok(());
|
||||
}
|
||||
"n" | "no" => {
|
||||
println!("\n❌ Cancelled. No files were saved.\n");
|
||||
return Ok(());
|
||||
}
|
||||
_ => {
|
||||
println!("\n❌ Invalid choice. Cancelled.\n");
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(requirements_text) = &cli.requirements {
|
||||
// Use requirements text override
|
||||
Project::new_autonomous_with_requirements(workspace_dir.clone(), requirements_text.clone())?
|
||||
@@ -377,7 +482,6 @@ pub async fn run() -> Result<()> {
|
||||
// Execute task, autonomous mode, or start interactive mode based on machine mode
|
||||
if cli.machine {
|
||||
// Machine mode - use MachineUiWriter
|
||||
|
||||
let ui_writer = MachineUiWriter::new();
|
||||
|
||||
let agent = if cli.autonomous {
|
||||
@@ -401,20 +505,6 @@ pub async fn run() -> Result<()> {
|
||||
run_with_machine_mode(agent, cli, project).await?;
|
||||
} else {
|
||||
// Normal mode - use ConsoleUiWriter
|
||||
|
||||
// DEFAULT: Chat mode for interactive sessions
|
||||
// It runs when:
|
||||
// 1. No task is provided (not single-shot)
|
||||
// 2. Not in autonomous mode
|
||||
// 3. Not explicitly enabled with --auto flag
|
||||
let use_accumulative = cli.task.is_none() && !cli.autonomous && cli.auto;
|
||||
|
||||
if use_accumulative {
|
||||
// Run accumulative mode and return early
|
||||
run_accumulative_mode(workspace_dir.clone(), cli.clone(), combined_content.clone()).await?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let ui_writer = ConsoleUiWriter::new();
|
||||
|
||||
let agent = if cli.autonomous {
|
||||
@@ -437,274 +527,7 @@ pub async fn run() -> Result<()> {
|
||||
|
||||
run_with_console_mode(agent, cli, project, combined_content).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Accumulative autonomous mode: accumulates requirements from user input
|
||||
/// and runs autonomous mode after each input
|
||||
async fn run_accumulative_mode(
|
||||
workspace_dir: PathBuf,
|
||||
cli: Cli,
|
||||
combined_content: Option<String>,
|
||||
) -> Result<()> {
|
||||
let output = SimpleOutput::new();
|
||||
|
||||
output.print("");
|
||||
output.print("🪿 G3 AI Coding Agent - Autonomous Mode");
|
||||
output.print(" >> describe what you want, I'll build it iteratively");
|
||||
output.print("");
|
||||
output.print(&format!("📁 Workspace: {}", workspace_dir.display()));
|
||||
output.print("");
|
||||
output.print("💡 Each input you provide will be added to requirements");
|
||||
output.print(" and I'll automatically work on implementing them. You can");
|
||||
output.print(" interrupt at any time (Ctrl+C) to add clarifications or more requirements.");
|
||||
output.print("");
|
||||
output.print(" Type '/help' for commands, 'exit' or 'quit' to stop, Ctrl+D to finish");
|
||||
output.print("");
|
||||
|
||||
// Initialize rustyline editor with history
|
||||
let mut rl = DefaultEditor::new()?;
|
||||
let history_file = dirs::home_dir().map(|mut path| {
|
||||
path.push(".g3_accumulative_history");
|
||||
path
|
||||
});
|
||||
|
||||
if let Some(ref history_path) = history_file {
|
||||
let _ = rl.load_history(history_path);
|
||||
}
|
||||
|
||||
// Accumulated requirements stored in memory
|
||||
let mut accumulated_requirements = Vec::new();
|
||||
let mut turn_number = 0;
|
||||
|
||||
loop {
|
||||
output.print(&format!("\n{}", "=".repeat(60)));
|
||||
if accumulated_requirements.is_empty() {
|
||||
output.print("📝 What would you like me to build? (describe your requirements)");
|
||||
} else {
|
||||
output.print(&format!("📝 Turn {} - What's next? (add more requirements or refinements)", turn_number + 1));
|
||||
}
|
||||
output.print(&format!("{}", "=".repeat(60)));
|
||||
|
||||
let readline = rl.readline("requirement> ");
|
||||
match readline {
|
||||
Ok(line) => {
|
||||
let input = line.trim().to_string();
|
||||
|
||||
if input.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if input == "exit" || input == "quit" {
|
||||
output.print("\n👋 Goodbye!");
|
||||
break;
|
||||
}
|
||||
|
||||
// Check for slash commands
|
||||
if input.starts_with('/') {
|
||||
match input.as_str() {
|
||||
"/help" => {
|
||||
output.print("");
|
||||
output.print("📖 Available Commands:");
|
||||
output.print(" /requirements - Show all accumulated requirements");
|
||||
output.print(" /chat - Switch to interactive chat mode");
|
||||
output.print(" /help - Show this help message");
|
||||
output.print(" exit/quit - Exit the session");
|
||||
output.print("");
|
||||
continue;
|
||||
}
|
||||
"/requirements" => {
|
||||
output.print("");
|
||||
if accumulated_requirements.is_empty() {
|
||||
output.print("📋 No requirements accumulated yet");
|
||||
} else {
|
||||
output.print("📋 Accumulated Requirements:");
|
||||
output.print("");
|
||||
for req in &accumulated_requirements {
|
||||
output.print(&format!(" {}", req));
|
||||
}
|
||||
}
|
||||
output.print("");
|
||||
continue;
|
||||
}
|
||||
"/chat" => {
|
||||
output.print("");
|
||||
output.print("🔄 Switching to interactive chat mode...");
|
||||
output.print("");
|
||||
|
||||
// Build context message with accumulated requirements
|
||||
let requirements_context = if accumulated_requirements.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(format!(
|
||||
"📋 Context from Accumulative Mode:\n\n\
|
||||
We were working on these requirements. There may be unstaged or in-progress changes or recent changes to this branch. This is for your information.\n\n\
|
||||
Requirements:\n{}\n",
|
||||
accumulated_requirements.join("\n")
|
||||
))
|
||||
};
|
||||
|
||||
// Combine with existing content (README/AGENTS.md)
|
||||
let chat_combined_content = match (requirements_context, combined_content.clone()) {
|
||||
(Some(req_ctx), Some(existing)) => Some(format!("{}\n\n{}", req_ctx, existing)),
|
||||
(Some(req_ctx), None) => Some(req_ctx),
|
||||
(None, existing) => existing,
|
||||
};
|
||||
|
||||
// Load configuration
|
||||
let mut config = Config::load_with_overrides(
|
||||
cli.config.as_deref(),
|
||||
cli.provider.clone(),
|
||||
cli.model.clone(),
|
||||
)?;
|
||||
|
||||
// Apply macax flag override
|
||||
if cli.macax {
|
||||
config.macax.enabled = true;
|
||||
}
|
||||
|
||||
// Apply webdriver flag override
|
||||
if cli.webdriver {
|
||||
config.webdriver.enabled = true;
|
||||
}
|
||||
|
||||
// Create agent for interactive mode with requirements context
|
||||
let ui_writer = ConsoleUiWriter::new();
|
||||
let agent = Agent::new_with_readme_and_quiet(
|
||||
config,
|
||||
ui_writer,
|
||||
chat_combined_content.clone(),
|
||||
cli.quiet,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Run interactive mode
|
||||
run_interactive(agent, cli.show_prompt, cli.show_code, chat_combined_content).await?;
|
||||
|
||||
// After returning from interactive mode, exit
|
||||
output.print("\n👋 Goodbye!");
|
||||
break;
|
||||
}
|
||||
_ => {
|
||||
output.print(&format!("❌ Unknown command: {}. Type /help for available commands.", input));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add to history
|
||||
rl.add_history_entry(&input)?;
|
||||
|
||||
// Add this requirement to accumulated list
|
||||
turn_number += 1;
|
||||
accumulated_requirements.push(format!("{}. {}", turn_number, input));
|
||||
|
||||
// Build the complete requirements document
|
||||
let requirements_doc = format!(
|
||||
"# Project Requirements\n\n\
|
||||
## Current Instructions and Requirements:\n\n\
|
||||
{}\n\n\
|
||||
## Latest Requirement (Turn {}):\n\n\
|
||||
{}",
|
||||
accumulated_requirements.join("\n"),
|
||||
turn_number,
|
||||
input
|
||||
);
|
||||
|
||||
output.print("");
|
||||
output.print(&format!("📋 Current instructions and requirements (Turn {}):", turn_number));
|
||||
output.print(&format!(" {}", input));
|
||||
output.print("");
|
||||
output.print("🚀 Starting autonomous implementation...");
|
||||
output.print("");
|
||||
|
||||
// Create a project with the accumulated requirements
|
||||
let project = Project::new_autonomous_with_requirements(
|
||||
workspace_dir.clone(),
|
||||
requirements_doc.clone()
|
||||
)?;
|
||||
|
||||
// Ensure workspace exists and enter it
|
||||
project.ensure_workspace_exists()?;
|
||||
project.enter_workspace()?;
|
||||
|
||||
// Load configuration with CLI overrides
|
||||
let mut config = Config::load_with_overrides(
|
||||
cli.config.as_deref(),
|
||||
cli.provider.clone(),
|
||||
cli.model.clone(),
|
||||
)?;
|
||||
|
||||
// Apply macax flag override
|
||||
if cli.macax {
|
||||
config.macax.enabled = true;
|
||||
}
|
||||
|
||||
// Apply webdriver flag override
|
||||
if cli.webdriver {
|
||||
config.webdriver.enabled = true;
|
||||
}
|
||||
|
||||
// Create agent for this autonomous run
|
||||
let ui_writer = ConsoleUiWriter::new();
|
||||
let agent = Agent::new_autonomous_with_readme_and_quiet(
|
||||
config.clone(),
|
||||
ui_writer,
|
||||
combined_content.clone(),
|
||||
cli.quiet,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Run autonomous mode with the accumulated requirements
|
||||
let autonomous_result = tokio::select! {
|
||||
result = run_autonomous(
|
||||
agent,
|
||||
project,
|
||||
cli.show_prompt,
|
||||
cli.show_code,
|
||||
cli.max_turns,
|
||||
cli.quiet,
|
||||
) => result,
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
output.print("\n⚠️ Autonomous run cancelled by user (Ctrl+C)");
|
||||
Ok(())
|
||||
}
|
||||
};
|
||||
|
||||
match autonomous_result
|
||||
{
|
||||
Ok(_) => {
|
||||
output.print("");
|
||||
output.print("✅ Autonomous run completed");
|
||||
}
|
||||
Err(e) => {
|
||||
output.print("");
|
||||
output.print(&format!("❌ Autonomous run failed: {}", e));
|
||||
output.print(" You can provide more requirements to continue.");
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(ReadlineError::Interrupted) => {
|
||||
output.print("\n👋 Interrupted. Goodbye!");
|
||||
break;
|
||||
}
|
||||
Err(ReadlineError::Eof) => {
|
||||
output.print("\n👋 Goodbye!");
|
||||
break;
|
||||
}
|
||||
Err(err) => {
|
||||
error!("Error: {:?}", err);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save history before exiting
|
||||
if let Some(ref history_path) = history_file {
|
||||
let _ = rl.save_history(history_path);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1480,32 +1303,10 @@ fn handle_execution_error(e: &anyhow::Error, input: &str, output: &SimpleOutput,
|
||||
}
|
||||
}
|
||||
|
||||
fn display_context_progress<W: UiWriter>(agent: &Agent<W>, _output: &SimpleOutput) {
|
||||
fn display_context_progress<W: UiWriter>(agent: &Agent<W>, output: &SimpleOutput) {
|
||||
let context = agent.get_context_window();
|
||||
let percentage = context.percentage_used();
|
||||
|
||||
// Create 10 dots representing context fullness
|
||||
let total_dots: usize = 10;
|
||||
let filled_dots = ((percentage / 100.0) * total_dots as f32).round() as usize;
|
||||
let empty_dots = total_dots.saturating_sub(filled_dots);
|
||||
|
||||
let filled_str = "●".repeat(filled_dots);
|
||||
let empty_str = "○".repeat(empty_dots);
|
||||
|
||||
// Determine color based on percentage
|
||||
let color = if percentage < 40.0 {
|
||||
Color::Green
|
||||
} else if percentage < 60.0 {
|
||||
Color::Yellow
|
||||
} else if percentage < 80.0 {
|
||||
Color::Rgb { r: 255, g: 165, b: 0 } // Orange
|
||||
} else {
|
||||
Color::Red
|
||||
};
|
||||
|
||||
// Print with colored dots (using print! directly to handle color codes)
|
||||
print!("Context: {}{}{}{} {:.0}% ({}/{} tokens)\n",
|
||||
SetForegroundColor(color), filled_str, empty_str, ResetColor, percentage, context.used_tokens, context.total_tokens);
|
||||
output.print(&format!("Context: {}/{} tokens ({:.1}%)",
|
||||
context.used_tokens, context.total_tokens, context.percentage_used()));
|
||||
}
|
||||
|
||||
/// Set up the workspace directory for autonomous mode
|
||||
|
||||
@@ -71,20 +71,18 @@ impl SimpleOutput {
|
||||
}
|
||||
|
||||
pub fn print_context(&self, used: u32, total: u32, percentage: f32) {
|
||||
let total_dots = 10;
|
||||
let filled_dots = ((percentage / 100.0) * total_dots as f32) as usize;
|
||||
let empty_dots = total_dots.saturating_sub(filled_dots);
|
||||
let bar_width: usize = 10;
|
||||
let filled_width = ((percentage / 100.0) * bar_width as f32) as usize;
|
||||
let empty_width = bar_width.saturating_sub(filled_width);
|
||||
|
||||
let filled_str = "●".repeat(filled_dots);
|
||||
let empty_str = "○".repeat(empty_dots);
|
||||
let filled_chars = "●".repeat(filled_width);
|
||||
let empty_chars = "○".repeat(empty_width);
|
||||
|
||||
// Determine color based on percentage
|
||||
let color = if percentage < 40.0 {
|
||||
let color = if percentage < 60.0 {
|
||||
crossterm::style::Color::Green
|
||||
} else if percentage < 60.0 {
|
||||
crossterm::style::Color::Yellow
|
||||
} else if percentage < 80.0 {
|
||||
crossterm::style::Color::Rgb { r: 255, g: 165, b: 0 } // Orange
|
||||
crossterm::style::Color::Yellow
|
||||
} else {
|
||||
crossterm::style::Color::Red
|
||||
};
|
||||
@@ -92,9 +90,9 @@ impl SimpleOutput {
|
||||
// Print with colored progress bar
|
||||
print!("Context: ");
|
||||
print!("{}", SetForegroundColor(color));
|
||||
print!("{}{}", filled_str, empty_str);
|
||||
print!("{}{}", filled_chars, empty_chars);
|
||||
print!("{}", ResetColor);
|
||||
println!(" {:.0}% ({}/{} tokens)", percentage, used, total);
|
||||
println!(" {:.1}% | {}/{} tokens", percentage, used, total);
|
||||
}
|
||||
|
||||
pub fn print_context_thinning(&self, message: &str) {
|
||||
|
||||
@@ -25,4 +25,3 @@ chrono = { version = "0.4", features = ["serde"] }
|
||||
rand = "0.8"
|
||||
regex = "1.0"
|
||||
shellexpand = "3.1"
|
||||
serde_yaml = "0.9"
|
||||
|
||||
@@ -1,787 +0,0 @@
|
||||
//! Code search functionality using ast-grep for syntax-aware semantic searches
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
use std::process::Stdio;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||
use tokio::process::Command;
|
||||
use tokio::sync::Semaphore;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Maximum number of searches allowed per request
|
||||
const MAX_SEARCHES: usize = 20;
|
||||
|
||||
/// Default timeout for individual searches in seconds
|
||||
const DEFAULT_TIMEOUT_SECS: u64 = 60;
|
||||
|
||||
/// Default maximum concurrency
|
||||
const DEFAULT_MAX_CONCURRENCY: usize = 4;
|
||||
|
||||
/// Default maximum matches per search
|
||||
const DEFAULT_MAX_MATCHES: usize = 500;
|
||||
|
||||
/// Search specification for a single ast-grep search
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct SearchSpec {
|
||||
pub name: String,
|
||||
pub mode: SearchMode,
|
||||
|
||||
// Pattern mode fields
|
||||
pub pattern: Option<String>,
|
||||
pub language: Option<String>,
|
||||
|
||||
// YAML mode fields
|
||||
pub rule_yaml: Option<String>,
|
||||
|
||||
// Common fields
|
||||
pub paths: Option<Vec<String>>,
|
||||
pub globs: Option<Vec<String>>,
|
||||
pub json_style: Option<JsonStyle>,
|
||||
pub context: Option<u32>,
|
||||
pub threads: Option<u32>,
|
||||
pub include_metadata: Option<bool>,
|
||||
pub no_ignore: Option<Vec<NoIgnoreType>>,
|
||||
pub severity: Option<HashMap<String, SeverityLevel>>,
|
||||
pub timeout_secs: Option<u64>,
|
||||
}
|
||||
|
||||
/// Search mode: pattern or yaml
|
||||
#[derive(Debug, Clone, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum SearchMode {
|
||||
Pattern,
|
||||
Yaml,
|
||||
}
|
||||
|
||||
/// JSON output style
|
||||
#[derive(Debug, Clone, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum JsonStyle {
|
||||
Pretty,
|
||||
Stream,
|
||||
Compact,
|
||||
}
|
||||
|
||||
impl Default for JsonStyle {
|
||||
fn default() -> Self {
|
||||
JsonStyle::Stream
|
||||
}
|
||||
}
|
||||
|
||||
/// No-ignore types
|
||||
#[derive(Debug, Clone, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum NoIgnoreType {
|
||||
Hidden,
|
||||
Dot,
|
||||
Exclude,
|
||||
Global,
|
||||
Parent,
|
||||
Vcs,
|
||||
}
|
||||
|
||||
/// Severity levels for YAML rules
|
||||
#[derive(Debug, Clone, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum SeverityLevel {
|
||||
Error,
|
||||
Warning,
|
||||
Info,
|
||||
Hint,
|
||||
Off,
|
||||
}
|
||||
|
||||
/// Request structure for code search
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct CodeSearchRequest {
|
||||
pub searches: Vec<SearchSpec>,
|
||||
pub max_concurrency: Option<usize>,
|
||||
pub max_matches_per_search: Option<usize>,
|
||||
}
|
||||
|
||||
/// Result of a single search
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct SearchResult {
|
||||
pub name: String,
|
||||
pub mode: String,
|
||||
pub status: String,
|
||||
pub cmd: Vec<String>,
|
||||
pub match_count: Option<usize>,
|
||||
pub truncated: Option<bool>,
|
||||
pub matches: Option<Vec<Value>>,
|
||||
pub stderr: Option<String>,
|
||||
pub exit_code: Option<i32>,
|
||||
pub duration_ms: u64,
|
||||
}
|
||||
|
||||
/// Summary of all searches
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct SearchSummary {
|
||||
pub completed: usize,
|
||||
pub total: usize,
|
||||
pub total_matches: usize,
|
||||
pub duration_ms: u64,
|
||||
}
|
||||
|
||||
/// Complete response structure
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct CodeSearchResponse {
|
||||
pub summary: SearchSummary,
|
||||
pub searches: Vec<SearchResult>,
|
||||
}
|
||||
|
||||
/// YAML rule structure for validation
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct YamlRule {
|
||||
pub id: String,
|
||||
pub language: String,
|
||||
pub rule: Value,
|
||||
}
|
||||
|
||||
/// Execute a batch of code searches using ast-grep
|
||||
pub async fn execute_code_search(request: CodeSearchRequest) -> Result<CodeSearchResponse> {
|
||||
let start_time = Instant::now();
|
||||
|
||||
// Validate request
|
||||
if request.searches.is_empty() {
|
||||
return Err(anyhow!("No searches specified"));
|
||||
}
|
||||
|
||||
if request.searches.len() > MAX_SEARCHES {
|
||||
return Err(anyhow!(
|
||||
"Too many searches: {} (max: {})",
|
||||
request.searches.len(),
|
||||
MAX_SEARCHES
|
||||
));
|
||||
}
|
||||
|
||||
// Check if ast-grep is available
|
||||
check_ast_grep_available().await?;
|
||||
|
||||
let max_concurrency = request.max_concurrency.unwrap_or(DEFAULT_MAX_CONCURRENCY);
|
||||
let max_matches = request.max_matches_per_search.unwrap_or(DEFAULT_MAX_MATCHES);
|
||||
|
||||
// Create semaphore for concurrency control
|
||||
let semaphore = std::sync::Arc::new(Semaphore::new(max_concurrency));
|
||||
|
||||
// Execute searches concurrently
|
||||
let mut tasks = Vec::new();
|
||||
|
||||
for search in request.searches {
|
||||
let sem = semaphore.clone();
|
||||
let task = tokio::spawn(async move {
|
||||
let _permit = sem.acquire().await.unwrap();
|
||||
execute_single_search(search, max_matches).await
|
||||
});
|
||||
tasks.push(task);
|
||||
}
|
||||
|
||||
// Wait for all searches to complete
|
||||
let mut results = Vec::new();
|
||||
let mut total_matches = 0;
|
||||
let mut completed = 0;
|
||||
|
||||
for task in tasks {
|
||||
match task.await {
|
||||
Ok(result) => {
|
||||
if result.status == "ok" {
|
||||
completed += 1;
|
||||
if let Some(count) = result.match_count {
|
||||
total_matches += count;
|
||||
}
|
||||
}
|
||||
results.push(result);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Task join error: {}", e);
|
||||
// Create an error result
|
||||
results.push(SearchResult {
|
||||
name: "unknown".to_string(),
|
||||
mode: "unknown".to_string(),
|
||||
status: "error".to_string(),
|
||||
cmd: vec![],
|
||||
match_count: None,
|
||||
truncated: None,
|
||||
matches: None,
|
||||
stderr: Some(format!("Task execution error: {}", e)),
|
||||
exit_code: None,
|
||||
duration_ms: 0,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let total_duration = start_time.elapsed();
|
||||
|
||||
Ok(CodeSearchResponse {
|
||||
summary: SearchSummary {
|
||||
completed,
|
||||
total: results.len(),
|
||||
total_matches,
|
||||
duration_ms: total_duration.as_millis() as u64,
|
||||
},
|
||||
searches: results,
|
||||
})
|
||||
}
|
||||
|
||||
/// Execute a single search
|
||||
async fn execute_single_search(search: SearchSpec, max_matches: usize) -> SearchResult {
|
||||
let start_time = Instant::now();
|
||||
let timeout_secs = search.timeout_secs.unwrap_or(DEFAULT_TIMEOUT_SECS);
|
||||
|
||||
// Validate the search specification
|
||||
if let Err(e) = validate_search_spec(&search) {
|
||||
return SearchResult {
|
||||
name: search.name,
|
||||
mode: format!("{:?}", search.mode).to_lowercase(),
|
||||
status: "error".to_string(),
|
||||
cmd: vec![],
|
||||
match_count: None,
|
||||
truncated: None,
|
||||
matches: None,
|
||||
stderr: Some(format!("Validation error: {}", e)),
|
||||
exit_code: None,
|
||||
duration_ms: start_time.elapsed().as_millis() as u64,
|
||||
};
|
||||
}
|
||||
|
||||
// Build command
|
||||
let cmd_args = match build_ast_grep_command(&search) {
|
||||
Ok(args) => args,
|
||||
Err(e) => {
|
||||
return SearchResult {
|
||||
name: search.name,
|
||||
mode: format!("{:?}", search.mode).to_lowercase(),
|
||||
status: "error".to_string(),
|
||||
cmd: vec![],
|
||||
match_count: None,
|
||||
truncated: None,
|
||||
matches: None,
|
||||
stderr: Some(format!("Command build error: {}", e)),
|
||||
exit_code: None,
|
||||
duration_ms: start_time.elapsed().as_millis() as u64,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
debug!("Executing ast-grep command: {:?}", cmd_args);
|
||||
|
||||
// Execute with timeout
|
||||
let timeout_duration = Duration::from_secs(timeout_secs);
|
||||
|
||||
match tokio::time::timeout(timeout_duration, run_ast_grep_command(&cmd_args)).await {
|
||||
Ok(Ok((stdout, stderr, exit_code))) => {
|
||||
let duration_ms = start_time.elapsed().as_millis() as u64;
|
||||
|
||||
if exit_code == 0 {
|
||||
// Parse JSON output
|
||||
match parse_ast_grep_output(&stdout, max_matches) {
|
||||
Ok((matches, truncated)) => {
|
||||
SearchResult {
|
||||
name: search.name,
|
||||
mode: format!("{:?}", search.mode).to_lowercase(),
|
||||
status: "ok".to_string(),
|
||||
cmd: cmd_args,
|
||||
match_count: Some(matches.len()),
|
||||
truncated: Some(truncated),
|
||||
matches: Some(matches),
|
||||
stderr: if stderr.is_empty() { None } else { Some(stderr) },
|
||||
exit_code: None,
|
||||
duration_ms,
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
SearchResult {
|
||||
name: search.name,
|
||||
mode: format!("{:?}", search.mode).to_lowercase(),
|
||||
status: "error".to_string(),
|
||||
cmd: cmd_args,
|
||||
match_count: None,
|
||||
truncated: None,
|
||||
matches: None,
|
||||
stderr: Some(format!("JSON parse error: {}\nRaw output: {}", e, stdout)),
|
||||
exit_code: Some(exit_code),
|
||||
duration_ms,
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
SearchResult {
|
||||
name: search.name,
|
||||
mode: format!("{:?}", search.mode).to_lowercase(),
|
||||
status: "error".to_string(),
|
||||
cmd: cmd_args,
|
||||
match_count: None,
|
||||
truncated: None,
|
||||
matches: None,
|
||||
stderr: Some(stderr),
|
||||
exit_code: Some(exit_code),
|
||||
duration_ms,
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
SearchResult {
|
||||
name: search.name,
|
||||
mode: format!("{:?}", search.mode).to_lowercase(),
|
||||
status: "error".to_string(),
|
||||
cmd: cmd_args,
|
||||
match_count: None,
|
||||
truncated: None,
|
||||
matches: None,
|
||||
stderr: Some(format!("Execution error: {}", e)),
|
||||
exit_code: None,
|
||||
duration_ms: start_time.elapsed().as_millis() as u64,
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
SearchResult {
|
||||
name: search.name,
|
||||
mode: format!("{:?}", search.mode).to_lowercase(),
|
||||
status: "timeout".to_string(),
|
||||
cmd: cmd_args,
|
||||
match_count: None,
|
||||
truncated: None,
|
||||
matches: None,
|
||||
stderr: Some(format!("Search timed out after {} seconds", timeout_secs)),
|
||||
exit_code: None,
|
||||
duration_ms: start_time.elapsed().as_millis() as u64,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate a search specification
|
||||
fn validate_search_spec(search: &SearchSpec) -> Result<()> {
|
||||
match search.mode {
|
||||
SearchMode::Pattern => {
|
||||
if search.pattern.is_none() || search.pattern.as_ref().unwrap().is_empty() {
|
||||
return Err(anyhow!("Pattern mode requires non-empty 'pattern' field"));
|
||||
}
|
||||
}
|
||||
SearchMode::Yaml => {
|
||||
let rule_yaml = search.rule_yaml.as_ref()
|
||||
.ok_or_else(|| anyhow!("YAML mode requires 'rule_yaml' field"))?;
|
||||
|
||||
if rule_yaml.is_empty() {
|
||||
return Err(anyhow!("YAML mode requires non-empty 'rule_yaml' field"));
|
||||
}
|
||||
|
||||
// Parse and validate YAML structure
|
||||
let parsed: YamlRule = serde_yaml::from_str(rule_yaml)
|
||||
.map_err(|e| anyhow!("Invalid YAML rule: {}", e))?;
|
||||
|
||||
if parsed.id.is_empty() {
|
||||
return Err(anyhow!("YAML rule must have non-empty 'id' field"));
|
||||
}
|
||||
|
||||
if parsed.language.is_empty() {
|
||||
return Err(anyhow!("YAML rule must have non-empty 'language' field"));
|
||||
}
|
||||
|
||||
// Validate language is supported (basic check)
|
||||
validate_language(&parsed.language)?;
|
||||
}
|
||||
}
|
||||
|
||||
// Validate context range
|
||||
if let Some(context) = search.context {
|
||||
if context > 20 {
|
||||
return Err(anyhow!("Context lines cannot exceed 20"));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate that a language is supported by ast-grep
|
||||
fn validate_language(language: &str) -> Result<()> {
|
||||
let supported_languages = [
|
||||
"rust", "javascript", "typescript", "python", "java", "c", "cpp", "csharp",
|
||||
"go", "html", "css", "json", "yaml", "xml", "bash", "kotlin", "swift",
|
||||
"php", "ruby", "scala", "dart", "lua", "r", "sql", "dockerfile",
|
||||
"Rust", "JavaScript", "TypeScript", "Python", "Java", "C", "Cpp", "CSharp",
|
||||
"Go", "Html", "Css", "Json", "Yaml", "Xml", "Bash", "Kotlin", "Swift",
|
||||
"Php", "Ruby", "Scala", "Dart", "Lua", "R", "Sql", "Dockerfile"
|
||||
];
|
||||
|
||||
if !supported_languages.contains(&language) {
|
||||
warn!("Language '{}' may not be supported by ast-grep", language);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Build ast-grep command arguments
|
||||
fn build_ast_grep_command(search: &SearchSpec) -> Result<Vec<String>> {
|
||||
let mut args = vec!["ast-grep".to_string()];
|
||||
|
||||
match search.mode {
|
||||
SearchMode::Pattern => {
|
||||
args.push("run".to_string());
|
||||
|
||||
// Add pattern
|
||||
args.push("-p".to_string());
|
||||
args.push(search.pattern.as_ref().unwrap().clone());
|
||||
|
||||
// Add language if specified
|
||||
if let Some(ref lang) = search.language {
|
||||
args.push("-l".to_string());
|
||||
args.push(lang.clone());
|
||||
}
|
||||
}
|
||||
SearchMode::Yaml => {
|
||||
args.push("scan".to_string());
|
||||
|
||||
// Add inline rules
|
||||
args.push("--inline-rules".to_string());
|
||||
args.push(search.rule_yaml.as_ref().unwrap().clone());
|
||||
|
||||
// Add include-metadata if requested
|
||||
if search.include_metadata.unwrap_or(false) {
|
||||
args.push("--include-metadata".to_string());
|
||||
}
|
||||
|
||||
// Add severity overrides
|
||||
if let Some(ref severity_map) = search.severity {
|
||||
for (rule_id, severity) in severity_map {
|
||||
match severity {
|
||||
SeverityLevel::Error => {
|
||||
args.push("--error".to_string());
|
||||
args.push(rule_id.clone());
|
||||
}
|
||||
SeverityLevel::Warning => {
|
||||
args.push("--warning".to_string());
|
||||
args.push(rule_id.clone());
|
||||
}
|
||||
SeverityLevel::Info => {
|
||||
args.push("--info".to_string());
|
||||
args.push(rule_id.clone());
|
||||
}
|
||||
SeverityLevel::Hint => {
|
||||
args.push("--hint".to_string());
|
||||
args.push(rule_id.clone());
|
||||
}
|
||||
SeverityLevel::Off => {
|
||||
args.push("--off".to_string());
|
||||
args.push(rule_id.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add common arguments
|
||||
|
||||
// Add globs if specified
|
||||
if let Some(ref globs) = search.globs {
|
||||
if !globs.is_empty() {
|
||||
args.push("--globs".to_string());
|
||||
args.push(globs.join(","));
|
||||
}
|
||||
}
|
||||
|
||||
// Add context
|
||||
if let Some(context) = search.context {
|
||||
args.push("-C".to_string());
|
||||
args.push(context.to_string());
|
||||
}
|
||||
|
||||
// Add threads
|
||||
if let Some(threads) = search.threads {
|
||||
args.push("-j".to_string());
|
||||
args.push(threads.to_string());
|
||||
}
|
||||
|
||||
// Add JSON output style
|
||||
let json_style = search.json_style.as_ref().unwrap_or(&JsonStyle::Stream);
|
||||
let json_arg = match json_style {
|
||||
JsonStyle::Pretty => "--json=pretty",
|
||||
JsonStyle::Stream => "--json=stream",
|
||||
JsonStyle::Compact => "--json=compact",
|
||||
};
|
||||
args.push(json_arg.to_string());
|
||||
|
||||
// Add no-ignore options
|
||||
if let Some(ref no_ignore_list) = search.no_ignore {
|
||||
for no_ignore_type in no_ignore_list {
|
||||
let flag = match no_ignore_type {
|
||||
NoIgnoreType::Hidden => "--no-ignore=hidden",
|
||||
NoIgnoreType::Dot => "--no-ignore=dot",
|
||||
NoIgnoreType::Exclude => "--no-ignore=exclude",
|
||||
NoIgnoreType::Global => "--no-ignore=global",
|
||||
NoIgnoreType::Parent => "--no-ignore=parent",
|
||||
NoIgnoreType::Vcs => "--no-ignore=vcs",
|
||||
};
|
||||
args.push(flag.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// Add paths (default to current directory if none specified)
|
||||
if let Some(ref paths) = search.paths {
|
||||
if !paths.is_empty() {
|
||||
args.extend(paths.clone());
|
||||
} else {
|
||||
args.push(".".to_string());
|
||||
}
|
||||
} else {
|
||||
args.push(".".to_string());
|
||||
}
|
||||
|
||||
Ok(args)
|
||||
}
|
||||
|
||||
/// Run ast-grep command and capture output
|
||||
async fn run_ast_grep_command(args: &[String]) -> Result<(String, String, i32)> {
|
||||
let mut cmd = Command::new(&args[0]);
|
||||
cmd.args(&args[1..]);
|
||||
cmd.stdout(Stdio::piped());
|
||||
cmd.stderr(Stdio::piped());
|
||||
|
||||
debug!("Running command: {:?}", args);
|
||||
|
||||
let mut child = cmd.spawn()
|
||||
.map_err(|e| anyhow!("Failed to spawn ast-grep process: {}", e))?;
|
||||
|
||||
let stdout = child.stdout.take().unwrap();
|
||||
let stderr = child.stderr.take().unwrap();
|
||||
|
||||
let stdout_reader = BufReader::new(stdout);
|
||||
let stderr_reader = BufReader::new(stderr);
|
||||
|
||||
let stdout_task = tokio::spawn(async move {
|
||||
let mut lines = stdout_reader.lines();
|
||||
let mut output = String::new();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
if !output.is_empty() {
|
||||
output.push('\n');
|
||||
}
|
||||
output.push_str(&line);
|
||||
}
|
||||
output
|
||||
});
|
||||
|
||||
let stderr_task = tokio::spawn(async move {
|
||||
let mut lines = stderr_reader.lines();
|
||||
let mut output = String::new();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
if !output.is_empty() {
|
||||
output.push('\n');
|
||||
}
|
||||
output.push_str(&line);
|
||||
}
|
||||
output
|
||||
});
|
||||
|
||||
let status = child.wait().await
|
||||
.map_err(|e| anyhow!("Failed to wait for ast-grep process: {}", e))?;
|
||||
|
||||
let stdout_output = stdout_task.await
|
||||
.map_err(|e| anyhow!("Failed to read stdout: {}", e))?;
|
||||
let stderr_output = stderr_task.await
|
||||
.map_err(|e| anyhow!("Failed to read stderr: {}", e))?;
|
||||
|
||||
let exit_code = status.code().unwrap_or(-1);
|
||||
|
||||
Ok((stdout_output, stderr_output, exit_code))
|
||||
}
|
||||
|
||||
/// Parse ast-grep JSON output
|
||||
fn parse_ast_grep_output(output: &str, max_matches: usize) -> Result<(Vec<Value>, bool)> {
|
||||
if output.trim().is_empty() {
|
||||
return Ok((vec![], false));
|
||||
}
|
||||
|
||||
let mut matches = Vec::new();
|
||||
let mut truncated = false;
|
||||
|
||||
// Handle stream format (line-delimited JSON)
|
||||
for line in output.lines() {
|
||||
let line = line.trim();
|
||||
if line.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
match serde_json::from_str::<Value>(line) {
|
||||
Ok(match_obj) => {
|
||||
if matches.len() >= max_matches {
|
||||
truncated = true;
|
||||
break;
|
||||
}
|
||||
matches.push(match_obj);
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("Failed to parse JSON line '{}': {}", line, e);
|
||||
// Try to parse the entire output as a single JSON array
|
||||
match serde_json::from_str::<Vec<Value>>(output) {
|
||||
Ok(array_matches) => {
|
||||
let take_count = array_matches.len().min(max_matches);
|
||||
let total_count = array_matches.len();
|
||||
matches = array_matches.into_iter().take(take_count).collect();
|
||||
truncated = take_count < total_count;
|
||||
break;
|
||||
}
|
||||
Err(e2) => {
|
||||
return Err(anyhow!(
|
||||
"Failed to parse ast-grep output as line-delimited JSON or JSON array. Line error: {}, Array error: {}",
|
||||
e, e2
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok((matches, truncated))
|
||||
}
|
||||
|
||||
/// Check if ast-grep is available and provide installation hints if not
|
||||
async fn check_ast_grep_available() -> Result<()> {
|
||||
match Command::new("ast-grep")
|
||||
.arg("--version")
|
||||
.output()
|
||||
.await
|
||||
{
|
||||
Ok(output) => {
|
||||
if output.status.success() {
|
||||
let version = String::from_utf8_lossy(&output.stdout);
|
||||
info!("Found ast-grep: {}", version.trim());
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!("ast-grep command failed: {}", String::from_utf8_lossy(&output.stderr)))
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
Err(anyhow!(
|
||||
"ast-grep not found. Please install it using one of these methods:\n\n\
|
||||
• Homebrew (macOS): brew install ast-grep\n\
|
||||
• MacPorts (macOS): sudo port install ast-grep\n\
|
||||
• Nix: nix-env -iA nixpkgs.ast-grep\n\
|
||||
• Cargo: cargo install ast-grep\n\
|
||||
• npm: npm install -g @ast-grep/cli\n\
|
||||
• pip: pip install ast-grep\n\n\
|
||||
For more installation options, visit: https://ast-grep.github.io/guide/quick-start.html"
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Baseline spec with every optional field unset; individual tests
    // override only the fields they exercise via struct-update syntax.
    // Replaces three duplicated 14-field literals.
    fn spec(mode: SearchMode) -> SearchSpec {
        SearchSpec {
            name: "test".to_string(),
            mode,
            pattern: None,
            language: None,
            rule_yaml: None,
            paths: None,
            globs: None,
            json_style: None,
            context: None,
            threads: None,
            include_metadata: None,
            no_ignore: None,
            severity: None,
            timeout_secs: None,
        }
    }

    // A valid pattern-mode spec passes validation.
    #[test]
    fn test_validate_pattern_search() {
        let search = SearchSpec {
            pattern: Some("fn $NAME() {}".to_string()),
            language: Some("rust".to_string()),
            ..spec(SearchMode::Pattern)
        };

        assert!(validate_search_spec(&search).is_ok());
    }

    // A well-formed inline YAML rule (id + language + rule) passes validation.
    #[test]
    fn test_validate_yaml_search() {
        let yaml_rule = r#"
id: test-rule
language: Rust
rule:
  pattern: "fn $NAME() {}"
"#;

        let search = SearchSpec {
            rule_yaml: Some(yaml_rule.to_string()),
            ..spec(SearchMode::Yaml)
        };

        assert!(validate_search_spec(&search).is_ok());
    }

    // Pattern mode produces `ast-grep run` with all requested flags present.
    #[test]
    fn test_build_pattern_command() {
        let search = SearchSpec {
            pattern: Some("fn $NAME() {}".to_string()),
            language: Some("rust".to_string()),
            paths: Some(vec!["src/".to_string()]),
            json_style: Some(JsonStyle::Stream),
            context: Some(2),
            threads: Some(4),
            ..spec(SearchMode::Pattern)
        };

        let cmd = build_ast_grep_command(&search).unwrap();

        assert_eq!(cmd[0], "ast-grep");
        assert_eq!(cmd[1], "run");
        // Every expected flag/value must appear somewhere in the argv.
        let expected = [
            "-p",
            "fn $NAME() {}",
            "-l",
            "rust",
            "--json=stream",
            "-C",
            "2",
            "-j",
            "4",
            "src/",
        ];
        for arg in expected {
            assert!(cmd.contains(&arg.to_string()), "missing arg: {}", arg);
        }
    }

    // Line-delimited (stream) JSON is split into one match per line.
    #[test]
    fn test_parse_stream_json() {
        let output = r#"{"file":"test.rs","text":"fn hello() {}"}
{"file":"test2.rs","text":"fn world() {}"}"#;

        let (matches, truncated) = parse_ast_grep_output(output, 10).unwrap();

        assert_eq!(matches.len(), 2);
        assert!(!truncated);
        assert_eq!(matches[0]["file"], "test.rs");
        assert_eq!(matches[1]["file"], "test2.rs");
    }

    // Matches beyond max_matches are dropped and the truncation flag is set.
    #[test]
    fn test_parse_truncated_output() {
        let output = r#"{"file":"test1.rs","text":"fn a() {}"}
{"file":"test2.rs","text":"fn b() {}"}
{"file":"test3.rs","text":"fn c() {}"}"#;

        let (matches, truncated) = parse_ast_grep_output(output, 2).unwrap();

        assert_eq!(matches.len(), 2);
        assert!(truncated);
    }
}
|
||||
@@ -2,7 +2,6 @@ pub mod error_handling;
|
||||
pub mod project;
|
||||
pub mod task_result;
|
||||
pub mod ui_writer;
|
||||
pub mod code_search;
|
||||
pub use task_result::TaskResult;
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -1105,18 +1104,6 @@ IMPORTANT: You must call tools to achieve goals. When you receive a request:
|
||||
For shell commands: Use the shell tool with the exact command needed. Avoid commands that produce a large amount of output, and consider piping those outputs to files. Example: If asked to list files, immediately call the shell tool with command parameter \"ls\".
|
||||
If you create temporary files for verification, place these in a subdir named 'tmp'. Do NOT pollute the current dir.
|
||||
|
||||
For reading files, prioritize use of code_search tool use with multiple search requests per call instead of read_file, if it makes sense.
|
||||
|
||||
Additional examples for the 'code_search' tool:
|
||||
- Example for pattern mode: {\"tool\": \"code_search\", \"args\": {\"searches\": [{\"name\": \"find_functions\", \"mode\": \"pattern\", \"pattern\": \"fn $NAME($$$ARGS) { $$$ }\", \"language\": \"rust\", \"paths\": [\"src/\"]}]}}
|
||||
- Example for YAML mode: {\"tool\": \"code_search\", \"args\": {\"searches\": [{\"name\": \"find_async\", \"mode\": \"yaml\", \"rule_yaml\": \"id: async-fn\nlanguage: Rust\nrule:\n pattern: async fn $NAME($$$) { $$$ }\"}]}}
|
||||
- Example for multiple searches: {\"tool\": \"code_search\", \"args\": {\"searches\": [{\"name\": \"funcs\", \"mode\": \"pattern\", \"pattern\": \"fn $NAME\", \"language\": \"rust\"}, {\"name\": \"structs\", \"mode\": \"pattern\", \"pattern\": \"struct $NAME\", \"language\": \"rust\"}]}}
|
||||
- Example for passing optional args like \"context\": {\"tool\": \"code_search\", \"args\": {\"searches\": [{\"name\": \"funcs\", \"mode\": \"pattern\", \"context\": 3, \"pattern\": \"fn $NAME\", \"language\": \"rust\"}]}
|
||||
- Common optional args for searches:
|
||||
- \"context\": 3 (show surrounding lines),
|
||||
- \"json_style\": \"stream\" (for large results)
|
||||
|
||||
|
||||
IMPORTANT: If the user asks you to just respond with text (like \"just say hello\" or \"tell me about X\"), do NOT use tools. Simply respond with the requested text directly. Only use tools when you need to execute commands or complete tasks that require action.
|
||||
|
||||
When taking screenshots of specific windows (like \"my Safari window\" or \"my terminal\"), ALWAYS use list_windows first to identify the correct window ID, then use take_screenshot with the window_id parameter.
|
||||
@@ -1166,8 +1153,6 @@ The tool will execute immediately and you'll receive the result (success or erro
|
||||
|
||||
# Available Tools
|
||||
|
||||
Short description for providers without native calling specs:
|
||||
|
||||
- **shell**: Execute shell commands
|
||||
- Format: {\"tool\": \"shell\", \"args\": {\"command\": \"your_command_here\"}
|
||||
- Example: {\"tool\": \"shell\", \"args\": {\"command\": \"ls ~/Downloads\"}
|
||||
@@ -1196,41 +1181,13 @@ Short description for providers without native calling specs:
|
||||
- Format: {\"tool\": \"todo_write\", \"args\": {\"content\": \"- [ ] Task 1\\n- [ ] Task 2\"}}
|
||||
- Example: {\"tool\": \"todo_write\", \"args\": {\"content\": \"- [ ] Implement feature\\n - [ ] Write tests\\n - [ ] Run tests\"}}
|
||||
|
||||
- **code_search**: Batch syntax-aware searches via ast-grep. Supports up to 20 pattern or YAML-rule searches in parallel.
|
||||
- Format: {\"tool\": \"code_search\", \"args\": {\"searches\": [{\"name\": \"search_label\", \"mode\": \"pattern|yaml\", ...}], \"max_concurrency\": 4, \"max_matches_per_search\": 500}}
|
||||
- Example for pattern mode: {\"tool\": \"code_search\", \"args\": {\"searches\": [{\"name\": \"find_functions\", \"mode\": \"pattern\", \"pattern\": \"fn $NAME($$$ARGS) { $$$ }\", \"language\": \"rust\", \"paths\": [\"src/\"]}]}}
|
||||
- Example for YAML mode: {\"tool\": \"code_search\", \"args\": {\"searches\": [{\"name\": \"find_async\", \"mode\": \"yaml\", \"rule_yaml\": \"id: async-fn\nlanguage: Rust\nrule:\n pattern: async fn $NAME($$$) { $$$ }\"}]}}
|
||||
- Example for multiple searches: {\"tool\": \"code_search\", \"args\": {\"searches\": [{\"name\": \"funcs\", \"mode\": \"pattern\", \"pattern\": \"fn $NAME\", \"language\": \"rust\"}, {\"name\": \"structs\", \"mode\": \"pattern\", \"pattern\": \"struct $NAME\", \"language\": \"rust\"}]}}
|
||||
- Example for passing optional args like \"context\": {\"tool\": \"code_search\", \"args\": {\"searches\": [{\"name\": \"funcs\", \"mode\": \"pattern\", \"context\": 3, \"pattern\": \"fn $NAME\", \"language\": \"rust\"}]}
|
||||
- Common optional args for searches:
|
||||
- \"context\": 3 (show surrounding lines),
|
||||
- \"json_style\": \"stream\" (for large results)
|
||||
|
||||
# Instructions
|
||||
|
||||
1. Analyze the request and break down into smaller tasks if appropriate
|
||||
2. Execute ONE tool at a time. An exception exists for when you're writing files. See below.
|
||||
2. Execute ONE tool at a time
|
||||
3. STOP when the original request was satisfied
|
||||
4. Call the final_output tool when done
|
||||
|
||||
For reading files, prioritize use of code_search tool use with multiple search requests per call instead of read_file, if it makes sense.
|
||||
|
||||
Exception to using ONE tool at a time:
|
||||
If all you’re doing is WRITING files, and you don’t need to do anything else between each step.
|
||||
You can issue MULTIPLE write_file tool calls in a request, however you may ONLY make a SINGLE write_file call for any file in that request.
|
||||
For example you may call:
|
||||
[START OF REQUEST]
|
||||
write_file(\"helper.rs\", \"...\")
|
||||
write_file(\"file2.txt\", \"...\")
|
||||
[DONE]
|
||||
|
||||
But NOT:
|
||||
[START OF REQUEST]
|
||||
write_file(\"helper.rs\", \"...\")
|
||||
write_file(\"file2.txt\", \"...\")
|
||||
write_file(\"helper.rs\", \"...\")
|
||||
[DONE]
|
||||
|
||||
# Task Management
|
||||
|
||||
Use todo_read and todo_write for tasks with 3+ steps, multiple files/components, or uncertain scope.
|
||||
@@ -1487,6 +1444,13 @@ Template:
|
||||
let available = model_limit
|
||||
.saturating_sub(current_usage)
|
||||
.saturating_sub(5000);
|
||||
// Ensure we have at least 1 token available, otherwise we can't generate a summary
|
||||
if available == 0 {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
|
||||
current_usage, model_limit
|
||||
));
|
||||
}
|
||||
Some(available.min(10_000))
|
||||
}
|
||||
"embedded" => {
|
||||
@@ -1495,10 +1459,24 @@ Template:
|
||||
let available = model_limit
|
||||
.saturating_sub(current_usage)
|
||||
.saturating_sub(1000);
|
||||
// Ensure we have at least 1 token available
|
||||
if available == 0 {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
|
||||
current_usage, model_limit
|
||||
));
|
||||
}
|
||||
Some(available.min(3000))
|
||||
}
|
||||
_ => {
|
||||
let available = self.context_window.remaining_tokens().saturating_sub(2000);
|
||||
// Ensure we have at least 1 token available
|
||||
if available == 0 {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Insufficient tokens available for summary generation. Current usage: {} tokens. Please start a new session.",
|
||||
self.context_window.used_tokens
|
||||
));
|
||||
}
|
||||
Some(available.min(5000))
|
||||
}
|
||||
};
|
||||
@@ -1903,64 +1881,6 @@ Template:
|
||||
}),
|
||||
},
|
||||
];
|
||||
|
||||
// Add code_search tool
|
||||
tools.push(Tool {
|
||||
name: "code_search".to_string(),
|
||||
description: "Batch syntax-aware searches via ast-grep. Supports up to 20 pattern or YAML-rule searches in parallel; returns JSON matches (stream-collated).".to_string(),
|
||||
input_schema: json!({
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"searches": {
|
||||
"type": "array",
|
||||
"maxItems": 20,
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string", "description": "Label for this search." },
|
||||
"mode": {
|
||||
"type": "string",
|
||||
"enum": ["pattern", "yaml"],
|
||||
"description": "`pattern` uses `ast-grep run`; `yaml` uses `ast-grep scan --inline-rules`."
|
||||
},
|
||||
// pattern mode (fast one-off)
|
||||
"pattern": { "type": "string", "description": "ast-grep pattern code (e.g., \"async fn $NAME($$$ARGS) { $$$ }\")"},
|
||||
"language": { "type": "string", "description": "Optional language for pattern mode; ast-grep may infer from file extensions if omitted." },
|
||||
// yaml mode (full rule object)
|
||||
"rule_yaml": { "type": "string", "description": "A full YAML rule object text. Must include `id`, `language`, and `rule`." },
|
||||
// targeting
|
||||
"paths": { "type": "array", "items": { "type": "string" }, "description": "Paths/dirs to search. Defaults to current dir if empty." },
|
||||
"globs": { "type": "array", "items": { "type": "string" }, "description": "Optional include/exclude globs for CLI --globs." },
|
||||
// result formatting & performance knobs
|
||||
"json_style": { "type": "string", "enum": ["pretty","stream","compact"], "default": "stream", "description": "Use stream for large codebases." },
|
||||
"context": { "type": "integer", "minimum": 0, "maximum": 20, "default": 0, "description": "CLI -C context lines in text output; also affects JSON `lines` field." },
|
||||
"threads": { "type": "integer", "minimum": 1, "description": "Optional override for ast-grep -j (per process)." },
|
||||
"include_metadata": { "type": "boolean", "default": false, "description": "If yaml mode and rule has metadata, add --include-metadata." },
|
||||
// robustness
|
||||
"no_ignore": {
|
||||
"type": "array",
|
||||
"items": { "type": "string", "enum": ["hidden","dot","exclude","global","parent","vcs"] },
|
||||
"description": "Forwarded to --no-ignore to bypass ignore files/hidden."
|
||||
},
|
||||
// severity overrides for yaml mode
|
||||
"severity": {
|
||||
"type": "object",
|
||||
"additionalProperties": { "type": "string", "enum": ["error","warning","info","hint","off"] },
|
||||
"description": "Optional map<ruleId, severity> -> passed via --error/--warning/--info/--hint/--off."
|
||||
},
|
||||
// per-search timeout seconds (default 60)
|
||||
"timeout_secs": { "type": "integer", "minimum": 1, "default": 60 }
|
||||
},
|
||||
"required": ["name","mode"]
|
||||
}
|
||||
},
|
||||
// global concurrency & truncation
|
||||
"max_concurrency": { "type": "integer", "minimum": 1, "default": 4 },
|
||||
"max_matches_per_search": { "type": "integer", "minimum": 1, "default": 500 }
|
||||
},
|
||||
"required": ["searches"]
|
||||
}),
|
||||
});
|
||||
|
||||
// Add WebDriver tools if enabled
|
||||
if enable_webdriver {
|
||||
@@ -2448,6 +2368,13 @@ Template:
|
||||
let available = model_limit
|
||||
.saturating_sub(current_usage)
|
||||
.saturating_sub(5000);
|
||||
// Ensure we have at least 1 token available, otherwise we can't generate a summary
|
||||
if available == 0 {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
|
||||
current_usage, model_limit
|
||||
));
|
||||
}
|
||||
// Cap at a reasonable summary size (10k tokens max)
|
||||
Some(available.min(10_000))
|
||||
}
|
||||
@@ -2459,12 +2386,26 @@ Template:
|
||||
let available = model_limit
|
||||
.saturating_sub(current_usage)
|
||||
.saturating_sub(1000);
|
||||
// Ensure we have at least 1 token available
|
||||
if available == 0 {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
|
||||
current_usage, model_limit
|
||||
));
|
||||
}
|
||||
// Cap at 3k for embedded models
|
||||
Some(available.min(3000))
|
||||
}
|
||||
_ => {
|
||||
// Default: conservative approach
|
||||
let available = self.context_window.remaining_tokens().saturating_sub(2000);
|
||||
// Ensure we have at least 1 token available
|
||||
if available == 0 {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Insufficient tokens available for summary generation. Current usage: {} tokens. Please start a new session.",
|
||||
self.context_window.used_tokens
|
||||
));
|
||||
}
|
||||
Some(available.min(5000))
|
||||
}
|
||||
};
|
||||
@@ -2697,8 +2638,7 @@ Template:
|
||||
String::new()
|
||||
};
|
||||
|
||||
// Don't display text before final_output - it will be in the summary
|
||||
if !new_content.trim().is_empty() && tool_call.tool != "final_output" {
|
||||
if !new_content.trim().is_empty() {
|
||||
#[allow(unused_assignments)]
|
||||
if !response_started {
|
||||
self.ui_writer.print_agent_prompt();
|
||||
@@ -2776,13 +2716,7 @@ Template:
|
||||
));
|
||||
|
||||
// Display tool execution result with proper indentation
|
||||
if tool_call.tool == "final_output" {
|
||||
// For final_output, display the summary without truncation
|
||||
for line in tool_result.lines() {
|
||||
self.ui_writer.update_tool_output_line(line);
|
||||
}
|
||||
self.ui_writer.println("");
|
||||
} else {
|
||||
if tool_call.tool != "final_output" {
|
||||
let output_lines: Vec<&str> = tool_result.lines().collect();
|
||||
|
||||
// Check if UI wants full output (machine mode) or truncated (human mode)
|
||||
@@ -2830,9 +2764,13 @@ Template:
|
||||
|
||||
// Check if this was a final_output tool call
|
||||
if tool_call.tool == "final_output" {
|
||||
// The summary was displayed above when we printed the tool result
|
||||
// Add it to full_response so it's included in the TaskResult
|
||||
full_response.push_str(&tool_result);
|
||||
// Don't add final_display_content here - it was already added before tool execution
|
||||
// Adding it again would duplicate the output
|
||||
if let Some(summary) = tool_call.args.get("summary") {
|
||||
if let Some(summary_str) = summary.as_str() {
|
||||
full_response.push_str(&format!("\n\n{}", summary_str));
|
||||
}
|
||||
}
|
||||
self.ui_writer.println("");
|
||||
let _ttft =
|
||||
first_token_time.unwrap_or_else(|| stream_start.elapsed());
|
||||
@@ -2864,7 +2802,7 @@ Template:
|
||||
|
||||
// Add the tool call and result to the context window using RAW unfiltered content
|
||||
// This ensures the log file contains the true raw content including JSON tool calls
|
||||
let tool_message = if !raw_content_for_log.trim().is_empty() {
|
||||
let tool_message = if !full_response.contains(final_display_content) && !raw_content_for_log.trim().is_empty() {
|
||||
Message {
|
||||
role: MessageRole::Assistant,
|
||||
content: format!(
|
||||
@@ -2875,7 +2813,7 @@ Template:
|
||||
),
|
||||
}
|
||||
} else {
|
||||
// No text content before tool call, just include the tool call
|
||||
// If we've already added the text or there's no text, just include the tool call
|
||||
Message {
|
||||
role: MessageRole::Assistant,
|
||||
content: format!(
|
||||
@@ -2900,22 +2838,18 @@ Template:
|
||||
request.tools = Some(Self::create_tool_definitions(self.config.webdriver.enabled, self.config.macax.enabled, self.config.computer_control.enabled));
|
||||
}
|
||||
|
||||
// DO NOT add final_display_content to full_response here!
|
||||
// The content was already displayed during streaming and added to current_response.
|
||||
// Adding it again would cause duplication when the agent message is printed.
|
||||
// The only time we should add to full_response is:
|
||||
// 1. For final_output tool (handled separately)
|
||||
// 2. At the end when no tools were executed (handled in the "no tool executed" branch)
|
||||
|
||||
// Only add to full_response if we haven't already added it
|
||||
if !full_response.contains(final_display_content) {
|
||||
full_response.push_str(final_display_content);
|
||||
}
|
||||
tool_executed = true;
|
||||
|
||||
// Reset the JSON tool call filter state after each tool execution
|
||||
// This ensures the filter doesn't stay in suppression mode for subsequent streaming content
|
||||
fixed_filter_json::reset_fixed_json_tool_state();
|
||||
|
||||
// Reset parser for next iteration - this clears the text buffer
|
||||
// Reset parser for next iteration
|
||||
parser.reset();
|
||||
|
||||
// Clear current_response for next iteration to prevent buffered text
|
||||
// from being incorrectly displayed after tool execution
|
||||
current_response.clear();
|
||||
@@ -3093,10 +3027,11 @@ Template:
|
||||
|
||||
// Set full_response to current_response (don't append)
|
||||
// current_response already contains everything that was displayed
|
||||
// Don't set full_response here - it would duplicate the output
|
||||
// The text was already displayed during streaming
|
||||
// Return empty string to avoid duplication
|
||||
full_response = String::new();
|
||||
// Appending would duplicate the output
|
||||
if !current_response.is_empty() && full_response.is_empty() {
|
||||
full_response = current_response.clone();
|
||||
debug!("Set full_response from current_response (no tool): {} chars", full_response.len());
|
||||
}
|
||||
|
||||
self.ui_writer.println("");
|
||||
let _ttft =
|
||||
@@ -4538,41 +4473,6 @@ Template:
|
||||
Ok("❌ Computer control not enabled. Set computer_control.enabled = true in config.".to_string())
|
||||
}
|
||||
}
|
||||
"code_search" => {
|
||||
debug!("Processing code_search tool call");
|
||||
|
||||
// Parse the request
|
||||
let request: crate::code_search::CodeSearchRequest = match serde_json::from_value(tool_call.args.clone()) {
|
||||
Ok(req) => req,
|
||||
Err(e) => {
|
||||
return Ok(format!("❌ Invalid code_search arguments: {}", e));
|
||||
}
|
||||
};
|
||||
|
||||
// Execute the code search
|
||||
match crate::code_search::execute_code_search(request).await {
|
||||
Ok(response) => {
|
||||
// Serialize the response to JSON
|
||||
match serde_json::to_string_pretty(&response) {
|
||||
Ok(json_output) => {
|
||||
Ok(format!("✅ Code search completed\n{}", json_output))
|
||||
}
|
||||
Err(e) => {
|
||||
Ok(format!("❌ Failed to serialize response: {}", e))
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
// Check if it's an ast-grep not found error and provide helpful message
|
||||
let error_msg = e.to_string();
|
||||
if error_msg.contains("ast-grep not found") {
|
||||
Ok(format!("❌ {}", error_msg))
|
||||
} else {
|
||||
Ok(format!("❌ Code search failed: {}", error_msg))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
warn!("Unknown tool: {}", tool_call.tool);
|
||||
Ok(format!("❓ Unknown tool: {}", tool_call.tool))
|
||||
|
||||
@@ -276,7 +276,6 @@ impl AnthropicProvider {
|
||||
let mut partial_tool_json = String::new(); // Accumulate partial JSON for tool calls
|
||||
let mut accumulated_usage: Option<Usage> = None;
|
||||
let mut byte_buffer = Vec::new(); // Buffer for incomplete UTF-8 sequences
|
||||
let mut message_stopped = false; // Track if we've received message_stop
|
||||
|
||||
while let Some(chunk_result) = stream.next().await {
|
||||
match chunk_result {
|
||||
@@ -317,12 +316,6 @@ impl AnthropicProvider {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If we've already sent the final chunk, skip processing more events
|
||||
if message_stopped {
|
||||
debug!("Skipping event after message_stop: {}", line);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Parse Server-Sent Events format
|
||||
if let Some(data) = line.strip_prefix("data: ") {
|
||||
if data == "[DONE]" {
|
||||
@@ -458,7 +451,6 @@ impl AnthropicProvider {
|
||||
}
|
||||
"message_stop" => {
|
||||
debug!("Received message stop event");
|
||||
message_stopped = true;
|
||||
let final_chunk = CompletionChunk {
|
||||
content: String::new(),
|
||||
finished: true,
|
||||
@@ -468,8 +460,7 @@ impl AnthropicProvider {
|
||||
if tx.send(Ok(final_chunk)).await.is_err() {
|
||||
debug!("Receiver dropped, stopping stream");
|
||||
}
|
||||
// Don't return here - let the stream naturally exhaust
|
||||
// This prevents dropping the sender prematurely
|
||||
return accumulated_usage;
|
||||
}
|
||||
"error" => {
|
||||
if let Some(error) = event.error {
|
||||
@@ -477,7 +468,7 @@ impl AnthropicProvider {
|
||||
let _ = tx
|
||||
.send(Err(anyhow!("Anthropic API error: {:?}", error)))
|
||||
.await;
|
||||
break; // Break to let stream exhaust naturally
|
||||
return accumulated_usage;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
@@ -496,10 +487,7 @@ impl AnthropicProvider {
|
||||
Err(e) => {
|
||||
error!("Stream error: {}", e);
|
||||
let _ = tx.send(Err(anyhow!("Stream error: {}", e))).await;
|
||||
// Don't return here either - let the stream exhaust naturally
|
||||
// The error has been sent to the receiver, so it will handle it
|
||||
// Breaking here ensures we clean up properly
|
||||
break;
|
||||
return accumulated_usage;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,389 +0,0 @@
|
||||
# Accumulative Autonomous Mode
|
||||
|
||||
## Overview
|
||||
|
||||
Accumulative Autonomous Mode is the **new default interactive mode** for G3. It combines the ease of interactive chat with the power of autonomous implementation, allowing you to build projects iteratively by describing what you want, one requirement at a time.
|
||||
|
||||
## How It Works
|
||||
|
||||
### The Flow
|
||||
|
||||
1. **Start G3** in any directory (no arguments needed)
|
||||
2. **Describe** what you want to build
|
||||
3. **G3 automatically**:
|
||||
- Adds your input to accumulated requirements
|
||||
- Runs autonomous mode (coach-player feedback loop)
|
||||
- Implements your requirements with quality checks
|
||||
4. **Continue** adding more requirements or refinements
|
||||
5. **Repeat** until your project is complete
|
||||
|
||||
### Example Session
|
||||
|
||||
```bash
|
||||
$ cd ~/projects/my-new-app
|
||||
$ g3
|
||||
|
||||
🪿 G3 AI Coding Agent - Accumulative Mode
|
||||
>> describe what you want, I'll build it iteratively
|
||||
|
||||
📁 Workspace: /Users/you/projects/my-new-app
|
||||
|
||||
💡 Each input you provide will be added to requirements
|
||||
and I'll automatically work on implementing them.
|
||||
|
||||
Type 'exit' or 'quit' to stop, Ctrl+D to finish
|
||||
|
||||
============================================================
|
||||
📝 What would you like me to build? (describe your requirements)
|
||||
============================================================
|
||||
requirement> create a simple web server in Python with Flask that serves a homepage
|
||||
|
||||
📋 Current instructions and requirements (Turn 1):
|
||||
create a simple web server in Python with Flask that serves a homepage
|
||||
|
||||
🚀 Starting autonomous implementation...
|
||||
|
||||
🤖 G3 AI Coding Agent - Autonomous Mode
|
||||
📁 Using workspace: /Users/you/projects/my-new-app
|
||||
📋 Requirements loaded from --requirements flag
|
||||
🔄 Starting coach-player feedback loop...
|
||||
📂 No existing implementation files detected
|
||||
🎯 Starting with player implementation
|
||||
|
||||
=== TURN 1/5 - PLAYER MODE ===
|
||||
🎯 Starting player implementation...
|
||||
📋 Player starting initial implementation (no prior coach feedback)
|
||||
|
||||
[Player creates files, writes code...]
|
||||
|
||||
=== TURN 1/5 - COACH MODE ===
|
||||
🎓 Starting coach review...
|
||||
🎓 Coach review completed
|
||||
Coach feedback:
|
||||
The Flask server is implemented correctly with a homepage route.
|
||||
The code follows best practices and meets the requirements.
|
||||
IMPLEMENTATION_APPROVED
|
||||
|
||||
=== SESSION COMPLETED - IMPLEMENTATION APPROVED ===
|
||||
✅ Coach approved the implementation!
|
||||
|
||||
============================================================
|
||||
📊 AUTONOMOUS MODE SESSION REPORT
|
||||
============================================================
|
||||
⏱️ Total Duration: 12.34s
|
||||
🔄 Turns Taken: 1/5
|
||||
📝 Final Status: ✅ APPROVED
|
||||
...
|
||||
============================================================
|
||||
|
||||
✅ Autonomous run completed
|
||||
|
||||
============================================================
|
||||
📝 Turn 2 - What's next? (add more requirements or refinements)
|
||||
============================================================
|
||||
requirement> add a /api/users endpoint that returns a list of users as JSON
|
||||
|
||||
📋 Current instructions and requirements (Turn 2):
|
||||
add a /api/users endpoint that returns a list of users as JSON
|
||||
|
||||
🚀 Starting autonomous implementation...
|
||||
|
||||
[Autonomous mode runs again with BOTH requirements...]
|
||||
|
||||
============================================================
|
||||
📝 Turn 3 - What's next? (add more requirements or refinements)
|
||||
============================================================
|
||||
requirement> exit
|
||||
|
||||
👋 Goodbye!
|
||||
```
|
||||
|
||||
## Key Features
|
||||
|
||||
### 1. Requirement Accumulation
|
||||
|
||||
Each input you provide is:
|
||||
- **Numbered sequentially** (1, 2, 3, ...)
|
||||
- **Stored in memory** for the session
|
||||
- **Included in all subsequent runs**
|
||||
|
||||
This means the agent always has the full context of what you've asked for.
|
||||
|
||||
### 2. Automatic Requirements Document
|
||||
|
||||
G3 automatically generates a structured requirements document:
|
||||
|
||||
```markdown
|
||||
# Project Requirements
|
||||
|
||||
## Current Instructions and Requirements:
|
||||
|
||||
1. create a simple web server in Python with Flask that serves a homepage
|
||||
2. add a /api/users endpoint that returns a list of users as JSON
|
||||
3. add error handling for 404 and 500 errors
|
||||
|
||||
## Latest Requirement (Turn 3):
|
||||
|
||||
add error handling for 404 and 500 errors
|
||||
```
|
||||
|
||||
This document is passed to autonomous mode, ensuring the agent:
|
||||
- Knows all previous requirements
|
||||
- Focuses on the latest addition
|
||||
- Maintains consistency across iterations
|
||||
|
||||
### 3. Full Autonomous Quality
|
||||
|
||||
Each requirement triggers a complete autonomous run with:
|
||||
- **Coach-Player Feedback Loop**: Quality assurance built-in
|
||||
- **Multiple Turns**: Up to 5 iterations per requirement (configurable with `--max-turns`)
|
||||
- **Compilation Checks**: Ensures code actually works
|
||||
- **Testing**: Coach can run tests to verify functionality
|
||||
|
||||
### 4. Error Recovery
|
||||
|
||||
If an autonomous run fails:
|
||||
- You're notified of the error
|
||||
- You can provide additional requirements to fix issues
|
||||
- The session continues (doesn't crash)
|
||||
|
||||
### 5. Workspace Management
|
||||
|
||||
- Uses **current directory** as workspace
|
||||
- All files created in current directory
|
||||
- No need to specify workspace path
|
||||
- Works with existing projects or empty directories
|
||||
|
||||
## Command-Line Options
|
||||
|
||||
### Default (Accumulative Mode)
|
||||
|
||||
```bash
|
||||
g3
|
||||
```
|
||||
|
||||
Starts accumulative autonomous mode in the current directory.
|
||||
|
||||
### With Options
|
||||
|
||||
```bash
|
||||
# Use a specific workspace
|
||||
g3 --workspace ~/projects/my-app
|
||||
|
||||
# Limit autonomous turns per requirement
|
||||
g3 --max-turns 3
|
||||
|
||||
# Enable macOS Accessibility tools
|
||||
g3 --macax
|
||||
|
||||
# Enable WebDriver browser automation
|
||||
g3 --webdriver
|
||||
|
||||
# Use a specific provider/model
|
||||
g3 --provider anthropic --model claude-3-5-sonnet-20241022
|
||||
|
||||
# Show prompts and code during execution
|
||||
g3 --show-prompt --show-code
|
||||
|
||||
# Disable log files
|
||||
g3 --quiet
|
||||
```
|
||||
|
||||
### Disable Accumulative Mode
|
||||
|
||||
To use the traditional chat mode (without automatic autonomous runs):
|
||||
|
||||
```bash
|
||||
g3 --chat
|
||||
|
||||
# Alternative: legacy flag also works
|
||||
g3 --accumulative
|
||||
```
|
||||
|
||||
This gives you the old behavior where you chat with the agent without automatic autonomous runs.
|
||||
|
||||
## Use Cases
|
||||
|
||||
### 1. Rapid Prototyping
|
||||
|
||||
```bash
|
||||
requirement> create a REST API for a todo app
|
||||
requirement> add SQLite database storage
|
||||
requirement> add authentication with JWT
|
||||
requirement> add rate limiting
|
||||
```
|
||||
|
||||
### 2. Iterative Refinement
|
||||
|
||||
```bash
|
||||
requirement> create a data visualization dashboard
|
||||
requirement> make the charts interactive
|
||||
requirement> add dark mode support
|
||||
requirement> optimize for mobile devices
|
||||
```
|
||||
|
||||
### 3. Bug Fixing
|
||||
|
||||
```bash
|
||||
requirement> fix the login form validation
|
||||
requirement> handle edge case when username is empty
|
||||
requirement> add better error messages
|
||||
```
|
||||
|
||||
### 4. Feature Addition
|
||||
|
||||
```bash
|
||||
requirement> add export to CSV functionality
|
||||
requirement> add email notifications
|
||||
requirement> add admin dashboard
|
||||
```
|
||||
|
||||
## Tips and Best Practices
|
||||
|
||||
### 1. Start Simple
|
||||
|
||||
Begin with a basic requirement, let it be implemented, then add complexity:
|
||||
|
||||
```bash
|
||||
✅ Good:
|
||||
requirement> create a basic Flask web server
|
||||
requirement> add a homepage with a form
|
||||
requirement> add form validation
|
||||
|
||||
❌ Too Complex:
|
||||
requirement> create a full-stack web app with authentication, database, API, and frontend
|
||||
```
|
||||
|
||||
### 2. Be Specific
|
||||
|
||||
The more specific you are, the better the results:
|
||||
|
||||
```bash
|
||||
✅ Good:
|
||||
requirement> add a /api/users endpoint that returns JSON with id, name, and email fields
|
||||
|
||||
❌ Vague:
|
||||
requirement> add users
|
||||
```
|
||||
|
||||
### 3. One Thing at a Time
|
||||
|
||||
Focus each requirement on a single feature or fix:
|
||||
|
||||
```bash
|
||||
✅ Good:
|
||||
requirement> add error handling for database connections
|
||||
requirement> add logging for all API requests
|
||||
|
||||
❌ Multiple Things:
|
||||
requirement> add error handling and logging and monitoring and alerts
|
||||
```
|
||||
|
||||
### 4. Review Between Turns
|
||||
|
||||
After each autonomous run completes:
|
||||
- Check the generated files
|
||||
- Test the functionality
|
||||
- Decide what to add or fix next
|
||||
|
||||
### 5. Use Exit Commands
|
||||
|
||||
When done:
|
||||
- Type `exit` or `quit`
|
||||
- Press `Ctrl+D` (EOF)
|
||||
- Press `Ctrl+C` to cancel current input
|
||||
|
||||
## Comparison with Other Modes
|
||||
|
||||
| Feature | Accumulative (Default) | Traditional Interactive | Autonomous | Single-Shot |
|
||||
|---------|----------------------|------------------------|------------|-------------|
|
||||
| **Command** | `g3` | `g3 --accumulative` | `g3 --autonomous` | `g3 "task"` |
|
||||
| **Input Style** | Iterative prompts | Chat messages | requirements.md file | Command-line arg |
|
||||
| **Auto-Autonomous** | ✅ Yes | ❌ No | ✅ Yes | ❌ No |
|
||||
| **Coach-Player Loop** | ✅ Yes | ❌ No | ✅ Yes | ❌ No |
|
||||
| **Accumulates Requirements** | ✅ Yes | ❌ No | ❌ No | ❌ No |
|
||||
| **Multiple Iterations** | ✅ Yes | ✅ Yes | ✅ Yes | ❌ No |
|
||||
| **Best For** | Iterative development | Quick questions | Pre-planned projects | One-off tasks |
|
||||
|
||||
## Technical Details
|
||||
|
||||
### Requirements Storage
|
||||
|
||||
- Stored in memory (not persisted to disk)
|
||||
- Numbered sequentially starting from 1
|
||||
- Formatted as markdown list
|
||||
- Passed to autonomous mode as `--requirements` override
|
||||
|
||||
### History
|
||||
|
||||
- Saved to `~/.g3_accumulative_history`
|
||||
- Separate from traditional interactive history
|
||||
- Persists across sessions
|
||||
- Uses rustyline for readline support
|
||||
|
||||
### Workspace
|
||||
|
||||
- Defaults to current directory
|
||||
- Can be overridden with `--workspace`
|
||||
- All files created in workspace
|
||||
- Logs saved to `workspace/logs/`
|
||||
|
||||
### Autonomous Execution
|
||||
|
||||
- Full coach-player feedback loop
|
||||
- Configurable max turns (default: 5)
|
||||
- Respects all CLI flags (--macax, --webdriver, etc.)
|
||||
- Error handling allows continuation
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "No requirements provided"
|
||||
|
||||
This shouldn't happen in accumulative mode, but if it does:
|
||||
- Check that you entered a requirement
|
||||
- Ensure the requirement isn't empty
|
||||
- Try restarting G3
|
||||
|
||||
### "Autonomous run failed"
|
||||
|
||||
If an autonomous run fails:
|
||||
- Read the error message
|
||||
- Provide a new requirement to fix the issue
|
||||
- Or type `exit` and investigate manually
|
||||
|
||||
### "Context window full"
|
||||
|
||||
If you hit token limits:
|
||||
- The agent will auto-summarize
|
||||
- Or you can start a new session
|
||||
- Consider using `--max-turns` to limit iterations
|
||||
|
||||
### "Coach never approves"
|
||||
|
||||
If the coach keeps rejecting:
|
||||
- Check the coach feedback for specific issues
|
||||
- Provide more specific requirements
|
||||
- Consider increasing `--max-turns`
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
Planned improvements:
|
||||
|
||||
1. **Persistence**: Save accumulated requirements to disk
|
||||
2. **Editing**: Edit or remove previous requirements
|
||||
3. **Branching**: Try different approaches
|
||||
4. **Templates**: Pre-defined requirement sets
|
||||
5. **Review**: Show all accumulated requirements
|
||||
6. **Export**: Save to requirements.md
|
||||
7. **Undo**: Remove last requirement
|
||||
8. **Replay**: Re-run with same requirements
|
||||
|
||||
## Feedback
|
||||
|
||||
This is a new feature! Please provide feedback:
|
||||
- What works well?
|
||||
- What's confusing?
|
||||
- What features would you like?
|
||||
- Any bugs or issues?
|
||||
|
||||
Open an issue on GitHub or contribute improvements!
|
||||
Reference in New Issue
Block a user