multiline input with \

This commit is contained in:
Dhanji Prasanna
2025-09-29 10:23:41 +10:00
parent c4ee4a6cde
commit ce273ba3fb
2 changed files with 220 additions and 24 deletions

View File

@@ -1,9 +1,9 @@
use anyhow::Result; use anyhow::Result;
use clap::Parser; use clap::Parser;
use g3_config::Config; use g3_config::Config;
use g3_core::{Agent, project::Project}; use g3_core::{project::Project, Agent};
use rustyline::error::ReadlineError; use rustyline::error::ReadlineError;
use rustyline::DefaultEditor; use rustyline::{Cmd, ConditionalEventHandler, DefaultEditor, Event, EventHandler, KeyEvent};
use std::fs::OpenOptions; use std::fs::OpenOptions;
use std::io::{BufWriter, Write}; use std::io::{BufWriter, Write};
use std::path::PathBuf; use std::path::PathBuf;
@@ -115,7 +115,14 @@ pub async fn run() -> Result<()> {
if cli.autonomous { if cli.autonomous {
// Autonomous mode with coach-player feedback loop // Autonomous mode with coach-player feedback loop
info!("Starting autonomous mode"); info!("Starting autonomous mode");
run_autonomous(agent, project, cli.show_prompt, cli.show_code, cli.max_turns).await?; run_autonomous(
agent,
project,
cli.show_prompt,
cli.show_code,
cli.max_turns,
)
.await?;
} else if let Some(task) = cli.task { } else if let Some(task) = cli.task {
// Single-shot mode // Single-shot mode
info!("Executing task: {}", task); info!("Executing task: {}", task);
@@ -152,6 +159,7 @@ async fn run_interactive(mut agent: Agent, show_prompt: bool, show_code: bool) -
println!(); println!();
println!("Type 'exit' or 'quit' to exit, use Up/Down arrows for command history"); println!("Type 'exit' or 'quit' to exit, use Up/Down arrows for command history");
println!("Press Enter to submit, Shift+Enter to add a new line");
println!(); println!();
// Initialize rustyline editor with history // Initialize rustyline editor with history
@@ -167,14 +175,66 @@ async fn run_interactive(mut agent: Agent, show_prompt: bool, show_code: bool) -
let _ = rl.load_history(history_path); let _ = rl.load_history(history_path);
} }
// Track whether we're in multi-line mode
let mut multi_line_buffer = String::new();
let mut in_multi_line = false;
loop { loop {
// Display context window progress bar before each prompt // Display context window progress bar before each prompt
display_context_progress(&agent); display_context_progress(&agent);
let readline = rl.readline("g3> "); // Adjust prompt based on whether we're in multi-line mode
let prompt = if in_multi_line { "... > " } else { "g3> " };
let readline = rl.readline(prompt);
match readline { match readline {
Ok(line) => { Ok(line) => {
let input = line.trim(); // Check if the line ends with a backslash (alternative multi-line indicator)
// or if we're continuing a multi-line input
let trimmed_line = line.trim_end();
// Handle multi-line continuation with backslash
if trimmed_line.ends_with('\\') && !in_multi_line {
// Start multi-line mode
in_multi_line = true;
// Remove the backslash and add to buffer
let line_without_backslash = &trimmed_line[..trimmed_line.len() - 1];
multi_line_buffer.push_str(line_without_backslash);
multi_line_buffer.push('\n');
continue;
} else if in_multi_line {
// Continue multi-line input
if trimmed_line.is_empty() {
// Empty line ends multi-line mode - process the accumulated input
in_multi_line = false;
// Fall through to process the accumulated input
} else if trimmed_line.ends_with('\\') {
// Continue multi-line with backslash
let line_without_backslash = &trimmed_line[..trimmed_line.len() - 1];
multi_line_buffer.push_str(line_without_backslash);
multi_line_buffer.push('\n');
continue;
} else {
// Last line of multi-line input (no backslash) - add it and process
multi_line_buffer.push_str(&line);
in_multi_line = false;
// Fall through to process the accumulated input
}
}
// Get the final input
let input = if !multi_line_buffer.is_empty() {
// We have multi-line input to process
let result = multi_line_buffer.trim().to_string();
multi_line_buffer.clear();
result
} else if !in_multi_line {
// Single line input
line.trim().to_string()
} else {
// Should not reach here, but handle gracefully
continue;
};
if input == "exit" || input == "quit" { if input == "exit" || input == "quit" {
break; break;
@@ -185,7 +245,7 @@ async fn run_interactive(mut agent: Agent, show_prompt: bool, show_code: bool) -
} }
// Add to history // Add to history
rl.add_history_entry(input)?; rl.add_history_entry(&input)?;
// Show thinking indicator immediately // Show thinking indicator immediately
print!("🤔 Thinking..."); print!("🤔 Thinking...");
@@ -206,7 +266,7 @@ async fn run_interactive(mut agent: Agent, show_prompt: bool, show_code: bool) -
// Execute task with cancellation support // Execute task with cancellation support
let execution_result = tokio::select! { let execution_result = tokio::select! {
result = agent.execute_task_with_timing_cancellable( result = agent.execute_task_with_timing_cancellable(
input, None, false, show_prompt, show_code, true, cancellation_token &input, None, false, show_prompt, show_code, true, cancellation_token
) => { ) => {
esc_monitor.abort(); esc_monitor.abort();
result result
@@ -231,7 +291,15 @@ async fn run_interactive(mut agent: Agent, show_prompt: bool, show_code: bool) -
} }
} }
Err(ReadlineError::Interrupted) => { Err(ReadlineError::Interrupted) => {
// Ctrl-C pressed
if in_multi_line {
// Cancel multi-line input
println!("Multi-line input cancelled");
multi_line_buffer.clear();
in_multi_line = false;
} else {
println!("CTRL-C"); println!("CTRL-C");
}
continue; continue;
} }
Err(ReadlineError::Eof) => { Err(ReadlineError::Eof) => {
@@ -548,7 +616,13 @@ fn format_duration(duration: Duration) -> String {
} }
} }
async fn run_autonomous(mut agent: Agent, project: Project, show_prompt: bool, show_code: bool, max_turns: usize) -> Result<()> { async fn run_autonomous(
mut agent: Agent,
project: Project,
show_prompt: bool,
show_code: bool,
max_turns: usize,
) -> Result<()> {
// Set up logging // Set up logging
project.ensure_logs_dir()?; project.ensure_logs_dir()?;
let logger = AutonomousLogger::new(&project.logs_dir())?; let logger = AutonomousLogger::new(&project.logs_dir())?;
@@ -572,7 +646,10 @@ async fn run_autonomous(mut agent: Agent, project: Project, show_prompt: bool, s
logger.log(&format!( logger.log(&format!(
" Please create a requirements.md file with your project requirements at:" " Please create a requirements.md file with your project requirements at:"
)); ));
logger.log(&format!(" {}/requirements.md", project.workspace().display())); logger.log(&format!(
" {}/requirements.md",
project.workspace().display()
));
return Ok(()); return Ok(());
} }
@@ -696,12 +773,13 @@ REQUIREMENTS:
IMPLEMENTATION REVIEW: IMPLEMENTATION REVIEW:
Review the current state of the project and provide a concise critique focusing on: Review the current state of the project and provide a concise critique focusing on:
1. Whether the requirements are correctly implemented 1. Whether the requirements are correctly implemented
2. What's missing or incorrect 2. Whether the project compiles successfully
3. Specific improvements needed 3. What requirements are missing or incorrect
4. Specific improvements needed to satisfy requirements
If the implementation correctly meets all requirements, respond with: 'IMPLEMENTATION_APPROVED' If the implementation correctly meets all requirements, respond with: 'IMPLEMENTATION_APPROVED'
If improvements are needed, provide specific actionable feedback. Don't be overly critical. APPROVE the If improvements are needed, provide specific actionable feedback. Be thorough but don't be overly critical. APPROVE the
implementation if it generally fits the bill, doesn't have compile errors or glaring omissions. implementation if it doesn't have compile errors, glaring omissions and generally fits the bill.
Keep your response concise and focused on actionable items.", Keep your response concise and focused on actionable items.",
requirements requirements
@@ -946,7 +1024,10 @@ impl AutonomousLogger {
first_line.to_string() first_line.to_string()
} else { } else {
// Use char indices to ensure we don't split UTF-8 characters // Use char indices to ensure we don't split UTF-8 characters
let truncated: String = first_line.chars().take(max_chars.saturating_sub(3)).collect(); let truncated: String = first_line
.chars()
.take(max_chars.saturating_sub(3))
.collect();
format!("{}...", truncated) format!("{}...", truncated)
} }
} }

View File

@@ -260,6 +260,47 @@ impl ContextWindow {
pub fn remaining_tokens(&self) -> u32 { pub fn remaining_tokens(&self) -> u32 {
self.total_tokens.saturating_sub(self.used_tokens) self.total_tokens.saturating_sub(self.used_tokens)
} }
/// Reports whether the conversation has grown large enough that an
/// auto-summarization pass should be triggered before the next request.
///
/// The trigger point is 80% of the window's token capacity, leaving
/// headroom for the summarization round-trip itself.
pub fn should_summarize(&self) -> bool {
    // Untyped float literal so it unifies with whatever float type
    // `percentage_used()` returns.
    let trigger_percent = 80.0;
    self.percentage_used() >= trigger_percent
}
/// Create a summary request prompt for the current conversation.
///
/// Returns the fixed instruction text that is sent to the model when the
/// context window approaches capacity. The prompt asks for a structured,
/// six-part summary (goal, decisions, actions, state, context, pending
/// items) detailed enough to re-seed a fresh conversation with full
/// context. Note: the method takes `&self` but reads no instance state —
/// the prompt is constant.
pub fn create_summary_prompt(&self) -> String {
"Please provide a comprehensive summary of our conversation so far. Include:
1. **Main Topic/Goal**: What is the primary task or objective being worked on?
2. **Key Decisions**: What important decisions have been made?
3. **Actions Taken**: What specific actions, commands, or code changes have been completed?
4. **Current State**: What is the current status of the work?
5. **Important Context**: Any critical information, file paths, configurations, or constraints that should be remembered?
6. **Pending Items**: What remains to be done or what was the user's last request?
Format this as a detailed but concise summary that can be used to resume the conversation from scratch while maintaining full context.".to_string()
}
/// Replace the entire conversation history with a compact summary.
///
/// Clears the accumulated messages and resets the token count to zero,
/// then re-seeds the history with the summary (wrapped in a system
/// message) and, when provided, the most recent user message so the
/// agent can resume the in-flight request without the user repeating it.
pub fn reset_with_summary(&mut self, summary: String, latest_user_message: Option<String>) {
    // Start from an empty window: drop all messages and token accounting.
    self.conversation_history.clear();
    self.used_tokens = 0;

    // Seed the fresh context with the distilled state of the prior
    // conversation as a system message.
    self.add_message(Message {
        role: MessageRole::System,
        content: format!("Previous conversation summary:\n\n{}", summary),
    });

    // Re-append the latest user request, if any, so work continues
    // where it left off.
    if let Some(content) = latest_user_message {
        self.add_message(Message {
            role: MessageRole::User,
            content,
        });
    }
}
} }
pub struct Agent { pub struct Agent {
@@ -510,16 +551,17 @@ impl Agent {
let provider = self.providers.get(None)?; let provider = self.providers.get(None)?;
let system_prompt = if provider.has_native_tool_calling() { let system_prompt = if provider.has_native_tool_calling() {
// For native tool calling providers, use a more explicit system prompt // For native tool calling providers, use a more explicit system prompt
"You are G3, a general-purpose AI agent. Your goal is to analyze and solve problems by writing code. The current directory always contains a project that the user is working on and likely referring to. "You are G3, an AI programming agent. Your goal is to analyze, write and modify code to achieve given goals.
You have access to tools. When you need to accomplish a task, you MUST use the appropriate tool immediately. Do not just describe what you would do - actually use the tools. You have access to tools. When you need to accomplish a task, you MUST use the appropriate tool. Do not just describe what you would do - actually use the tools.
IMPORTANT: You must call tools to complete tasks. When you receive a request: IMPORTANT: You must call tools to achieve goals. When you receive a request:
1. Identify what needs to be done 1. Analyze and identify what needs to be done
2. Immediately call the appropriate tool with the required parameters 2. Call the appropriate tool with the required parameters
3. Wait for the tool result 3. Wait for the tool result
4. Continue or complete the task based on the result 4. Continue or complete the task based on the result
5. Call the final_output task with a detailed summary when done with all tasks. 5. If you repeatedly try something and it fails, try a different approach
6. Call the final_output task with a detailed summary when done with all tasks.
For shell commands: Use the shell tool with the exact command needed. Avoid commands that produce a large amount of output, and consider piping those outputs to files. Example: If asked to list files, immediately call the shell tool with command parameter \"ls\". For shell commands: Use the shell tool with the exact command needed. Avoid commands that produce a large amount of output, and consider piping those outputs to files. Example: If asked to list files, immediately call the shell tool with command parameter \"ls\".
@@ -852,6 +894,79 @@ The tool will execute immediately and you'll receive the result (success or erro
const MAX_ITERATIONS: usize = 30; // Prevent infinite loops const MAX_ITERATIONS: usize = 30; // Prevent infinite loops
let mut response_started = false; let mut response_started = false;
// Check if we need to summarize before starting
if self.context_window.should_summarize() {
info!(
"Context window at {}%, triggering auto-summarization",
self.context_window.percentage_used() as u32
);
// Notify user about summarization
println!(
"\n📊 Context window reaching capacity ({}%). Creating summary...",
self.context_window.percentage_used() as u32
);
// Create summary request
let summary_prompt = self.context_window.create_summary_prompt();
let summary_messages = vec![
Message {
role: MessageRole::System,
content: "You are a helpful assistant that creates concise summaries."
.to_string(),
},
Message {
role: MessageRole::User,
content: format!(
"Based on this conversation history, {}\n\nConversation:\n{}",
summary_prompt,
self.context_window
.conversation_history
.iter()
.map(|m| format!("{:?}: {}", m.role, m.content))
.collect::<Vec<_>>()
.join("\n\n")
),
},
];
let provider = self.providers.get(None)?;
let summary_request = CompletionRequest {
messages: summary_messages,
max_tokens: Some(4000), // Reasonable size for summary
temperature: Some(0.3), // Lower temperature for factual summary
stream: false,
tools: None,
};
// Get the summary
match provider.complete(summary_request).await {
Ok(summary_response) => {
println!("✅ Summary created successfully. Resetting context window...\n");
// Extract the latest user message from the request
let latest_user_msg = request
.messages
.iter()
.rev()
.find(|m| matches!(m.role, MessageRole::User))
.map(|m| m.content.clone());
// Reset context with summary
self.context_window
.reset_with_summary(summary_response.content, latest_user_msg);
// Update the request with new context
request.messages = self.context_window.conversation_history.clone();
println!("🔄 Context reset complete. Continuing with your request...\n");
}
Err(e) => {
warn!("Failed to create summary: {}. Continuing without reset.", e);
}
}
}
loop { loop {
iteration_count += 1; iteration_count += 1;
debug!("Starting iteration {}", iteration_count); debug!("Starting iteration {}", iteration_count);
@@ -1045,8 +1160,7 @@ The tool will execute immediately and you'll receive the result (success or erro
role: MessageRole::Assistant, role: MessageRole::Assistant,
content: format!( content: format!(
"{{\"tool\": \"{}\", \"args\": {}}}", "{{\"tool\": \"{}\", \"args\": {}}}",
tool_call.tool, tool_call.tool, tool_call.args
tool_call.args
), ),
} }
}; };
@@ -1108,7 +1222,8 @@ The tool will execute immediately and you'll receive the result (success or erro
// No tools were executed in this iteration, we're done // No tools were executed in this iteration, we're done
full_response.push_str(&current_response); full_response.push_str(&current_response);
println!(); println!();
let ttft = first_token_time.unwrap_or_else(|| stream_start.elapsed()); let ttft =
first_token_time.unwrap_or_else(|| stream_start.elapsed());
return Ok((full_response, ttft)); return Ok((full_response, ttft));
} }
break; // Tool was executed, break to continue outer loop break; // Tool was executed, break to continue outer loop