some autonomous mode fixes

This commit is contained in:
Dhanji Prasanna
2025-10-02 09:45:18 +10:00
parent 98cf72c12a
commit 9638f40cfb
3 changed files with 61 additions and 11 deletions

11
TODO Normal file
View File

@@ -0,0 +1,11 @@
next tasks
- get something working with autonomous mode
- g3d
- context token counting bug
- error where the agent prematurely gives up its turn
- "project" behaviors (read readme first)
- ui
- git
- swarm

View File

@@ -397,13 +397,25 @@ async fn run_autonomous(
};
output.print("🎯 Starting player implementation...");
let player_result = agent
// Execute player task and handle the result properly
match agent
.execute_task_with_timing(&player_prompt, None, false, show_prompt, show_code, true)
.await;
if let Err(e) = player_result {
output.print(&format!("❌ Player implementation failed: {}", e));
.await
{
Ok(player_result) => {
// Display player's implementation result
output.print("📝 Player implementation completed:");
output.print_markdown(&player_result);
}
Err(e) => {
output.print(&format!("❌ Player implementation failed: {}", e));
// Continue to coach review even if player had an error
}
}
// Give some time for file operations to complete
tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
// Create a new agent instance for coach mode to ensure fresh context
let config = g3_config::Config::load(None)?;

View File

@@ -1222,6 +1222,7 @@ The tool will execute immediately and you'll receive the result (success or erro
let mut parser = StreamingToolParser::new();
let mut current_response = String::new();
let mut tool_executed = false;
let mut chunks_received = 0;
while let Some(chunk_result) = stream.next().await {
match chunk_result {
@@ -1230,6 +1231,11 @@ The tool will execute immediately and you'll receive the result (success or erro
if first_token_time.is_none() && !chunk.content.is_empty() {
first_token_time = Some(stream_start.elapsed());
}
chunks_received += 1;
if chunks_received == 1 {
debug!("First chunk received: content_len={}, finished={}", chunk.content.len(), chunk.finished);
}
// Process chunk with the new parser
let completed_tools = parser.process_chunk(&chunk);
@@ -1442,13 +1448,29 @@ The tool will execute immediately and you'll receive the result (success or erro
}
if chunk.finished {
debug!("Stream finished: tool_executed={}, current_response_len={}, full_response_len={}, chunks_received={}",
tool_executed, current_response.len(), full_response.len(), chunks_received);
// Stream finished - check if we should continue or return
if !tool_executed {
// No tools were executed in this iteration, we're done
full_response.push_str(&current_response);
// No tools were executed in this iteration
// Check if we got any response at all
if current_response.is_empty() && full_response.is_empty() {
// No response received - this is an error condition
warn!("Stream finished without any content or tool calls");
warn!("Chunks received: {}", chunks_received);
return Err(anyhow::anyhow!(
"No response received from the model. The model may be experiencing issues or the request may have been malformed."
));
}
// Add current response to full response if we have any
if !current_response.is_empty() {
full_response.push_str(&current_response);
}
println!();
let ttft =
first_token_time.unwrap_or_else(|| stream_start.elapsed());
let ttft = first_token_time.unwrap_or_else(|| stream_start.elapsed());
return Ok((full_response, ttft));
}
break; // Tool was executed, break to continue outer loop
@@ -1469,8 +1491,13 @@ The tool will execute immediately and you'll receive the result (success or erro
// If we get here and no tool was executed, we're done
if !tool_executed {
full_response.push_str(&current_response);
println!();
if current_response.is_empty() && full_response.is_empty() {
warn!("Loop exited without any response after {} iterations", iteration_count);
} else {
full_response.push_str(&current_response);
println!();
}
let ttft = first_token_time.unwrap_or_else(|| stream_start.elapsed());
return Ok((full_response, ttft));
}