Compare commits

..

5 Commits

Author SHA1 Message Date
Michael Neale
f7d1c8e42a Merge branch 'main' into micn/libvision-fix
* main:
  control commands for machine mode
  Fix duplicate dump at end
  minor
  --machine mode flag for verbose CLI output
  fixed x,y detection in vision click
  screenshotting bug fix
2025-10-28 13:56:49 +11:00
Dhanji Prasanna
7c2c433746 control commands for machine mode 2025-10-28 12:35:58 +11:00
Dhanji Prasanna
98f4220544 Fix duplicate dump at end 2025-10-27 13:48:46 +11:00
Dhanji Prasanna
a4476a555c minor 2025-10-27 13:32:14 +11:00
Michael Neale
8a7d6ad4ba lets you run the binary from anywhere 2025-10-24 19:28:14 +11:00
5 changed files with 39 additions and 60 deletions

View File

@@ -1,5 +0,0 @@
[target.aarch64-apple-darwin]
rustflags = ["-C", "link-args=-Wl,-rpath,@executable_path"]
[target.x86_64-apple-darwin]
rustflags = ["-C", "link-args=-Wl,-rpath,@executable_path"]

View File

@@ -136,12 +136,8 @@ G3 is designed for:
# Build the project # Build the project
cargo build --release cargo build --release
# Run from the build directory # Run G3
./target/release/g3 cargo run
# Or copy both files to somewhere in your PATH (macOS only needs both files)
cp target/release/g3 ~/.local/bin/
cp target/release/libVisionBridge.dylib ~/.local/bin/ # macOS only
# Execute a task # Execute a task
g3 "implement a function to calculate fibonacci numbers" g3 "implement a function to calculate fibonacci numbers"

11
build.rs Normal file
View File

@@ -0,0 +1,11 @@
use std::env;
fn main() {
    // Build script: on macOS, embed rpath entries so the dynamic loader can
    // find libVisionBridge.dylib sitting next to the built binary at runtime.
    //
    // CARGO_CFG_TARGET_OS is set by Cargo when it runs build scripts. Use a
    // non-panicking comparison instead of `.unwrap()` so the script does not
    // crash if it is ever invoked outside of a Cargo build environment.
    if env::var("CARGO_CFG_TARGET_OS").as_deref() == Ok("macos") {
        // @executable_path resolves relative to the final executable;
        // @loader_path resolves relative to whichever binary/dylib is loading.
        println!("cargo:rustc-link-arg=-Wl,-rpath,@executable_path");
        println!("cargo:rustc-link-arg=-Wl,-rpath,@loader_path");
    }
}

11
crates/g3-cli/build.rs Normal file
View File

@@ -0,0 +1,11 @@
use std::env;
fn main() {
    // Build script: on macOS, embed rpath entries so the dynamic loader can
    // find libVisionBridge.dylib sitting next to the built binary at runtime.
    //
    // CARGO_CFG_TARGET_OS is set by Cargo when it runs build scripts. Use a
    // non-panicking comparison instead of `.unwrap()` so the script does not
    // crash if it is ever invoked outside of a Cargo build environment.
    if env::var("CARGO_CFG_TARGET_OS").as_deref() == Ok("macos") {
        // @executable_path resolves relative to the final executable;
        // @loader_path resolves relative to whichever binary/dylib is loading.
        println!("cargo:rustc-link-arg=-Wl,-rpath,@executable_path");
        println!("cargo:rustc-link-arg=-Wl,-rpath,@loader_path");
    }
}

View File

@@ -1444,13 +1444,6 @@ Template:
let available = model_limit let available = model_limit
.saturating_sub(current_usage) .saturating_sub(current_usage)
.saturating_sub(5000); .saturating_sub(5000);
// Ensure we have at least 1 token available, otherwise we can't generate a summary
if available == 0 {
return Err(anyhow::anyhow!(
"Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
current_usage, model_limit
));
}
Some(available.min(10_000)) Some(available.min(10_000))
} }
"embedded" => { "embedded" => {
@@ -1459,24 +1452,10 @@ Template:
let available = model_limit let available = model_limit
.saturating_sub(current_usage) .saturating_sub(current_usage)
.saturating_sub(1000); .saturating_sub(1000);
// Ensure we have at least 1 token available
if available == 0 {
return Err(anyhow::anyhow!(
"Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
current_usage, model_limit
));
}
Some(available.min(3000)) Some(available.min(3000))
} }
_ => { _ => {
let available = self.context_window.remaining_tokens().saturating_sub(2000); let available = self.context_window.remaining_tokens().saturating_sub(2000);
// Ensure we have at least 1 token available
if available == 0 {
return Err(anyhow::anyhow!(
"Insufficient tokens available for summary generation. Current usage: {} tokens. Please start a new session.",
self.context_window.used_tokens
));
}
Some(available.min(5000)) Some(available.min(5000))
} }
}; };
@@ -2368,13 +2347,6 @@ Template:
let available = model_limit let available = model_limit
.saturating_sub(current_usage) .saturating_sub(current_usage)
.saturating_sub(5000); .saturating_sub(5000);
// Ensure we have at least 1 token available, otherwise we can't generate a summary
if available == 0 {
return Err(anyhow::anyhow!(
"Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
current_usage, model_limit
));
}
// Cap at a reasonable summary size (10k tokens max) // Cap at a reasonable summary size (10k tokens max)
Some(available.min(10_000)) Some(available.min(10_000))
} }
@@ -2386,26 +2358,12 @@ Template:
let available = model_limit let available = model_limit
.saturating_sub(current_usage) .saturating_sub(current_usage)
.saturating_sub(1000); .saturating_sub(1000);
// Ensure we have at least 1 token available
if available == 0 {
return Err(anyhow::anyhow!(
"Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
current_usage, model_limit
));
}
// Cap at 3k for embedded models // Cap at 3k for embedded models
Some(available.min(3000)) Some(available.min(3000))
} }
_ => { _ => {
// Default: conservative approach // Default: conservative approach
let available = self.context_window.remaining_tokens().saturating_sub(2000); let available = self.context_window.remaining_tokens().saturating_sub(2000);
// Ensure we have at least 1 token available
if available == 0 {
return Err(anyhow::anyhow!(
"Insufficient tokens available for summary generation. Current usage: {} tokens. Please start a new session.",
self.context_window.used_tokens
));
}
Some(available.min(5000)) Some(available.min(5000))
} }
}; };
@@ -2717,7 +2675,12 @@ Template:
// Display tool execution result with proper indentation // Display tool execution result with proper indentation
if tool_call.tool != "final_output" { if tool_call.tool != "final_output" {
let output_lines: Vec<&str> = tool_result.lines().collect(); // Skip displaying output for shell tool since it was already streamed
let should_display_output = tool_call.tool != "shell";
let output_lines: Vec<&str> = if should_display_output {
tool_result.lines().collect()
} else { vec![] };
// Check if UI wants full output (machine mode) or truncated (human mode) // Check if UI wants full output (machine mode) or truncated (human mode)
let wants_full = self.ui_writer.wants_full_output(); let wants_full = self.ui_writer.wants_full_output();
@@ -3229,13 +3192,16 @@ Template:
{ {
Ok(result) => { Ok(result) => {
if result.success { if result.success {
Ok(if result.stdout.is_empty() { // Don't return stdout - it was already streamed to the UI
"✅ Command executed successfully".to_string() // Returning it would cause duplicate output
Ok("✅ Command executed successfully".to_string())
} else { } else {
result.stdout.trim().to_string() // For errors, return stderr since it wasn't streamed
Ok(if result.stderr.is_empty() {
"❌ Command failed".to_string()
} else {
format!("❌ Command failed: {}", result.stderr.trim())
}) })
} else {
Ok(format!("❌ Command failed: {}", result.stderr.trim()))
} }
} }
Err(e) => Ok(format!("❌ Execution error: {}", e)), Err(e) => Ok(format!("❌ Execution error: {}", e)),