Compare commits


4 Commits

Author              SHA1        Message                                        Date
Michael Neale       9218ba2ab4  checkpoint                                     2025-10-29 16:21:44 +11:00
Dhanji R. Prasanna  4bf0f71bbd  Merge pull request #12 from dhanji/libvision   2025-10-28 15:12:51 +11:00
                                will need this for it to work
Michael Neale       c1ce3038d8  will need this for it to work                  2025-10-28 15:07:24 +11:00
Dhanji Prasanna     4b1694b308  machine mode                                   2025-10-28 14:51:32 +11:00
5 changed files with 59 additions and 38 deletions

.cargo/config.toml Normal file (5 additions)

@@ -0,0 +1,5 @@
[target.aarch64-apple-darwin]
rustflags = ["-C", "link-args=-Wl,-rpath,@executable_path"]
[target.x86_64-apple-darwin]
rustflags = ["-C", "link-args=-Wl,-rpath,@executable_path"]


@@ -136,8 +136,12 @@ G3 is designed for:
 # Build the project
 cargo build --release
-# Run G3
-cargo run
+# Run from the build directory
+./target/release/g3
+
+# Or copy both files to somewhere in your PATH (macOS only needs both files)
+cp target/release/g3 ~/.local/bin/
+cp target/release/libVisionBridge.dylib ~/.local/bin/  # macOS only
 # Execute a task
 g3 "implement a function to calculate fibonacci numbers"


@@ -1,11 +0,0 @@
use std::env;
fn main() {
// Only add rpaths on macOS
if env::var("CARGO_CFG_TARGET_OS").unwrap() == "macos" {
// Add rpath so libVisionBridge.dylib can be found at runtime
// @executable_path means "relative to the executable"
println!("cargo:rustc-link-arg=-Wl,-rpath,@executable_path");
println!("cargo:rustc-link-arg=-Wl,-rpath,@loader_path");
}
}


@@ -1,11 +0,0 @@
use std::env;
fn main() {
// Only add rpaths on macOS
if env::var("CARGO_CFG_TARGET_OS").unwrap() == "macos" {
// Add rpath so libVisionBridge.dylib can be found at runtime
// @executable_path means "relative to the executable"
println!("cargo:rustc-link-arg=-Wl,-rpath,@executable_path");
println!("cargo:rustc-link-arg=-Wl,-rpath,@loader_path");
}
}


@@ -1444,6 +1444,13 @@ Template:
 let available = model_limit
     .saturating_sub(current_usage)
     .saturating_sub(5000);
+// Ensure we have at least 1 token available, otherwise we can't generate a summary
+if available == 0 {
+    return Err(anyhow::anyhow!(
+        "Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
+        current_usage, model_limit
+    ));
+}
 Some(available.min(10_000))
 }
 "embedded" => {
@@ -1452,10 +1459,24 @@ Template:
 let available = model_limit
     .saturating_sub(current_usage)
     .saturating_sub(1000);
+// Ensure we have at least 1 token available
+if available == 0 {
+    return Err(anyhow::anyhow!(
+        "Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
+        current_usage, model_limit
+    ));
+}
 Some(available.min(3000))
 }
 _ => {
     let available = self.context_window.remaining_tokens().saturating_sub(2000);
+    // Ensure we have at least 1 token available
+    if available == 0 {
+        return Err(anyhow::anyhow!(
+            "Insufficient tokens available for summary generation. Current usage: {} tokens. Please start a new session.",
+            self.context_window.used_tokens
+        ));
+    }
     Some(available.min(5000))
 }
 };
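
Context for the guard repeated in these hunks: because saturating_sub clamps at zero, an over-full context produces available == 0 rather than a negative number, and the new check turns that into an explicit error instead of requesting a zero-token summary. A minimal self-contained sketch of the same arithmetic (the summary_budget function and the figures in main are illustrative, not taken from the repository):

// Illustrative sketch only; summary_budget and the numbers below are hypothetical.
fn summary_budget(model_limit: u64, current_usage: u64, reserve: u64, cap: u64) -> Result<u64, String> {
    // saturating_sub clamps at zero instead of underflowing, so an over-full
    // context yields available == 0 rather than a wrapped-around huge value.
    let available = model_limit
        .saturating_sub(current_usage)
        .saturating_sub(reserve);
    if available == 0 {
        return Err(format!(
            "Insufficient tokens: {} of {} used with {} reserved",
            current_usage, model_limit, reserve
        ));
    }
    Ok(available.min(cap))
}

fn main() {
    // Plenty of headroom: the budget is capped at 10k.
    assert_eq!(summary_budget(200_000, 50_000, 5_000, 10_000), Ok(10_000));
    // Nearly full: only the 500 leftover tokens are granted.
    assert_eq!(summary_budget(200_000, 194_500, 5_000, 10_000), Ok(500));
    // Over budget: the guard returns an error instead of a zero-token request.
    assert!(summary_budget(200_000, 199_000, 5_000, 10_000).is_err());
}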
@@ -2347,6 +2368,13 @@ Template:
 let available = model_limit
     .saturating_sub(current_usage)
     .saturating_sub(5000);
+// Ensure we have at least 1 token available, otherwise we can't generate a summary
+if available == 0 {
+    return Err(anyhow::anyhow!(
+        "Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
+        current_usage, model_limit
+    ));
+}
 // Cap at a reasonable summary size (10k tokens max)
 Some(available.min(10_000))
 }
@@ -2358,12 +2386,26 @@ Template:
 let available = model_limit
     .saturating_sub(current_usage)
     .saturating_sub(1000);
+// Ensure we have at least 1 token available
+if available == 0 {
+    return Err(anyhow::anyhow!(
+        "Insufficient tokens available for summary generation. Current usage: {} tokens, model limit: {} tokens. Please start a new session.",
+        current_usage, model_limit
+    ));
+}
 // Cap at 3k for embedded models
 Some(available.min(3000))
 }
 _ => {
     // Default: conservative approach
     let available = self.context_window.remaining_tokens().saturating_sub(2000);
+    // Ensure we have at least 1 token available
+    if available == 0 {
+        return Err(anyhow::anyhow!(
+            "Insufficient tokens available for summary generation. Current usage: {} tokens. Please start a new session.",
+            self.context_window.used_tokens
+        ));
+    }
     Some(available.min(5000))
 }
 };
@@ -2675,12 +2717,7 @@ Template:
 // Display tool execution result with proper indentation
 if tool_call.tool != "final_output" {
-    // Skip displaying output for shell tool since it was already streamed
-    let should_display_output = tool_call.tool != "shell";
-    let output_lines: Vec<&str> = if should_display_output {
-        tool_result.lines().collect()
-    } else { vec![] };
+    let output_lines: Vec<&str> = tool_result.lines().collect();
     // Check if UI wants full output (machine mode) or truncated (human mode)
     let wants_full = self.ui_writer.wants_full_output();
@@ -3192,16 +3229,13 @@ Template:
 {
     Ok(result) => {
         if result.success {
-            // Don't return stdout - it was already streamed to the UI
-            // Returning it would cause duplicate output
-            Ok("✅ Command executed successfully".to_string())
+            Ok(if result.stdout.is_empty() {
+                "✅ Command executed successfully".to_string()
+            } else {
+                result.stdout.trim().to_string()
+            })
         } else {
-            // For errors, return stderr since it wasn't streamed
-            Ok(if result.stderr.is_empty() {
-                "❌ Command failed".to_string()
-            } else {
-                format!("❌ Command failed: {}", result.stderr.trim())
-            })
+            Ok(format!("❌ Command failed: {}", result.stderr.trim()))
         }
     }
     Err(e) => Ok(format!("❌ Execution error: {}", e)),
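
The last two hunks change the shell tool to return command output in the tool result instead of suppressing it on the assumption that it was already streamed to the UI. A rough sketch of the resulting behavior, with a hypothetical CommandResult type and format_tool_result helper standing in for the real ones:

// Illustrative sketch; CommandResult and format_tool_result are assumed names,
// not the actual types used in the codebase.
struct CommandResult {
    success: bool,
    stdout: String,
    stderr: String,
}

fn format_tool_result(result: &CommandResult) -> String {
    if result.success {
        // Success: surface stdout in the tool result, with a marker when it is empty.
        if result.stdout.is_empty() {
            "✅ Command executed successfully".to_string()
        } else {
            result.stdout.trim().to_string()
        }
    } else {
        // Failure: report stderr.
        format!("❌ Command failed: {}", result.stderr.trim())
    }
}

fn main() {
    let ok = CommandResult { success: true, stdout: "42\n".into(), stderr: String::new() };
    let failed = CommandResult { success: false, stdout: String::new(), stderr: "oops".into() };
    println!("{}", format_tool_result(&ok));     // prints: 42
    println!("{}", format_tool_result(&failed)); // prints: ❌ Command failed: oops
}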