Convert all INFO logs to DEBUG to reduce CLI noise

Converted ~77 info! macro calls to debug! across the codebase to prevent
log messages from interrupting the CLI experience during normal operation.
Users can still see these logs by setting RUST_LOG=debug if needed.

Affected crates:
- g3-cli
- g3-computer-control
- g3-console
- g3-core
- g3-ensembles
- g3-execution
- g3-providers
This commit is contained in:
Dhanji R. Prasanna
2025-12-22 16:27:35 +11:00
parent 58cbf3431a
commit 923def0ab2
19 changed files with 92 additions and 92 deletions

View File

@@ -267,7 +267,7 @@ use std::path::Path;
use std::path::PathBuf;
use std::process::exit;
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
use tracing::{debug, error};
use g3_core::error_handling::{classify_error, ErrorType, RecoverableError};
mod simple_output;
@@ -2693,7 +2693,7 @@ Remember: Be clear in your review and concise in your feedback. APPROVE iff the
extract_coach_feedback_from_logs(&coach_result, &coach_agent, &output)?;
// Log the size of the feedback for debugging
info!(
debug!(
"Coach feedback extracted: {} characters (from {} total)",
coach_feedback_text.len(),
coach_result.response.len()

View File

@@ -24,7 +24,7 @@ impl MacOSController {
pub fn new() -> Result<Self> {
let ocr = Box::new(DefaultOCR::new()?);
let ocr_name = ocr.name().to_string();
tracing::info!("Initialized macOS controller with OCR engine: {}", ocr_name);
tracing::debug!("Initialized macOS controller with OCR engine: {}", ocr_name);
Ok(Self {
ocr_engine: ocr,
ocr_name,
@@ -155,7 +155,7 @@ impl ComputerController for MacOSController {
// 1. At layer 0 (normal windows, not menu bar)
// 2. Have real bounds (width and height >= 100)
if layer == 0 && has_real_bounds {
tracing::info!("Found valid window: ID {} for app '{}' (layer={}, bounds valid)", id, owner, layer);
tracing::debug!("Found valid window: ID {} for app '{}' (layer={}, bounds valid)", id, owner, layer);
found_window_id = Some((id as u32, owner.clone()));
break;
} else {
@@ -178,7 +178,7 @@ impl ComputerController for MacOSController {
let (cg_window_id, matched_owner) = cg_window_id.ok_or_else(|| {
anyhow::anyhow!("Could not find window for application '{}'. Use list_windows to see available windows.", app_name)
})?;
tracing::info!(
tracing::debug!(
"Taking screenshot of window ID {} for app '{}'",
cg_window_id,
matched_owner
@@ -468,7 +468,7 @@ impl MacOSController {
// Only accept windows with real bounds (>= 100x100 pixels)
if w >= 100 && h >= 100 {
tracing::info!("Found valid window bounds for '{}': x={}, y={}, w={}, h={} (layer={})", owner, x, y, w, h, layer);
tracing::debug!("Found valid window bounds for '{}': x={}, y={}, w={}, h={} (layer={})", owner, x, y, w, h, layer);
return Ok((x, y, w, h));
} else {
tracing::debug!(

View File

@@ -3,7 +3,7 @@ use crate::process::ProcessController;
use axum::{extract::State, http::StatusCode, Json};
use std::sync::Arc;
use tokio::sync::Mutex;
use tracing::{error, info};
use tracing::{debug, error};
pub type ControllerState = Arc<Mutex<ProcessController>>;
@@ -22,7 +22,7 @@ pub async fn kill_instance(
match controller.kill_process(pid) {
Ok(_) => {
info!("Successfully killed process {}", pid);
debug!("Successfully killed process {}", pid);
Ok(Json(serde_json::json!({
"status": "terminating"
})))
@@ -38,7 +38,7 @@ pub async fn restart_instance(
State(controller): State<ControllerState>,
axum::extract::Path(id): axum::extract::Path<String>,
) -> Result<Json<LaunchResponse>, StatusCode> {
info!("Restarting instance: {}", id);
debug!("Restarting instance: {}", id);
// Extract PID from instance ID (format: pid_timestamp)
let pid: u32 = id
@@ -81,7 +81,7 @@ pub async fn launch_instance(
State(controller): State<ControllerState>,
Json(request): Json<LaunchRequest>,
) -> Result<Json<LaunchResponse>, (StatusCode, Json<serde_json::Value>)> {
info!("Launching new g3 instance: {:?}", request);
debug!("Launching new g3 instance: {:?}", request);
// Validate binary path if provided
if let Some(ref binary_path) = request.g3_binary_path {
@@ -149,7 +149,7 @@ pub async fn launch_instance(
) {
Ok(pid) => {
let id = format!("{}_{}", pid, chrono::Utc::now().timestamp());
info!("Successfully launched g3 instance with PID {}", pid);
debug!("Successfully launched g3 instance with PID {}", pid);
Ok(Json(LaunchResponse {
id,
status: "starting".to_string(),

View File

@@ -3,7 +3,7 @@ use axum::{http::StatusCode, Json};
use serde::{Deserialize, Serialize};
use std::os::unix::fs::PermissionsExt;
use std::path::PathBuf;
use tracing::{error, info};
use tracing::{debug, error};
pub async fn get_state() -> Result<Json<ConsoleState>, StatusCode> {
let state = ConsoleState::load();
@@ -15,7 +15,7 @@ pub async fn save_state(
) -> Result<Json<serde_json::Value>, StatusCode> {
match state.save() {
Ok(_) => {
info!("Console state saved successfully");
debug!("Console state saved successfully");
Ok(Json(serde_json::json!({
"status": "saved"
})))

View File

@@ -1,7 +1,7 @@
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::PathBuf;
use tracing::info;
use tracing::debug;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsoleState {
@@ -42,7 +42,7 @@ impl ConsoleState {
pub fn save(&self) -> anyhow::Result<()> {
let config_path = Self::config_path();
info!("Saving console state to: {:?}", config_path);
debug!("Saving console state to: {:?}", config_path);
// Create parent directory if it doesn't exist
if let Some(parent) = config_path.parent() {
@@ -51,7 +51,7 @@ impl ConsoleState {
let content = serde_json::to_string_pretty(self)?;
fs::write(&config_path, content)?;
info!("Console state saved successfully to: {:?}", config_path);
debug!("Console state saved successfully to: {:?}", config_path);
Ok(())
}

View File

@@ -16,7 +16,7 @@ use std::sync::Arc;
use tokio::sync::Mutex;
use tower_http::cors::CorsLayer;
use tower_http::services::ServeDir;
use tracing::{info, Level};
use tracing::{debug, Level};
use tracing_subscriber;
#[derive(Parser, Debug)]
@@ -84,12 +84,12 @@ async fn main() -> anyhow::Result<()> {
.layer(CorsLayer::permissive());
let addr = format!("{}:{}", args.host, args.port);
info!("Starting g3-console on http://{}", addr);
debug!("Starting g3-console on http://{}", addr);
// Auto-open browser if requested
if args.open {
let url = format!("http://{}", addr);
info!("Opening browser to {}", url);
debug!("Opening browser to {}", url);
let _ = open::that(&url);
}

View File

@@ -6,7 +6,7 @@ use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::sync::Mutex;
use sysinfo::{Pid, Process, Signal, System};
use tracing::{debug, info};
use tracing::debug;
pub struct ProcessController {
system: System,
@@ -26,7 +26,7 @@ impl ProcessController {
self.system.refresh_processes();
if let Some(process) = self.system.process(sysinfo_pid) {
info!("Killing process {} ({})", pid, process.name());
debug!("Killing process {} ({})", pid, process.name());
// Try SIGTERM first
if process.kill_with(Signal::Term).is_some() {
@@ -107,7 +107,7 @@ impl ProcessController {
});
}
info!("Launching g3: {:?}", cmd);
debug!("Launching g3: {:?}", cmd);
// Spawn and wait for the intermediate process to exit
let mut child = cmd.spawn().context("Failed to spawn g3 process")?;
@@ -120,7 +120,7 @@ impl ProcessController {
// The actual g3 process is now running as orphan
// We need to scan for it by matching workspace and recent start time
info!(
debug!(
"Scanning for newly launched g3 process in workspace: {}",
workspace
);
@@ -171,7 +171,7 @@ impl ProcessController {
found
} else {
// If we couldn't find it, try one more refresh after a longer delay
info!("Process not found on first scan, trying again...");
debug!("Process not found on first scan, trying again...");
std::thread::sleep(std::time::Duration::from_millis(2000));
self.system.refresh_processes();
@@ -204,7 +204,7 @@ impl ProcessController {
retry_found.unwrap_or(intermediate_pid)
};
info!("Launched g3 process with PID {}", pid);
debug!("Launched g3 process with PID {}", pid);
// Store launch params for restart
let params = LaunchParams {

View File

@@ -3,7 +3,7 @@ use anyhow::Result;
use chrono::{DateTime, Utc};
use std::path::PathBuf;
use sysinfo::{Pid, Process, System};
use tracing::{debug, info, warn};
use tracing::{debug, warn};
pub struct ProcessDetector {
system: System,
@@ -17,7 +17,7 @@ impl ProcessDetector {
}
pub fn detect_instances(&mut self) -> Result<Vec<Instance>> {
info!("Scanning for g3 processes...");
debug!("Scanning for g3 processes...");
// Refresh all processes to ensure we catch newly started ones
// Using refresh_all() instead of just refresh_processes() to ensure
// we get complete information about new processes
@@ -37,7 +37,7 @@ impl ProcessDetector {
}
}
info!("Detected {} g3 instances", instances.len());
debug!("Detected {} g3 instances", instances.len());
Ok(instances)
}

View File

@@ -9,7 +9,7 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::{error, info, warn};
use tracing::{debug, error, warn};
/// Base delay for exponential backoff (in milliseconds)
const BASE_RETRY_DELAY_MS: u64 = 1000;
@@ -149,7 +149,7 @@ impl ErrorContext {
if let Err(e) = std::fs::write(&filename, json_content) {
error!("Failed to save error context to {:?}: {}", &filename, e);
} else {
info!("Error details saved to: {:?}", &filename);
debug!("Error details saved to: {:?}", &filename);
}
}
Err(e) => {
@@ -328,7 +328,7 @@ where
match operation().await {
Ok(result) => {
if attempt > 1 {
info!(
debug!(
"Operation '{}' succeeded after {} attempts",
operation_name, attempt
);
@@ -357,7 +357,7 @@ where
// Special handling for token limit errors
if matches!(recoverable_type, RecoverableError::TokenLimit) {
info!("Token limit error detected. Consider triggering summarization.");
debug!("Token limit error detected. Consider triggering summarization.");
}
tokio::time::sleep(delay).await;

View File

@@ -12,7 +12,7 @@ use crate::{logs_dir, Agent, TaskResult};
use crate::ui_writer::UiWriter;
use serde_json::Value;
use std::path::PathBuf;
use tracing::{debug, info, warn};
use tracing::{debug, warn};
/// Result of feedback extraction with source information
#[derive(Debug, Clone)]
@@ -103,21 +103,21 @@ where
// Try session log first (most reliable)
if let Some(session_id) = agent.get_session_id() {
if let Some(feedback) = try_extract_from_session_log(&session_id, config) {
info!("Extracted coach feedback from session log: {} chars", feedback.len());
debug!("Extracted coach feedback from session log: {} chars", feedback.len());
return ExtractedFeedback::new(feedback, FeedbackSource::SessionLog);
}
}
// Try native tool call JSON parsing
if let Some(feedback) = try_extract_from_native_tool_call(&coach_result.response) {
info!("Extracted coach feedback from native tool call: {} chars", feedback.len());
debug!("Extracted coach feedback from native tool call: {} chars", feedback.len());
return ExtractedFeedback::new(feedback, FeedbackSource::NativeToolCall);
}
// Try conversation history
if let Some(session_id) = agent.get_session_id() {
if let Some(feedback) = try_extract_from_conversation_history(&session_id, config) {
info!("Extracted coach feedback from conversation history: {} chars", feedback.len());
debug!("Extracted coach feedback from conversation history: {} chars", feedback.len());
return ExtractedFeedback::new(feedback, FeedbackSource::ConversationHistory);
}
}
@@ -125,7 +125,7 @@ where
// Try TaskResult parsing
let extracted = coach_result.extract_final_output();
if !extracted.is_empty() {
info!("Extracted coach feedback from task result: {} chars", extracted.len());
debug!("Extracted coach feedback from task result: {} chars", extracted.len());
return ExtractedFeedback::new(extracted, FeedbackSource::TaskResultResponse);
}

View File

@@ -39,7 +39,7 @@ use serde_json::json;
use std::io::Write;
use std::time::{Duration, Instant};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
use tracing::{debug, error, warn};
/// Get the path to the todo.g3.md file.
///
@@ -2778,7 +2778,7 @@ impl<W: UiWriter> Agent<W> {
/// Manually trigger context summarization regardless of context window size
/// Returns Ok(true) if summarization was successful, Ok(false) if it failed
pub async fn force_summarize(&mut self) -> Result<bool> {
info!("Manual summarization triggered");
debug!("Manual summarization triggered");
self.ui_writer.print_context_status(&format!(
"\n🗜️ Manual summarization requested (current usage: {}%)...",
@@ -2896,7 +2896,7 @@ impl<W: UiWriter> Agent<W> {
/// Manually trigger context thinning regardless of thresholds
pub fn force_thin(&mut self) -> String {
info!("Manual context thinning triggered");
debug!("Manual context thinning triggered");
let (message, chars_saved) = self.context_window.thin_context(self.session_id.as_deref());
self.thinning_events.push(chars_saved);
message
@@ -2905,7 +2905,7 @@ impl<W: UiWriter> Agent<W> {
/// Manually trigger context thinning for the ENTIRE context window
/// Unlike force_thin which only processes the first third, this processes all messages
pub fn force_thin_all(&mut self) -> String {
info!("Manual full context skinnifying triggered");
debug!("Manual full context skinnifying triggered");
let (message, chars_saved) = self.context_window.thin_context_all(self.session_id.as_deref());
self.thinning_events.push(chars_saved);
message
@@ -2914,7 +2914,7 @@ impl<W: UiWriter> Agent<W> {
/// Reload README.md and AGENTS.md and replace the first system message
/// Returns Ok(true) if README was found and reloaded, Ok(false) if no README was present initially
pub fn reload_readme(&mut self) -> Result<bool> {
info!("Manual README reload triggered");
debug!("Manual README reload triggered");
// Check if the second message in conversation history is a system message with README content
// (The first message should always be the system prompt)
@@ -2957,7 +2957,7 @@ impl<W: UiWriter> Agent<W> {
// Replace the second message (README) with the new content
if let Some(first_msg) = self.context_window.conversation_history.get_mut(1) {
first_msg.content = combined_content;
info!("README content reloaded successfully");
debug!("README content reloaded successfully");
Ok(true)
} else {
Ok(false)
@@ -3191,7 +3191,7 @@ impl<W: UiWriter> Agent<W> {
error!("Failed to clear continuation artifacts: {}", e);
}
info!("Session cleared");
debug!("Session cleared");
}
/// Restore session from a continuation artifact
@@ -3236,7 +3236,7 @@ impl<W: UiWriter> Agent<W> {
});
}
info!("Restored full context from session log");
debug!("Restored full context from session log");
return Ok(true);
}
}
@@ -3261,7 +3261,7 @@ impl<W: UiWriter> Agent<W> {
});
}
info!("Restored session from summary");
debug!("Restored session from summary");
Ok(false)
}
@@ -3871,7 +3871,7 @@ impl<W: UiWriter> Agent<W> {
match provider.stream(request.clone()).await {
Ok(stream) => {
if attempt > 1 {
info!("Stream started successfully after {} attempts", attempt);
debug!("Stream started successfully after {} attempts", attempt);
}
debug!("Stream started successfully");
debug!(
@@ -6559,7 +6559,7 @@ impl<W: UiWriter> Agent<W> {
let driver = mutex.into_inner();
match driver.quit().await {
Ok(_) => {
info!("WebDriver session closed successfully");
debug!("WebDriver session closed successfully");
// Kill the safaridriver process
if let Some(mut process) =
@@ -6568,7 +6568,7 @@ impl<W: UiWriter> Agent<W> {
if let Err(e) = process.kill().await {
warn!("Failed to kill safaridriver process: {}", e);
} else {
info!("Safaridriver process terminated");
debug!("Safaridriver process terminated");
}
}

View File

@@ -10,7 +10,7 @@ use crate::ui_writer::UiWriter;
use crate::{Agent, DiscoveryOptions, TaskResult};
use anyhow::Result;
use std::time::Instant;
use tracing::{info, warn};
use tracing::{debug, warn};
/// Configuration for retry behavior
#[derive(Debug, Clone)]
@@ -142,7 +142,7 @@ where
match result {
Ok(task_result) => {
if retry_count > 0 {
info!(
debug!(
"{} task succeeded after {} retries (elapsed: {:?})",
config.role_name,
retry_count,
@@ -259,7 +259,7 @@ where
match operation().await {
Ok(result) => {
if retry_count > 0 {
info!(
debug!(
"Operation '{}' succeeded after {} retries",
operation_name, retry_count
);

View File

@@ -6,7 +6,7 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use tracing::{debug, error, info, warn};
use tracing::{debug, error, warn};
/// Version of the session continuation format
const CONTINUATION_VERSION: &str = "1.0";
@@ -89,7 +89,7 @@ pub fn save_continuation(continuation: &SessionContinuation) -> Result<PathBuf>
let json = serde_json::to_string_pretty(continuation)?;
std::fs::write(&latest_path, &json)?;
info!("Saved session continuation to {:?}", latest_path);
debug!("Saved session continuation to {:?}", latest_path);
Ok(latest_path)
}
@@ -113,7 +113,7 @@ pub fn load_continuation() -> Result<Option<SessionContinuation>> {
);
}
info!("Loaded session continuation from {:?}", latest_path);
debug!("Loaded session continuation from {:?}", latest_path);
Ok(Some(continuation))
}
@@ -131,7 +131,7 @@ pub fn clear_continuation() -> Result<()> {
debug!("Removed session file: {:?}", path);
}
}
info!("Cleared session continuation artifacts");
debug!("Cleared session continuation artifacts");
}
Ok(())

View File

@@ -7,7 +7,7 @@ use std::path::{Path, PathBuf};
use std::process::Stdio;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Command;
use tracing::{debug, error, info, warn};
use tracing::{debug, error, warn};
use uuid::Uuid;
use crate::status::{FlockStatus, SegmentState, SegmentStatus};
@@ -174,7 +174,7 @@ impl FlockMode {
/// Run flock mode
pub async fn run(&mut self) -> Result<()> {
info!(
debug!(
"Starting flock mode with {} segments",
self.config.num_segments
);
@@ -625,7 +625,7 @@ async fn run_segment(
status_file: PathBuf,
session_id: String,
) -> Result<SegmentStatus> {
info!(
debug!(
"Starting segment {} in {}",
segment_id,
segment_dir.display()

View File

@@ -3,7 +3,7 @@ use regex::Regex;
use std::io::Write;
use std::process::Command;
use tempfile::NamedTempFile;
use tracing::{debug, error, info};
use tracing::{debug, error};
/// Expand tilde (~) in a path to the user's home directory
fn expand_tilde(path: &str) -> String {
@@ -72,7 +72,7 @@ impl CodeExecutor {
}
for (language, code) in code_blocks {
info!("Executing {} code", language);
debug!("Executing {} code", language);
if show_code {
results.push(format!("📋 Running {} code:", language));
@@ -459,7 +459,7 @@ pub fn is_cargo_llvm_cov_installed() -> Result<bool> {
/// Install llvm-tools-preview via rustup
pub fn install_llvm_tools() -> Result<()> {
info!("Installing llvm-tools-preview...");
debug!("Installing llvm-tools-preview...");
let output = Command::new("rustup")
.args(&["component", "add", "llvm-tools-preview"])
.output()?;
@@ -469,13 +469,13 @@ pub fn install_llvm_tools() -> Result<()> {
anyhow::bail!("Failed to install llvm-tools-preview: {}", stderr);
}
info!("✅ llvm-tools-preview installed successfully");
debug!("✅ llvm-tools-preview installed successfully");
Ok(())
}
/// Install cargo-llvm-cov via cargo install
pub fn install_cargo_llvm_cov() -> Result<()> {
info!("Installing cargo-llvm-cov... (this may take a few minutes)");
debug!("Installing cargo-llvm-cov... (this may take a few minutes)");
let output = Command::new("cargo")
.args(&["install", "cargo-llvm-cov"])
.output()?;
@@ -485,7 +485,7 @@ pub fn install_cargo_llvm_cov() -> Result<()> {
anyhow::bail!("Failed to install cargo-llvm-cov: {}", stderr);
}
info!("✅ cargo-llvm-cov installed successfully");
debug!("✅ cargo-llvm-cov installed successfully");
Ok(())
}
@@ -496,20 +496,20 @@ pub fn ensure_coverage_tools_installed() -> Result<bool> {
// Check and install llvm-tools-preview
if !is_llvm_tools_installed()? {
info!("llvm-tools-preview not found, installing...");
debug!("llvm-tools-preview not found, installing...");
install_llvm_tools()?;
already_installed = false;
} else {
info!("✅ llvm-tools-preview is already installed");
debug!("✅ llvm-tools-preview is already installed");
}
// Check and install cargo-llvm-cov
if !is_cargo_llvm_cov_installed()? {
info!("cargo-llvm-cov not found, installing...");
debug!("cargo-llvm-cov not found, installing...");
install_cargo_llvm_cov()?;
already_installed = false;
} else {
info!("✅ cargo-llvm-cov is already installed");
debug!("✅ cargo-llvm-cov is already installed");
}
Ok(already_installed)

View File

@@ -328,7 +328,7 @@ impl AnthropicProvider {
tracing::debug!("create_request_body called: max_tokens={}, disable_thinking={}, thinking_budget_tokens={:?}", max_tokens, disable_thinking, self.thinking_budget_tokens);
let thinking = if disable_thinking {
tracing::info!(
tracing::debug!(
"Thinking mode explicitly disabled for this request (max_tokens={})",
max_tokens
);

View File

@@ -64,7 +64,7 @@ use serde::{Deserialize, Serialize};
use std::time::Duration;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tracing::{debug, error, info, warn};
use tracing::{debug, error, warn};
use crate::{
CompletionChunk, CompletionRequest, CompletionResponse, CompletionStream, LLMProvider, Message,
@@ -166,7 +166,7 @@ impl DatabricksProvider {
.build()
.map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?;
info!(
debug!(
"Initialized Databricks provider with model: {} on host: {}",
model, host
);
@@ -196,7 +196,7 @@ impl DatabricksProvider {
.build()
.map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?;
info!("Initialized Databricks provider '{}' with model: {} on host: {}", name, model, host);
debug!("Initialized Databricks provider '{}' with model: {} on host: {}", name, model, host);
Ok(Self {
client,
@@ -220,7 +220,7 @@ impl DatabricksProvider {
.build()
.map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?;
info!(
debug!(
"Initialized Databricks provider with OAuth for model: {} on host: {}",
model, host
);
@@ -249,7 +249,7 @@ impl DatabricksProvider {
.build()
.map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?;
info!("Initialized Databricks provider '{}' with OAuth for model: {} on host: {}", name, model, host);
debug!("Initialized Databricks provider '{}' with OAuth for model: {} on host: {}", name, model, host);
Ok(Self {
client,
@@ -857,7 +857,7 @@ impl LLMProvider for DatabricksProvider {
if status == reqwest::StatusCode::FORBIDDEN
&& (error_text.contains("Invalid Token") || error_text.contains("invalid_token"))
{
info!("Received 403 Invalid Token error, attempting to refresh OAuth token");
debug!("Received 403 Invalid Token error, attempting to refresh OAuth token");
// Try to refresh the token if we're using OAuth
if let DatabricksAuth::OAuth { .. } = &provider_clone.auth {
@@ -867,7 +867,7 @@ impl LLMProvider for DatabricksProvider {
// Try to get a new token (will attempt refresh or new OAuth flow)
match provider_clone.auth.get_token().await {
Ok(_new_token) => {
info!("Successfully refreshed OAuth token, retrying request");
debug!("Successfully refreshed OAuth token, retrying request");
// Retry the request with the new token
response = provider_clone
@@ -1038,7 +1038,7 @@ impl LLMProvider for DatabricksProvider {
if status == reqwest::StatusCode::FORBIDDEN
&& (error_text.contains("Invalid Token") || error_text.contains("invalid_token"))
{
info!("Received 403 Invalid Token error, attempting to refresh OAuth token");
debug!("Received 403 Invalid Token error, attempting to refresh OAuth token");
// Try to refresh the token if we're using OAuth
if let DatabricksAuth::OAuth { .. } = &provider_clone.auth {
@@ -1048,7 +1048,7 @@ impl LLMProvider for DatabricksProvider {
// Try to get a new token (will attempt refresh or new OAuth flow)
match provider_clone.auth.get_token().await {
Ok(_new_token) => {
info!("Successfully refreshed OAuth token, retrying streaming request");
debug!("Successfully refreshed OAuth token, retrying streaming request");
// Retry the request with the new token
response = provider_clone

View File

@@ -12,7 +12,7 @@ use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::sync::Mutex;
use tokio_stream::wrappers::ReceiverStream;
use tracing::{debug, error, info};
use tracing::{debug, error};
pub struct EmbeddedProvider {
session: Arc<Mutex<LlamaSession>>,
@@ -32,7 +32,7 @@ impl EmbeddedProvider {
gpu_layers: Option<u32>,
threads: Option<u32>,
) -> Result<Self> {
info!("Loading embedded model from: {}", model_path);
debug!("Loading embedded model from: {}", model_path);
// Expand tilde in path
let expanded_path = shellexpand::tilde(&model_path);
@@ -41,7 +41,7 @@ impl EmbeddedProvider {
// If model doesn't exist and it's the default Qwen model, offer to download it
if !model_path_buf.exists() {
if model_path.contains("qwen2.5-7b-instruct-q3_k_m.gguf") {
info!("Model file not found. Attempting to download Qwen 2.5 7B model...");
debug!("Model file not found. Attempting to download Qwen 2.5 7B model...");
Self::download_qwen_model(&model_path_buf)?;
} else {
anyhow::bail!("Model file not found: {}", model_path_buf.display());
@@ -55,14 +55,14 @@ impl EmbeddedProvider {
if let Some(gpu_layers) = gpu_layers {
params.n_gpu_layers = gpu_layers;
info!("Using {} GPU layers", gpu_layers);
debug!("Using {} GPU layers", gpu_layers);
}
let context_size = context_length.unwrap_or(4096);
info!("Using context length: {}", context_size);
debug!("Using context length: {}", context_size);
// Load the model
info!("Loading model...");
debug!("Loading model...");
let model = LlamaModel::load_from_file(model_path, params)
.map_err(|e| anyhow::anyhow!("Failed to load model: {}", e))?;
@@ -79,7 +79,7 @@ impl EmbeddedProvider {
.create_session(session_params)
.map_err(|e| anyhow::anyhow!("Failed to create session: {}", e))?;
info!("Successfully loaded {} model", model_type);
debug!("Successfully loaded {} model", model_type);
Ok(Self {
session: Arc::new(Mutex::new(session)),
@@ -330,7 +330,7 @@ impl EmbeddedProvider {
Ok(inner_result) => match inner_result {
Ok(task_result) => match task_result {
Ok((text, token_count)) => {
info!(
debug!(
"Completed generation: {} tokens (dynamic limit was {})",
token_count, dynamic_max_tokens
);
@@ -448,9 +448,9 @@ impl EmbeddedProvider {
fs::create_dir_all(parent)?;
}
info!("Downloading Qwen 2.5 7B model (Q3_K_M quantization, ~3.5GB)...");
info!("This is a one-time download that may take several minutes depending on your connection.");
info!("Downloading to: {}", model_path.display());
debug!("Downloading Qwen 2.5 7B model (Q3_K_M quantization, ~3.5GB)...");
debug!("This is a one-time download that may take several minutes depending on your connection.");
debug!("Downloading to: {}", model_path.display());
// Use curl with progress bar for download
let output = Command::new("curl")
@@ -497,7 +497,7 @@ impl EmbeddedProvider {
);
}
info!("Successfully downloaded Qwen 2.5 7B model ({}MB)", size_mb);
debug!("Successfully downloaded Qwen 2.5 7B model ({}MB)", size_mb);
Ok(())
}
}

View File

@@ -392,7 +392,7 @@ pub async fn get_oauth_token_async(
if let Err(e) = token_cache.save_token(&new_token) {
tracing::warn!("Failed to save refreshed token: {}", e);
}
tracing::info!("Successfully refreshed token");
tracing::debug!("Successfully refreshed token");
return Ok(new_token.access_token);
}
Err(e) => {