Convert all INFO logs to DEBUG to reduce CLI noise

Converted ~77 info! macro calls to debug! across the codebase to prevent
log messages from interrupting the CLI experience during normal operation.
Users can still see these logs by setting RUST_LOG=debug if needed.

Affected crates:
- g3-cli
- g3-computer-control
- g3-console
- g3-core
- g3-ensembles
- g3-execution
- g3-providers
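
For context, the RUST_LOG escape hatch works because the CLI's tracing
subscriber is driven by an EnvFilter; the sketch below is an assumed
minimal setup for illustration, not g3's actual initialization code:

    use tracing_subscriber::EnvFilter;

    fn main() {
        tracing_subscriber::fmt()
            // RUST_LOG=debug re-enables everything this commit demoted;
            // with RUST_LOG unset, fall back to info-and-above, which no
            // longer includes these messages.
            .with_env_filter(
                EnvFilter::try_from_default_env()
                    .unwrap_or_else(|_| EnvFilter::new("info")),
            )
            .init();

        tracing::debug!("hidden unless RUST_LOG is at least debug");
        tracing::warn!("always shown");
    }

EnvFilter also supports per-target opt-in, e.g. RUST_LOG=g3_providers=debug
to surface only the provider logs demoted here.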
Author: Dhanji R. Prasanna
Date: 2025-12-22 16:27:35 +11:00
Parent: 58cbf3431a
Commit: 923def0ab2

19 changed files with 92 additions and 92 deletions


@@ -328,7 +328,7 @@ impl AnthropicProvider {
         tracing::debug!("create_request_body called: max_tokens={}, disable_thinking={}, thinking_budget_tokens={:?}", max_tokens, disable_thinking, self.thinking_budget_tokens);
         let thinking = if disable_thinking {
-            tracing::info!(
+            tracing::debug!(
                 "Thinking mode explicitly disabled for this request (max_tokens={})",
                 max_tokens
             );


@@ -64,7 +64,7 @@ use serde::{Deserialize, Serialize};
 use std::time::Duration;
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
-use tracing::{debug, error, info, warn};
+use tracing::{debug, error, warn};
 use crate::{
     CompletionChunk, CompletionRequest, CompletionResponse, CompletionStream, LLMProvider, Message,
@@ -166,7 +166,7 @@ impl DatabricksProvider {
             .build()
             .map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?;
-        info!(
+        debug!(
            "Initialized Databricks provider with model: {} on host: {}",
            model, host
        );
@@ -196,7 +196,7 @@ impl DatabricksProvider {
             .build()
             .map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?;
-        info!("Initialized Databricks provider '{}' with model: {} on host: {}", name, model, host);
+        debug!("Initialized Databricks provider '{}' with model: {} on host: {}", name, model, host);
         Ok(Self {
             client,
@@ -220,7 +220,7 @@ impl DatabricksProvider {
             .build()
             .map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?;
-        info!(
+        debug!(
            "Initialized Databricks provider with OAuth for model: {} on host: {}",
            model, host
        );
@@ -249,7 +249,7 @@ impl DatabricksProvider {
             .build()
             .map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?;
-        info!("Initialized Databricks provider '{}' with OAuth for model: {} on host: {}", name, model, host);
+        debug!("Initialized Databricks provider '{}' with OAuth for model: {} on host: {}", name, model, host);
         Ok(Self {
             client,
@@ -857,7 +857,7 @@ impl LLMProvider for DatabricksProvider {
                 if status == reqwest::StatusCode::FORBIDDEN
                     && (error_text.contains("Invalid Token") || error_text.contains("invalid_token"))
                 {
-                    info!("Received 403 Invalid Token error, attempting to refresh OAuth token");
+                    debug!("Received 403 Invalid Token error, attempting to refresh OAuth token");
                     // Try to refresh the token if we're using OAuth
                     if let DatabricksAuth::OAuth { .. } = &provider_clone.auth {
@@ -867,7 +867,7 @@ impl LLMProvider for DatabricksProvider {
                         // Try to get a new token (will attempt refresh or new OAuth flow)
                         match provider_clone.auth.get_token().await {
                             Ok(_new_token) => {
-                                info!("Successfully refreshed OAuth token, retrying request");
+                                debug!("Successfully refreshed OAuth token, retrying request");
                                 // Retry the request with the new token
                                 response = provider_clone
@@ -1038,7 +1038,7 @@ impl LLMProvider for DatabricksProvider {
                 if status == reqwest::StatusCode::FORBIDDEN
                     && (error_text.contains("Invalid Token") || error_text.contains("invalid_token"))
                 {
-                    info!("Received 403 Invalid Token error, attempting to refresh OAuth token");
+                    debug!("Received 403 Invalid Token error, attempting to refresh OAuth token");
                     // Try to refresh the token if we're using OAuth
                     if let DatabricksAuth::OAuth { .. } = &provider_clone.auth {
@@ -1048,7 +1048,7 @@ impl LLMProvider for DatabricksProvider {
                         // Try to get a new token (will attempt refresh or new OAuth flow)
                         match provider_clone.auth.get_token().await {
                             Ok(_new_token) => {
-                                info!("Successfully refreshed OAuth token, retrying streaming request");
+                                debug!("Successfully refreshed OAuth token, retrying streaming request");
                                 // Retry the request with the new token
                                 response = provider_clone


@@ -12,7 +12,7 @@ use std::sync::Arc;
 use tokio::sync::mpsc;
 use tokio::sync::Mutex;
 use tokio_stream::wrappers::ReceiverStream;
-use tracing::{debug, error, info};
+use tracing::{debug, error};
 pub struct EmbeddedProvider {
     session: Arc<Mutex<LlamaSession>>,
@@ -32,7 +32,7 @@ impl EmbeddedProvider {
         gpu_layers: Option<u32>,
         threads: Option<u32>,
     ) -> Result<Self> {
-        info!("Loading embedded model from: {}", model_path);
+        debug!("Loading embedded model from: {}", model_path);
         // Expand tilde in path
         let expanded_path = shellexpand::tilde(&model_path);
@@ -41,7 +41,7 @@ impl EmbeddedProvider {
         // If model doesn't exist and it's the default Qwen model, offer to download it
         if !model_path_buf.exists() {
             if model_path.contains("qwen2.5-7b-instruct-q3_k_m.gguf") {
-                info!("Model file not found. Attempting to download Qwen 2.5 7B model...");
+                debug!("Model file not found. Attempting to download Qwen 2.5 7B model...");
                 Self::download_qwen_model(&model_path_buf)?;
             } else {
                 anyhow::bail!("Model file not found: {}", model_path_buf.display());
@@ -55,14 +55,14 @@ impl EmbeddedProvider {
         if let Some(gpu_layers) = gpu_layers {
             params.n_gpu_layers = gpu_layers;
-            info!("Using {} GPU layers", gpu_layers);
+            debug!("Using {} GPU layers", gpu_layers);
         }
         let context_size = context_length.unwrap_or(4096);
-        info!("Using context length: {}", context_size);
+        debug!("Using context length: {}", context_size);
         // Load the model
-        info!("Loading model...");
+        debug!("Loading model...");
         let model = LlamaModel::load_from_file(model_path, params)
             .map_err(|e| anyhow::anyhow!("Failed to load model: {}", e))?;
@@ -79,7 +79,7 @@ impl EmbeddedProvider {
             .create_session(session_params)
             .map_err(|e| anyhow::anyhow!("Failed to create session: {}", e))?;
-        info!("Successfully loaded {} model", model_type);
+        debug!("Successfully loaded {} model", model_type);
         Ok(Self {
             session: Arc::new(Mutex::new(session)),
@@ -330,7 +330,7 @@ impl EmbeddedProvider {
             Ok(inner_result) => match inner_result {
                 Ok(task_result) => match task_result {
                     Ok((text, token_count)) => {
-                        info!(
+                        debug!(
                             "Completed generation: {} tokens (dynamic limit was {})",
                             token_count, dynamic_max_tokens
                         );
@@ -448,9 +448,9 @@ impl EmbeddedProvider {
             fs::create_dir_all(parent)?;
         }
-        info!("Downloading Qwen 2.5 7B model (Q3_K_M quantization, ~3.5GB)...");
-        info!("This is a one-time download that may take several minutes depending on your connection.");
-        info!("Downloading to: {}", model_path.display());
+        debug!("Downloading Qwen 2.5 7B model (Q3_K_M quantization, ~3.5GB)...");
+        debug!("This is a one-time download that may take several minutes depending on your connection.");
+        debug!("Downloading to: {}", model_path.display());
         // Use curl with progress bar for download
         let output = Command::new("curl")
@@ -497,7 +497,7 @@ impl EmbeddedProvider {
             );
         }
-        info!("Successfully downloaded Qwen 2.5 7B model ({}MB)", size_mb);
+        debug!("Successfully downloaded Qwen 2.5 7B model ({}MB)", size_mb);
         Ok(())
     }
 }


@@ -392,7 +392,7 @@ pub async fn get_oauth_token_async(
             if let Err(e) = token_cache.save_token(&new_token) {
                 tracing::warn!("Failed to save refreshed token: {}", e);
             }
-            tracing::info!("Successfully refreshed token");
+            tracing::debug!("Successfully refreshed token");
             return Ok(new_token.access_token);
         }
         Err(e) => {