refactor: mover logs verbose de info! para trace!
All checks were successful
BotServer CI/CD / build (push) Successful in 3m25s

Move logs detalhados de LLM e DriveMonitor de info! para trace!
para reduzir poluição nos logs de produção:

- bot/mod.rs: LLM chunk logs, streaming start, abort
- llm/mod.rs: LLM Request Details, provider creation logs

Estes logs são úteis para debug mas geram muito ruído em produção.
Com trace! só aparecem quando RUST_LOG=trace está configurado.
This commit is contained in:
Rodrigo Rodriguez (Pragmatismo) 2026-04-15 12:41:31 -03:00
parent d1cd7513d7
commit adbf84f812
2 changed files with 28 additions and 28 deletions

View file

@@ -788,11 +788,11 @@ impl BotOrchestrator {
// Use bot-specific LLM provider if the bot has its own llm-url configured
let llm: std::sync::Arc<dyn crate::llm::LLMProvider> = if let Some(ref url) = bot_llm_url {
info!("Bot has custom llm-url: {}, creating per-bot LLM provider", url);
trace!("Bot has custom llm-url: {}, creating per-bot LLM provider", url);
// Parse explicit provider type if configured (e.g., "openai", "bedrock", "claude")
let explicit_type = explicit_llm_provider.as_ref().map(|p| {
let parsed: crate::llm::LLMProviderType = p.as_str().into();
info!("Using explicit llm-provider config: {:?} for bot {}", parsed, session.bot_id);
trace!("Using explicit llm-provider config: {:?} for bot {}", parsed, session.bot_id);
parsed
});
crate::llm::create_llm_provider_from_url(url, Some(model.clone()), bot_endpoint_path, explicit_type)
@@ -849,13 +849,13 @@ impl BotOrchestrator {
}
});
// Wait for cancellation to abort LLM task
tokio::spawn(async move {
if cancel_rx_for_abort.recv().await.is_ok() {
info!("Aborting LLM task for session {}", session_id_str);
llm_task.abort();
}
});
// Wait for cancellation to abort LLM task
tokio::spawn(async move {
if cancel_rx_for_abort.recv().await.is_ok() {
trace!("Aborting LLM task for session {}", session_id_str);
llm_task.abort();
}
});
let mut full_response = String::new();
let mut analysis_buffer = String::new();
@@ -866,7 +866,7 @@ impl BotOrchestrator {
let _handler = llm_models::get_handler(&model);
trace!("Using model handler for {}", model);
info!("LLM streaming started for session {}", session.id);
trace!("LLM streaming started for session {}", session.id);
trace!("Receiving LLM stream chunks...");
let mut chunk_count: usize = 0;
@@ -903,10 +903,10 @@ while let Some(chunk) = stream_rx.recv().await {
Err(broadcast::error::TryRecvError::Lagged(_)) => {}
}
chunk_count += 1;
if chunk_count <= 3 || chunk_count % 50 == 0 {
info!("LLM chunk #{chunk_count} received for session {} (len={})", session.id, chunk.len());
}
chunk_count += 1;
if chunk_count <= 3 || chunk_count % 50 == 0 {
trace!("LLM chunk #{} received for session {} (len={})", chunk_count, session.id, chunk.len());
}
// ===== GENERIC TOOL EXECUTION =====
// Add chunk to tool_call_buffer and try to parse

View file

@@ -1,6 +1,6 @@
use async_trait::async_trait;
use futures::StreamExt;
use log::{error, info};
use log::{error, info, trace};
use serde_json::Value;
use std::sync::Arc;
use tokio::sync::{mpsc, RwLock};
@@ -291,15 +291,15 @@ impl LLMProvider for OpenAIClient {
let auth_header = format!("Bearer {}", key);
// Debug logging to help troubleshoot 401 errors
info!("LLM Request Details:");
info!(" URL: {}", full_url);
info!(" Authorization: Bearer <{} chars>", key.len());
info!(" Model: {}", model);
trace!("LLM Request Details:");
trace!(" URL: {}", full_url);
trace!(" Authorization: Bearer <{} chars>", key.len());
trace!(" Model: {}", model);
if let Some(msg_array) = messages.as_array() {
info!(" Messages: {} messages", msg_array.len());
trace!(" Messages: {} messages", msg_array.len());
}
info!(" API Key First 8 chars: '{}...'", &key.chars().take(8).collect::<String>());
info!(" API Key Last 8 chars: '...{}'", &key.chars().rev().take(8).collect::<String>());
trace!(" API Key First 8 chars: '{}...'", &key.chars().take(8).collect::<String>());
trace!(" API Key Last 8 chars: '...{}'", &key.chars().rev().take(8).collect::<String>());
// Build the request body (no tools for non-streaming generate)
let response = self
@@ -381,15 +381,15 @@ impl LLMProvider for OpenAIClient {
let auth_header = format!("Bearer {}", key);
// Debug logging to help troubleshoot 401 errors
info!("LLM Request Details:");
info!(" URL: {}", full_url);
info!(" Authorization: Bearer <{} chars>", key.len());
info!(" Model: {}", model);
trace!("LLM Request Details:");
trace!(" URL: {}", full_url);
trace!(" Authorization: Bearer <{} chars>", key.len());
trace!(" Model: {}", model);
if let Some(msg_array) = messages.as_array() {
info!(" Messages: {} messages", msg_array.len());
trace!(" Messages: {} messages", msg_array.len());
}
if let Some(tools) = tools {
info!(" Tools: {} tools provided", tools.len());
trace!(" Tools: {} tools provided", tools.len());
}
// Build the request body - include tools if provided