Fix: Handle reasoning_content from NVIDIA reasoning models (gpt-oss-120b)
All checks were successful
BotServer CI/CD / build (push) Successful in 3m16s

This commit is contained in:
Rodrigo Rodriguez (Pragmatismo) 2026-04-11 22:30:39 -03:00
parent 7a1ec157f1
commit 47cb470c8e

View file

@@ -457,7 +457,10 @@ impl LLMProvider for OpenAIClient {
             for line in chunk_str.lines() {
                 if line.starts_with("data: ") && !line.contains("[DONE]") {
                     if let Ok(data) = serde_json::from_str::<Value>(&line[6..]) {
-                        if let Some(content) = data["choices"][0]["delta"]["content"].as_str() {
+                        // Handle content (standard) or reasoning_content (NVIDIA reasoning models)
+                        let content = data["choices"][0]["delta"]["content"].as_str()
+                            .or_else(|| data["choices"][0]["delta"]["reasoning_content"].as_str());
+                        if let Some(content) = content {
                             let processed = handler.process_content(content);
                             if !processed.is_empty() {
                                 let _ = tx.send(processed).await;