botserver/src/basic/keywords/llm_keyword.rs
Rodrigo Rodriguez 5ea171d126
Refactor: Split large files into modular subdirectories
Split 20+ files, each over 1,000 lines long, into focused subdirectories for
better maintainability and code organization. All changes maintain backward
compatibility through re-export wrappers (a sketch of the pattern follows the
stats below).

Major splits:
- attendance/llm_assist.rs (2074 lines → 7 modules)
- basic/keywords/face_api.rs → face_api/ (7 modules)
- basic/keywords/file_operations.rs → file_ops/ (8 modules)
- basic/keywords/hear_talk.rs → hearing/ (6 modules)
- channels/wechat.rs → wechat/ (10 modules)
- channels/youtube.rs → youtube/ (5 modules)
- contacts/mod.rs → contacts_api/ (6 modules)
- core/bootstrap/mod.rs → bootstrap/ (5 modules)
- core/shared/admin.rs → admin_*.rs (5 modules)
- designer/canvas.rs → canvas_api/ (6 modules)
- designer/mod.rs → designer_api/ (6 modules)
- docs/handlers.rs → handlers_api/ (11 modules)
- drive/mod.rs → drive_handlers.rs, drive_types.rs
- learn/mod.rs → types.rs
- main.rs → main_module/ (7 modules)
- meet/webinar.rs → webinar_api/ (8 modules)
- paper/mod.rs → (10 modules)
- security/auth.rs → auth_api/ (7 modules)
- security/passkey.rs → (4 modules)
- sources/mod.rs → sources_api/ (5 modules)
- tasks/mod.rs → task_api/ (5 modules)

Stats: 38,040 deletions, 1,315 additions across 318 files
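
The backward-compatibility wrappers follow a common Rust pattern: the old file
path becomes a module root that re-exports the new submodules, so existing
import paths keep compiling. A minimal sketch (the submodule names here are
hypothetical, not taken from the actual split):

// face_api/mod.rs — illustrative wrapper; `detection` and `enrollment` are
// placeholder names, not the real submodules from this commit.
mod detection;
mod enrollment;

// Re-export everything so callers' existing paths, e.g.
// `use crate::basic::keywords::face_api::SomeType`, continue to work.
pub use detection::*;
pub use enrollment::*;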

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-02-12 21:09:30 +00:00


use crate::core::shared::models::UserSession;
use crate::core::shared::state::AppState;
use log::error;
use rhai::{Dynamic, Engine};
use std::sync::Arc;
use std::time::Duration;
use uuid::Uuid;

/// Registers the `LLM <expr>` custom syntax on the Rhai engine. The expression
/// is evaluated to a string, sent to the configured LLM provider, and the
/// generated text is returned to the script.
pub fn llm_keyword(state: Arc<AppState>, _user: UserSession, engine: &mut Engine) {
    let state_clone = Arc::clone(&state);
    engine
        .register_custom_syntax(["LLM", "$expr$"], false, move |context, inputs| {
            let first_input = inputs.first().ok_or_else(|| {
                Box::new(rhai::EvalAltResult::ErrorRuntime(
                    "LLM requires at least one input".into(),
                    rhai::Position::NONE,
                ))
            })?;
            let text = context.eval_expression_tree(first_input)?.to_string();
            let state_for_thread = Arc::clone(&state_clone);
            let prompt = build_llm_prompt(&text);

            // Rhai callbacks are synchronous, so the async provider call runs
            // on a dedicated thread with its own Tokio runtime; the result
            // comes back over an mpsc channel.
            let (tx, rx) = std::sync::mpsc::channel();
            std::thread::spawn(move || {
                let rt = tokio::runtime::Builder::new_multi_thread()
                    .worker_threads(2)
                    .enable_all()
                    .build();
                let send_err = if let Ok(rt) = rt {
                    let result = rt.block_on(async move {
                        execute_llm_generation(state_for_thread, prompt).await
                    });
                    tx.send(result).err()
                } else {
                    tx.send(Err("failed to build tokio runtime".into())).err()
                };
                if send_err.is_some() {
                    error!("Failed to send LLM thread result");
                }
            });

            // Block the script until generation finishes or the 500-second
            // timeout elapses.
            match rx.recv_timeout(Duration::from_secs(500)) {
                Ok(Ok(result)) => Ok(Dynamic::from(result)),
                Ok(Err(e)) => Err(Box::new(rhai::EvalAltResult::ErrorRuntime(
                    e.to_string().into(),
                    rhai::Position::NONE,
                ))),
                Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
                    Err(Box::new(rhai::EvalAltResult::ErrorRuntime(
                        "LLM generation timed out".into(),
                        rhai::Position::NONE,
                    )))
                }
                Err(e) => Err(Box::new(rhai::EvalAltResult::ErrorRuntime(
                    format!("LLM thread failed: {e}").into(),
                    rhai::Position::NONE,
                ))),
            }
        })
        .expect("valid syntax registration");
}

/// Normalizes the user's text before it is sent to the model.
fn build_llm_prompt(user_text: &str) -> String {
    user_text.trim().to_string()
}

/// Looks up the configured model and API key, calls the LLM provider, and
/// post-processes the raw response with the model-specific handler.
pub async fn execute_llm_generation(
    state: Arc<AppState>,
    prompt: String,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
    let config_manager = crate::core::config::ConfigManager::new(state.conn.clone());
    let model = config_manager
        .get_config(&Uuid::nil(), "llm-model", None)
        .unwrap_or_default();
    let key = config_manager
        .get_config(&Uuid::nil(), "llm-key", None)
        .unwrap_or_default();
    let handler = crate::llm::llm_models::get_handler(&model);
    let raw_response = state
        .llm_provider
        .generate(&prompt, &serde_json::Value::Null, &model, &key)
        .await?;
    let processed = handler.process_content(&raw_response);
    Ok(processed)
}
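
For reference, the sync-over-async bridge used in llm_keyword above can be
reduced to a self-contained sketch: spawn a thread, build a Tokio runtime
inside it, block on the async work, and hand the result back over an mpsc
channel with a receive timeout. The async body below is a stand-in for
execute_llm_generation, and the short timeout is for demonstration only; the
only assumed dependency is the tokio crate.

use std::sync::mpsc;
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = mpsc::channel();
    thread::spawn(move || {
        // Build a runtime inside the worker thread, mirroring llm_keyword.
        let rt = tokio::runtime::Builder::new_multi_thread()
            .worker_threads(2)
            .enable_all()
            .build();
        let result = match rt {
            // Stand-in for `execute_llm_generation(state, prompt).await`.
            Ok(rt) => rt.block_on(async { Ok::<String, String>("generated text".into()) }),
            Err(e) => Err(format!("failed to build tokio runtime: {e}")),
        };
        // A send error only means the receiver timed out and went away.
        let _ = tx.send(result);
    });
    // The caller blocks here, just as the Rhai callback does above.
    match rx.recv_timeout(Duration::from_secs(5)) {
        Ok(Ok(text)) => println!("generated: {text}"),
        Ok(Err(e)) => eprintln!("generation failed: {e}"),
        Err(e) => eprintln!("worker did not reply in time: {e}"),
    }
}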