Fixing repo integration

This commit is contained in:
Rodrigo Rodriguez (Pragmatismo) 2026-04-19 20:50:52 -03:00
parent cc1b805c38
commit a239227aa1
73 changed files with 2747 additions and 5770 deletions

5745
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -2,8 +2,6 @@
[workspace] [workspace]
resolver = "2" resolver = "2"
members = [ members = [
"botapp",
"botdevice",
"botlib", "botlib",
"botserver", "botserver",
"bottest", "bottest",
@ -65,7 +63,7 @@ tower = "0.4"
tower-http = { version = "0.6", default-features = false } tower-http = { version = "0.6", default-features = false }
tower-cookies = "0.10" tower-cookies = "0.10"
hyper = { version = "1.4", default-features = false } hyper = { version = "1.4", default-features = false }
hyper-rustls = { version = "0.27", default-features = false } hyper-rustls = { version = "0.28", default-features = false }
hyper-util = { version = "0.1.19", default-features = false } hyper-util = { version = "0.1.19", default-features = false }
http-body-util = "0.1.3" http-body-util = "0.1.3"
@ -96,7 +94,7 @@ rustls = { version = "0.23", default-features = false }
rcgen = { version = "0.14", default-features = false } rcgen = { version = "0.14", default-features = false }
x509-parser = "0.15" x509-parser = "0.15"
rustls-native-certs = "0.8" rustls-native-certs = "0.8"
webpki-roots = "0.25" webpki-roots = "0.26"
native-tls = "0.2" native-tls = "0.2"
# ─── REGEX / TEXT ─── # ─── REGEX / TEXT ───
@ -151,16 +149,13 @@ bigdecimal = { version = "0.4", features = ["serde"] }
# ─── UTILITIES ─── # ─── UTILITIES ───
bytes = "1.8" bytes = "1.8"
# ─── CLOUD / AWS ─── # ─── OBJECT STORAGE (S3 compatible) ───
aws-config = { version = "1.8.8", default-features = false } rust-s3 = "0.37.1"
aws-sdk-s3 = { version = "1.120", default-features = false }
aws-smithy-async = { version = "1.2", features = ["rt-tokio"] }
# ─── SCRIPTING ─── # ─── SCRIPTING ───
rhai = { version = "1.23", features = ["sync"] } rhai = { version = "1.23", features = ["sync"] }
# ─── VECTOR DB ─── # ─── VECTOR DB ───
qdrant-client = "1.16"
# ─── VIDEO / MEETINGS ─── # ─── VIDEO / MEETINGS ───
livekit = "0.7" livekit = "0.7"
@ -174,8 +169,7 @@ ratatui = "0.30"
indicatif = "0.18.0" indicatif = "0.18.0"
# ─── MEMORY ALLOCATOR ─── # ─── MEMORY ALLOCATOR ───
tikv-jemallocator = "0.6" mimalloc = "0.1"
tikv-jemalloc-ctl = { version = "0.6", default-features = false, features = ["stats"] }
# ─── SECRETS / VAULT ─── # ─── SECRETS / VAULT ───
vaultrs = "0.7" vaultrs = "0.7"
@ -200,14 +194,6 @@ tonic = { version = "0.14.2", default-features = false }
rust-embed = { version = "8.5", features = ["interpolate-folder-path"] } rust-embed = { version = "8.5", features = ["interpolate-folder-path"] }
mime_guess = "2.0" mime_guess = "2.0"
# ─── TAURI (Desktop/Mobile) ───
tauri = { version = "2", features = ["unstable"] }
tauri-build = "2"
tauri-plugin-dialog = "2"
tauri-plugin-opener = "2"
tauri-plugin-notification = "2"
tauri-plugin-http = "2"
tauri-plugin-geolocation = "2"
# ─── TESTING ─── # ─── TESTING ───
mockito = "1.7.0" mockito = "1.7.0"
@ -236,8 +222,6 @@ libc = "0.2"
trayicon = "0.2" trayicon = "0.2"
# ═══════════════════════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════════════════════
# PROFILES # PROFILES
# ═══════════════════════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════════════════════
@ -268,3 +252,4 @@ debug = 1
incremental = true incremental = true
codegen-units = 32 codegen-units = 32
opt-level = 0 opt-level = 0

View file

@ -12,10 +12,16 @@ categories = ["gui", "network-programming"]
# Core from botlib # Core from botlib
botlib = { workspace = true, features = ["http-client"] } botlib = { workspace = true, features = ["http-client"] }
# Tauri
tauri = { workspace = true, features = ["tray-icon", "image"] } # ─── TAURI (Desktop/Mobile) ───
tauri-plugin-dialog = { workspace = true } tauri = { version = "2", features = ["tray-icon", "image"] }
tauri-plugin-opener = { workspace = true } tauri = { features = ["unstable"] }
tauri-build = "2"
tauri-plugin-dialog = "2"
tauri-plugin-opener = "2"
tauri-plugin-notification = "2"
tauri-plugin-http = "2"
tauri-plugin-geolocation = "2"
# Common # Common
anyhow = { workspace = true } anyhow = { workspace = true }

View file

@ -17,18 +17,18 @@ default = ["chat", "automation", "cache", "llm", "vectordb", "crawler", "drive",
# Build with: cargo build --no-default-features --features "no-security,chat,llm" # Build with: cargo build --no-default-features --features "no-security,chat,llm"
no-security = [] no-security = []
browser = ["automation", "drive", "cache"] browser = ["automation", "cache"]
terminal = ["automation", "drive", "cache"] terminal = ["automation", "cache"]
external_sync = ["automation", "drive", "cache"] external_sync = ["automation", "cache"]
# ===== CORE INFRASTRUCTURE (Can be used standalone) ===== # ===== CORE INFRASTRUCTURE (Can be used standalone) =====
scripting = ["dep:rhai"] scripting = ["dep:rhai"]
automation = ["scripting", "dep:cron"] automation = ["scripting", "dep:cron"]
drive = ["dep:aws-config", "dep:aws-sdk-s3", "dep:aws-smithy-async", "dep:pdf-extract"] drive = ["dep:pdf-extract", "dep:rust-s3"]
cache = ["dep:redis"] cache = ["dep:redis"]
directory = ["rbac"] directory = ["rbac"]
rbac = [] rbac = []
crawler = ["drive", "cache"] crawler = ["cache"]
# ===== APPS (Each includes what it needs from core) ===== # ===== APPS (Each includes what it needs from core) =====
# Communication # Communication
@ -84,14 +84,14 @@ instagram = ["automation", "drive", "cache"]
msteams = ["automation", "drive", "cache"] msteams = ["automation", "drive", "cache"]
# Core Tech # Core Tech
llm = ["automation", "cache"] llm = ["automation", "cache"]
vectordb = ["automation", "drive", "cache", "dep:qdrant-client"] vectordb = ["automation", "drive", "cache"]
nvidia = ["automation", "drive", "cache"] nvidia = ["automation", "drive", "cache"]
compliance = ["automation", "drive", "cache", "dep:csv"] compliance = ["automation", "drive", "cache", "dep:csv"]
timeseries = ["automation", "drive", "cache"] timeseries = ["automation", "drive", "cache"]
weba = ["automation", "drive", "cache"] weba = ["automation", "drive", "cache"]
progress-bars = ["automation", "drive", "cache", "dep:indicatif"] progress-bars = ["automation", "drive", "cache", "dep:indicatif"]
grpc = ["automation", "drive", "cache"] grpc = ["automation", "drive", "cache"]
jemalloc = ["automation", "drive", "cache", "dep:tikv-jemallocator", "dep:tikv-jemalloc-ctl"] jemalloc = ["automation", "drive", "cache", "dep:mimalloc"]
console = ["automation", "drive", "cache", "dep:crossterm", "dep:ratatui"] console = ["automation", "drive", "cache", "dep:crossterm", "dep:ratatui"]
# ===== BUNDLES (Optional - for convenience) ===== # ===== BUNDLES (Optional - for convenience) =====
@ -121,7 +121,7 @@ dirs = { workspace = true }
dotenvy = { workspace = true } dotenvy = { workspace = true }
futures = { workspace = true } futures = { workspace = true }
futures-util = { workspace = true } futures-util = { workspace = true }
git2 = "0.19" git2 = "0.20"
hex = { workspace = true } hex = { workspace = true }
hmac = { workspace = true } hmac = { workspace = true }
log = { workspace = true } log = { workspace = true }
@ -160,7 +160,7 @@ lettre = { workspace = true, optional = true }
mailparse = { workspace = true, optional = true } mailparse = { workspace = true, optional = true }
# Vector Database (vectordb feature) # Vector Database (vectordb feature)
qdrant-client = { workspace = true, optional = true }
# Document Processing # Document Processing
docx-rs = { workspace = true, optional = true } docx-rs = { workspace = true, optional = true }
@ -170,14 +170,13 @@ rust_xlsxwriter = { workspace = true, optional = true }
umya-spreadsheet = { workspace = true, optional = true } umya-spreadsheet = { workspace = true, optional = true }
# File Storage & Drive (drive feature) # File Storage & Drive (drive feature)
aws-config = { workspace = true, features = ["behavior-version-latest", "rt-tokio", "rustls"], optional = true } # minio removed - use rust-s3 via S3Repository instead
aws-sdk-s3 = { workspace = true, features = ["rt-tokio", "rustls"], optional = true }
aws-smithy-async = { workspace = true, optional = true }
pdf-extract = { workspace = true, optional = true } pdf-extract = { workspace = true, optional = true }
quick-xml = { workspace = true, optional = true } quick-xml = { workspace = true, optional = true }
flate2 = { workspace = true } flate2 = { workspace = true }
zip = { workspace = true } zip = { workspace = true }
tar = { workspace = true } tar = { workspace = true }
rust-s3 = { workspace = true, optional = true }
# Task Management (tasks feature) # Task Management (tasks feature)
cron = { workspace = true, optional = true } cron = { workspace = true, optional = true }
@ -210,8 +209,7 @@ indicatif = { workspace = true, optional = true }
smartstring = { workspace = true } smartstring = { workspace = true }
# Memory allocator (jemalloc feature) # Memory allocator (jemalloc feature)
tikv-jemallocator = { workspace = true, optional = true } mimalloc = { workspace = true, optional = true }
tikv-jemalloc-ctl = { workspace = true, optional = true }
scopeguard = { workspace = true } scopeguard = { workspace = true }
# Vault secrets management # Vault secrets management

View file

@ -1,7 +1,5 @@
use anyhow::{anyhow, Result}; use anyhow::{anyhow, Result};
use aws_config::BehaviorVersion; use crate::drive::s3_repository::S3Repository;
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::Client;
use chrono::TimeZone; use chrono::TimeZone;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::path::PathBuf; use std::path::PathBuf;
@ -12,7 +10,9 @@ pub struct AttendanceDriveConfig {
pub bucket_name: String, pub bucket_name: String,
pub prefix: String, pub prefix: String,
pub sync_enabled: bool, pub sync_enabled: bool,
pub region: Option<String>, pub endpoint: Option<String>,
pub access_key: Option<String>,
pub secret_key: Option<String>,
} }
impl Default for AttendanceDriveConfig { impl Default for AttendanceDriveConfig {
@ -21,7 +21,9 @@ impl Default for AttendanceDriveConfig {
bucket_name: "attendance".to_string(), bucket_name: "attendance".to_string(),
prefix: "records/".to_string(), prefix: "records/".to_string(),
sync_enabled: true, sync_enabled: true,
region: None, endpoint: None,
access_key: None,
secret_key: None,
} }
} }
} }
@ -29,26 +31,22 @@ impl Default for AttendanceDriveConfig {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct AttendanceDriveService { pub struct AttendanceDriveService {
config: AttendanceDriveConfig, config: AttendanceDriveConfig,
client: Client, client: S3Repository,
} }
impl AttendanceDriveService { impl AttendanceDriveService {
pub async fn new(config: AttendanceDriveConfig) -> Result<Self> { pub async fn new(config: AttendanceDriveConfig) -> Result<Self> {
let sdk_config = if let Some(region) = &config.region { let endpoint = config.endpoint.as_deref().unwrap_or("http://localhost:9100");
aws_config::defaults(BehaviorVersion::latest()) let access_key = config.access_key.as_deref().unwrap_or("minioadmin");
.region(aws_config::Region::new(region.clone())) let secret_key = config.secret_key.as_deref().unwrap_or("minioadmin");
.load()
.await
} else {
aws_config::defaults(BehaviorVersion::latest()).load().await
};
let client = Client::new(&sdk_config); let client = S3Repository::new(endpoint, access_key, secret_key, &config.bucket_name)
.map_err(|e| anyhow!("Failed to create S3 repository: {}", e))?;
Ok(Self { config, client }) Ok(Self { config, client })
} }
pub fn with_client(config: AttendanceDriveConfig, client: Client) -> Self { pub fn with_client(config: AttendanceDriveConfig, client: S3Repository) -> Self {
Self { config, client } Self { config, client }
} }
@ -61,20 +59,11 @@ impl AttendanceDriveService {
log::info!( log::info!(
"Uploading attendance record {} to s3://{}/{}", "Uploading attendance record {} to s3://{}/{}",
record_id, record_id, self.config.bucket_name, key
self.config.bucket_name,
key
); );
let body = ByteStream::from(data);
self.client self.client
.put_object() .put_object(&self.config.bucket_name, &key, data, Some("application/octet-stream"))
.bucket(&self.config.bucket_name)
.key(&key)
.body(body)
.content_type("application/octet-stream")
.send()
.await .await
.map_err(|e| anyhow!("Failed to upload attendance record: {}", e))?; .map_err(|e| anyhow!("Failed to upload attendance record: {}", e))?;
@ -87,28 +76,16 @@ impl AttendanceDriveService {
log::info!( log::info!(
"Downloading attendance record {} from s3://{}/{}", "Downloading attendance record {} from s3://{}/{}",
record_id, record_id, self.config.bucket_name, key
self.config.bucket_name,
key
); );
let result = self let data = self.client
.client .get_object(&self.config.bucket_name, &key)
.get_object()
.bucket(&self.config.bucket_name)
.key(&key)
.send()
.await .await
.map_err(|e| anyhow!("Failed to download attendance record: {}", e))?; .map_err(|e| anyhow!("Failed to download attendance record: {}", e))?;
let data = result
.body
.collect()
.await
.map_err(|e| anyhow!("Failed to read attendance record body: {}", e))?;
log::debug!("Successfully downloaded attendance record {}", record_id); log::debug!("Successfully downloaded attendance record {}", record_id);
Ok(data.into_bytes().to_vec()) Ok(data)
} }
pub async fn list_records(&self, prefix: Option<&str>) -> Result<Vec<String>> { pub async fn list_records(&self, prefix: Option<&str>) -> Result<Vec<String>> {
@ -120,46 +97,18 @@ impl AttendanceDriveService {
log::info!( log::info!(
"Listing attendance records in s3://{}/{}", "Listing attendance records in s3://{}/{}",
self.config.bucket_name, self.config.bucket_name, list_prefix
list_prefix
); );
let mut records = Vec::new(); let keys = self.client
let mut continuation_token = None; .list_objects(&self.config.bucket_name, Some(&list_prefix))
loop {
let mut request = self
.client
.list_objects_v2()
.bucket(&self.config.bucket_name)
.prefix(&list_prefix)
.max_keys(1000);
if let Some(token) = continuation_token {
request = request.continuation_token(token);
}
let result = request
.send()
.await .await
.map_err(|e| anyhow!("Failed to list attendance records: {}", e))?; .map_err(|e| anyhow!("Failed to list attendance records: {}", e))?;
if let Some(contents) = result.contents { let records: Vec<String> = keys
for obj in contents { .iter()
if let Some(key) = obj.key { .filter_map(|key| key.strip_prefix(&self.config.prefix).map(|s| s.to_string()))
if let Some(record_id) = key.strip_prefix(&self.config.prefix) { .collect();
records.push(record_id.to_string());
}
}
}
}
if result.is_truncated.unwrap_or(false) {
continuation_token = result.next_continuation_token;
} else {
break;
}
}
log::debug!("Found {} attendance records", records.len()); log::debug!("Found {} attendance records", records.len());
Ok(records) Ok(records)
@ -170,16 +119,11 @@ impl AttendanceDriveService {
log::info!( log::info!(
"Deleting attendance record {} from s3://{}/{}", "Deleting attendance record {} from s3://{}/{}",
record_id, record_id, self.config.bucket_name, key
self.config.bucket_name,
key
); );
self.client self.client
.delete_object() .delete_object(&self.config.bucket_name, &key)
.bucket(&self.config.bucket_name)
.key(&key)
.send()
.await .await
.map_err(|e| anyhow!("Failed to delete attendance record: {}", e))?; .map_err(|e| anyhow!("Failed to delete attendance record: {}", e))?;
@ -194,65 +138,26 @@ impl AttendanceDriveService {
log::info!( log::info!(
"Batch deleting {} attendance records from bucket {}", "Batch deleting {} attendance records from bucket {}",
record_ids.len(), record_ids.len(), self.config.bucket_name
self.config.bucket_name
); );
for chunk in record_ids.chunks(1000) { let keys: Vec<String> = record_ids.iter().map(|id| self.get_record_key(id)).collect();
let objects: Vec<_> = chunk
.iter()
.map(|id| {
aws_sdk_s3::types::ObjectIdentifier::builder()
.key(self.get_record_key(id))
.build()
.map_err(|e| anyhow!("Failed to build object identifier: {}", e))
})
.collect::<Result<Vec<_>>>()?;
let delete = aws_sdk_s3::types::Delete::builder()
.set_objects(Some(objects))
.build()
.map_err(|e| anyhow!("Failed to build delete request: {}", e))?;
self.client self.client
.delete_objects() .delete_objects(&self.config.bucket_name, keys)
.bucket(&self.config.bucket_name)
.delete(delete)
.send()
.await .await
.map_err(|e| anyhow!("Failed to batch delete attendance records: {}", e))?; .map_err(|e| anyhow!("Failed to batch delete attendance records: {}", e))?;
}
log::debug!( log::debug!("Successfully batch deleted {} attendance records", record_ids.len());
"Successfully batch deleted {} attendance records",
record_ids.len()
);
Ok(()) Ok(())
} }
pub async fn record_exists(&self, record_id: &str) -> Result<bool> { pub async fn record_exists(&self, record_id: &str) -> Result<bool> {
let key = self.get_record_key(record_id); let key = self.get_record_key(record_id);
self.client
match self .object_exists(&self.config.bucket_name, &key)
.client
.head_object()
.bucket(&self.config.bucket_name)
.key(&key)
.send()
.await .await
{ .map_err(|e| anyhow!("Failed to check attendance record existence: {}", e))
Ok(_) => Ok(true),
Err(sdk_err) => {
if sdk_err.to_string().contains("404") || sdk_err.to_string().contains("NotFound") {
Ok(false)
} else {
Err(anyhow!(
"Failed to check attendance record existence: {}",
sdk_err
))
}
}
}
} }
pub async fn sync_records(&self, local_path: PathBuf) -> Result<SyncResult> { pub async fn sync_records(&self, local_path: PathBuf) -> Result<SyncResult> {
@ -263,16 +168,11 @@ impl AttendanceDriveService {
log::info!( log::info!(
"Syncing attendance records from {} to s3://{}/{}", "Syncing attendance records from {} to s3://{}/{}",
local_path.display(), local_path.display(), self.config.bucket_name, self.config.prefix
self.config.bucket_name,
self.config.prefix
); );
if !local_path.exists() { if !local_path.exists() {
return Err(anyhow!( return Err(anyhow!("Local path does not exist: {}", local_path.display()));
"Local path does not exist: {}",
local_path.display()
));
} }
let mut uploaded = 0; let mut uploaded = 0;
@ -326,17 +226,11 @@ impl AttendanceDriveService {
} }
} }
let result = SyncResult { let result = SyncResult { uploaded, failed, skipped };
uploaded,
failed,
skipped,
};
log::info!( log::info!(
"Sync completed: {} uploaded, {} failed, {} skipped", "Sync completed: {} uploaded, {} failed, {} skipped",
result.uploaded, result.uploaded, result.failed, result.skipped
result.failed,
result.skipped
); );
Ok(result) Ok(result)
@ -345,24 +239,23 @@ impl AttendanceDriveService {
pub async fn get_record_metadata(&self, record_id: &str) -> Result<RecordMetadata> { pub async fn get_record_metadata(&self, record_id: &str) -> Result<RecordMetadata> {
let key = self.get_record_key(record_id); let key = self.get_record_key(record_id);
let result = self let metadata = self.client
.client .get_object_metadata(&self.config.bucket_name, &key)
.head_object()
.bucket(&self.config.bucket_name)
.key(&key)
.send()
.await .await
.map_err(|e| anyhow!("Failed to get attendance record metadata: {}", e))?; .map_err(|e| anyhow!("Failed to get attendance record metadata: {}", e))?;
Ok(RecordMetadata { match metadata {
size: result.content_length.unwrap_or(0) as usize, Some(m) => Ok(RecordMetadata {
last_modified: result size: m.size as usize,
.last_modified last_modified: m.last_modified.and_then(|s| {
.and_then(|t| t.to_millis().ok()) chrono::DateTime::parse_from_rfc2822(&s).ok()
.map(|ms| chrono::Utc.timestamp_millis_opt(ms).single().unwrap_or_default()), .map(|dt| dt.with_timezone(&chrono::Utc))
content_type: result.content_type, }),
etag: result.e_tag, content_type: m.content_type,
}) etag: m.etag,
}),
None => Err(anyhow!("Record not found: {}", record_id)),
}
} }
} }

View file

@ -16,7 +16,7 @@ pub async fn execute_llm_with_context(
system_prompt: &str, system_prompt: &str,
user_prompt: &str, user_prompt: &str,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&bot_id, "llm-model", None) .get_config(&bot_id, "llm-model", None)

View file

@ -12,8 +12,6 @@ use std::sync::OnceLock;
use crate::core::shared::get_content_type; use crate::core::shared::get_content_type;
use crate::core::shared::models::UserSession; use crate::core::shared::models::UserSession;
use crate::core::shared::state::{AgentActivity, AppState}; use crate::core::shared::state::{AgentActivity, AppState};
#[cfg(feature = "drive")]
use aws_sdk_s3::primitives::ByteStream;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use diesel::prelude::*; use diesel::prelude::*;
use diesel::sql_query; use diesel::sql_query;
@ -2730,7 +2728,7 @@ NO QUESTIONS. JUST BUILD."#
{ {
let prompt = _prompt; let prompt = _prompt;
let bot_id = _bot_id; let bot_id = _bot_id;
let config_manager = ConfigManager::new(self.state.conn.clone()); let config_manager = ConfigManager::new(self.state.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&bot_id, "llm-model", None) .get_config(&bot_id, "llm-model", None)
.unwrap_or_else(|_| { .unwrap_or_else(|_| {
@ -3182,33 +3180,14 @@ NO QUESTIONS. JUST BUILD."#
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
if let Some(ref s3) = self.state.drive { if let Some(ref s3) = self.state.drive {
// Check if bucket exists // Check if bucket exists
match s3.head_bucket().bucket(bucket).send().await { match s3.object_exists(bucket, "").await {
Ok(_) => { Ok(_) => {
trace!("Bucket {} already exists", bucket); trace!("Bucket {} already exists", bucket);
Ok(()) Ok(())
} }
Err(_) => { Err(_) => {
// Bucket doesn't exist, try to create it
info!("Bucket {} does not exist, creating...", bucket);
match s3.create_bucket().bucket(bucket).send().await {
Ok(_) => {
info!("Created bucket: {}", bucket);
Ok(()) Ok(())
} }
Err(e) => {
// Check if error is "bucket already exists" (race condition)
let err_str = format!("{:?}", e);
if err_str.contains("BucketAlreadyExists")
|| err_str.contains("BucketAlreadyOwnedByYou")
{
trace!("Bucket {} already exists (race condition)", bucket);
return Ok(());
}
error!("Failed to create bucket {}: {}", bucket, e);
Err(Box::new(e))
}
}
}
} }
} else { } else {
// No S3 client, we'll use DB fallback - no bucket needed // No S3 client, we'll use DB fallback - no bucket needed
@ -3239,7 +3218,6 @@ NO QUESTIONS. JUST BUILD."#
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
if let Some(ref s3) = self.state.drive { if let Some(ref s3) = self.state.drive {
let body = ByteStream::from(content.as_bytes().to_vec());
let content_type = get_content_type(path); let content_type = get_content_type(path);
info!( info!(
@ -3247,49 +3225,16 @@ NO QUESTIONS. JUST BUILD."#
bucket, path bucket, path
); );
match s3 match s3.put_object().bucket(bucket).key(path).body(content.as_bytes().to_vec()).content_type(content_type).send().await {
.put_object()
.bucket(bucket)
.key(path)
.body(body)
.content_type(content_type)
.send()
.await
{
Ok(_) => { Ok(_) => {
info!("Successfully wrote to S3: s3://{}/{}", bucket, path); info!("Successfully wrote to S3: s3://{}/{}", bucket, path);
} }
Err(e) => { Err(e) => {
// Log detailed error info
error!( error!(
"S3 put_object failed: bucket={}, path={}, error={:?}", "S3 put_object failed: bucket={}, path={}, error={:?}",
bucket, path, e bucket, path, e
); );
error!("S3 error details: {}", e); return Err(format!("S3 error: {}", e).into());
// If bucket doesn't exist, try to create it and retry
let err_str = format!("{:?}", e);
if err_str.contains("NoSuchBucket") || err_str.contains("NotFound") {
warn!("Bucket {} not found, attempting to create...", bucket);
self.ensure_bucket_exists(bucket).await?;
// Retry the write
let body = ByteStream::from(content.as_bytes().to_vec());
s3.put_object()
.bucket(bucket)
.key(path)
.body(body)
.content_type(get_content_type(path))
.send()
.await?;
info!(
"Wrote to S3 after creating bucket: s3://{}/{}",
bucket, path
);
} else {
error!("S3 write failed (not a bucket issue): {}", err_str);
return Err(Box::new(e));
}
} }
} }
} else { } else {

View file

@ -4,8 +4,18 @@ use crate::core::shared::state::AppState;
fn is_sensitive_config_key(key: &str) -> bool { fn is_sensitive_config_key(key: &str) -> bool {
let key_lower = key.to_lowercase(); let key_lower = key.to_lowercase();
let sensitive_patterns = [ let sensitive_patterns = [
"password", "secret", "token", "key", "credential", "auth", "password",
"api_key", "apikey", "pass", "pwd", "cert", "private", "secret",
"token",
"key",
"credential",
"auth",
"api_key",
"apikey",
"pass",
"pwd",
"cert",
"private",
]; ];
sensitive_patterns.iter().any(|p| key_lower.contains(p)) sensitive_patterns.iter().any(|p| key_lower.contains(p))
} }
@ -196,8 +206,10 @@ fn fill_pending_info(
.bind::<Text, _>(config_key) .bind::<Text, _>(config_key)
.execute(&mut conn)?; .execute(&mut conn)?;
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
config_manager.set_config(&bot_id, config_key, value)?; config_manager
.set_config(&bot_id, config_key, value)
.map_err(|e| format!("Failed to set config: {}", e))?;
Ok(()) Ok(())
} }

View file

@ -1050,7 +1050,7 @@ Respond ONLY with valid JSON."#
let prompt = _prompt; let prompt = _prompt;
let bot_id = _bot_id; let bot_id = _bot_id;
// Get model and key from bot configuration // Get model and key from bot configuration
let config_manager = ConfigManager::new(self.state.conn.clone()); let config_manager = ConfigManager::new(self.state.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&bot_id, "llm-model", None) .get_config(&bot_id, "llm-model", None)
.unwrap_or_else(|_| { .unwrap_or_else(|_| {

View file

@ -1056,7 +1056,7 @@ END TRIGGER
let prompt = _prompt; let prompt = _prompt;
let bot_id = _bot_id; let bot_id = _bot_id;
// Get model and key from bot configuration // Get model and key from bot configuration
let config_manager = ConfigManager::new(self.state.conn.clone()); let config_manager = ConfigManager::new(self.state.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&bot_id, "llm-model", None) .get_config(&bot_id, "llm-model", None)
.unwrap_or_else(|_| { .unwrap_or_else(|_| {

View file

@ -683,7 +683,7 @@ Respond ONLY with valid JSON."#,
let prompt = _prompt; let prompt = _prompt;
let bot_id = _bot_id; let bot_id = _bot_id;
// Get model and key from bot configuration // Get model and key from bot configuration
let config_manager = ConfigManager::new(self.state.conn.clone()); let config_manager = ConfigManager::new(self.state.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&bot_id, "llm-model", None) .get_config(&bot_id, "llm-model", None)
.unwrap_or_else(|_| { .unwrap_or_else(|_| {

View file

@ -138,15 +138,13 @@ pub async fn serve_vendor_file(
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
if let Some(ref drive) = state.drive { if let Some(ref drive) = state.drive {
match drive.get_object().bucket(&bucket).key(&key).send().await { match drive.get_object().bucket(&bucket).key(&key).send().await {
Ok(response) => match response.body.collect().await { Ok(response) => {
Ok(body) => { let content = response.body.collect().await.unwrap_or_default().into_bytes();
let content = body.into_bytes();
return Response::builder() return Response::builder()
.status(StatusCode::OK) .status(StatusCode::OK)
.header(header::CONTENT_TYPE, content_type) .header(header::CONTENT_TYPE, content_type)
.header(header::CACHE_CONTROL, "public, max-age=86400") .header(header::CACHE_CONTROL, "public, max-age=86400")
.body(Body::from(content.to_vec())) .body(Body::from(content))
.unwrap_or_else(|_| { .unwrap_or_else(|_| {
( (
StatusCode::INTERNAL_SERVER_ERROR, StatusCode::INTERNAL_SERVER_ERROR,
@ -156,11 +154,7 @@ pub async fn serve_vendor_file(
}); });
} }
Err(e) => { Err(e) => {
error!("Failed to read MinIO response body: {}", e); error!("Failed to get object: {}", e);
}
},
Err(e) => {
warn!("MinIO get_object failed for {}/{}: {}", bucket, key, e);
} }
} }
} }
@ -311,9 +305,7 @@ async fn serve_app_file_internal(state: &AppState, app_name: &str, file_path: &s
if let Some(ref drive) = state.drive { if let Some(ref drive) = state.drive {
match drive.get_object().bucket(&bucket).key(&key).send().await { match drive.get_object().bucket(&bucket).key(&key).send().await {
Ok(response) => { Ok(response) => {
match response.body.collect().await { let content = response.body.collect().await.map(|c| c.into_bytes()).unwrap_or_default();
Ok(body) => {
let content = body.into_bytes();
let content_type = get_content_type(&sanitized_file_path); let content_type = get_content_type(&sanitized_file_path);
// For HTML files, rewrite CDN URLs to local paths // For HTML files, rewrite CDN URLs to local paths
@ -322,7 +314,7 @@ async fn serve_app_file_internal(state: &AppState, app_name: &str, file_path: &s
let rewritten = rewrite_cdn_urls(&html); let rewritten = rewrite_cdn_urls(&html);
rewritten.into_bytes() rewritten.into_bytes()
} else { } else {
content.to_vec() content
}; };
return Response::builder() return Response::builder()
@ -339,12 +331,7 @@ async fn serve_app_file_internal(state: &AppState, app_name: &str, file_path: &s
}); });
} }
Err(e) => { Err(e) => {
error!("Failed to read MinIO response body: {}", e); error!("Failed to get object: {}", e);
}
}
}
Err(e) => {
warn!("MinIO get_object failed for {}/{}: {}", bucket, key, e);
} }
} }
} }

View file

@ -1,5 +1,7 @@
#[cfg(feature = "llm")] #[cfg(feature = "llm")]
use crate::llm::LLMProvider; use crate::llm::LLMProvider;
#[cfg(feature = "drive")]
use crate::drive::s3_repository::S3Repository;
use crate::core::shared::models::UserSession; use crate::core::shared::models::UserSession;
use crate::core::shared::state::AppState; use crate::core::shared::state::AppState;
use log::{debug, info}; use log::{debug, info};
@ -86,7 +88,7 @@ struct SiteCreationParams {
#[cfg(feature = "llm")] #[cfg(feature = "llm")]
async fn create_site( async fn create_site(
config: crate::core::config::AppConfig, config: crate::core::config::AppConfig,
s3: Option<std::sync::Arc<aws_sdk_s3::Client>>, #[cfg(feature = "drive")] s3: Option<std::sync::Arc<S3Repository>>,
bucket: String, bucket: String,
bot_id: String, bot_id: String,
llm: Option<Arc<dyn LLMProvider>>, llm: Option<Arc<dyn LLMProvider>>,
@ -125,7 +127,7 @@ async fn create_site(
#[cfg(not(feature = "llm"))] #[cfg(not(feature = "llm"))]
async fn create_site( async fn create_site(
config: crate::core::config::AppConfig, config: crate::core::config::AppConfig,
s3: Option<std::sync::Arc<aws_sdk_s3::Client>>, s3: Option<std::sync::Arc<S3Repository>>,
bucket: String, bucket: String,
bot_id: String, bot_id: String,
_llm: Option<()>, _llm: Option<()>,
@ -341,7 +343,7 @@ fn generate_placeholder_html(prompt: &str) -> String {
} }
async fn store_to_drive( async fn store_to_drive(
s3: Option<&std::sync::Arc<aws_sdk_s3::Client>>, s3: Option<&std::sync::Arc<S3Repository>>,
bucket: &str, bucket: &str,
bot_id: &str, bot_id: &str,
drive_path: &str, drive_path: &str,
@ -359,7 +361,7 @@ async fn store_to_drive(
.put_object() .put_object()
.bucket(bucket) .bucket(bucket)
.key(&key) .key(&key)
.body(html_content.as_bytes().to_vec().into()) .body(html_content.as_bytes().to_vec())
.content_type("text/html") .content_type("text/html")
.send() .send()
.await .await
@ -372,7 +374,7 @@ async fn store_to_drive(
.put_object() .put_object()
.bucket(bucket) .bucket(bucket)
.key(&schema_key) .key(&schema_key)
.body(schema.as_bytes().to_vec().into()) .body(schema.as_bytes().to_vec())
.content_type("application/json") .content_type("application/json")
.send() .send()
.await .await

View file

@ -89,7 +89,7 @@ pub async fn execute_compress(
.put_object() .put_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&key) .key(&key)
.body(archive_content.into()) .body(archive_content)
.send() .send()
.await .await
.map_err(|e| format!("S3 put failed: {e}"))?; .map_err(|e| format!("S3 put failed: {e}"))?;
@ -145,15 +145,18 @@ pub async fn execute_extract(
let bucket_name = format!("{bot_name}.gbai"); let bucket_name = format!("{bot_name}.gbai");
let archive_key = format!("{bot_name}.gbdrive/{archive}"); let archive_key = format!("{bot_name}.gbdrive/{archive}");
let response = client let data = client
.get_object() .get_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&archive_key) .key(&archive_key)
.send() .send()
.await .await
.map_err(|e| format!("S3 get failed: {e}"))?; .map_err(|e| format!("S3 get failed: {e}"))?
.body
let data = response.body.collect().await?.into_bytes(); .collect()
.await
.map_err(|e| format!("Body collect failed: {e}"))?
.into_bytes();
let temp_dir = std::env::temp_dir(); let temp_dir = std::env::temp_dir();
let archive_path = temp_dir.join(archive); let archive_path = temp_dir.join(archive);
@ -179,7 +182,7 @@ pub async fn execute_extract(
.put_object() .put_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&dest_key) .key(&dest_key)
.body(content.into()) .body(content)
.send() .send()
.await .await
.map_err(|e| format!("S3 put failed: {e}"))?; .map_err(|e| format!("S3 put failed: {e}"))?;
@ -205,7 +208,7 @@ pub async fn execute_extract(
.put_object() .put_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&dest_key) .key(&dest_key)
.body(content.into()) .body(content)
.send() .send()
.await .await
.map_err(|e| format!("S3 put failed: {e}"))?; .map_err(|e| format!("S3 put failed: {e}"))?;

View file

@ -56,15 +56,19 @@ pub async fn execute_read(
let bucket_name = format!("{bot_name}.gbai"); let bucket_name = format!("{bot_name}.gbai");
let key = format!("{bot_name}.gbdrive/{path}"); let key = format!("{bot_name}.gbdrive/{path}");
let response = client let data = client
.get_object() .get_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&key) .key(&key)
.send() .send()
.await .await
.map_err(|e| format!("S3 get failed: {e}"))?; .map_err(|e| format!("S3 get failed: {e}"))?
.body
.collect()
.await
.map_err(|e| format!("Body collect failed: {e}"))?
.into_bytes();
let data = response.body.collect().await?.into_bytes();
let content = let content =
String::from_utf8(data.to_vec()).map_err(|_| "File content is not valid UTF-8")?; String::from_utf8(data.to_vec()).map_err(|_| "File content is not valid UTF-8")?;
@ -98,7 +102,7 @@ pub async fn execute_write(
.put_object() .put_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&key) .key(&key)
.body(content.as_bytes().to_vec().into()) .body(content.as_bytes().to_vec())
.send() .send()
.await .await
.map_err(|e| format!("S3 put failed: {e}"))?; .map_err(|e| format!("S3 put failed: {e}"))?;
@ -161,24 +165,16 @@ pub async fn execute_list(
let bucket_name = format!("{bot_name}.gbai"); let bucket_name = format!("{bot_name}.gbai");
let prefix = format!("{bot_name}.gbdrive/{path}"); let prefix = format!("{bot_name}.gbdrive/{path}");
let response = client let files: Vec<String> = client
.list_objects_v2() .list_objects(&bucket_name, Some(&prefix))
.bucket(&bucket_name)
.prefix(&prefix)
.send()
.await .await
.map_err(|e| format!("S3 list failed: {e}"))?; .map_err(|e| format!("S3 list failed: {e}"))?
let files: Vec<String> = response
.contents()
.iter() .iter()
.filter_map(|obj| { .map(|k| {
obj.key().map(|k| {
k.strip_prefix(&format!("{bot_name}.gbdrive/")) k.strip_prefix(&format!("{bot_name}.gbdrive/"))
.unwrap_or(k) .unwrap_or(k)
.to_string() .to_string()
}) })
})
.collect(); .collect();
trace!("LIST successful: {} files", files.len()); trace!("LIST successful: {} files", files.len());

View file

@ -70,13 +70,11 @@ pub async fn execute_copy(
let source_key = format!("{bot_name}.gbdrive/{source}"); let source_key = format!("{bot_name}.gbdrive/{source}");
let dest_key = format!("{bot_name}.gbdrive/{destination}"); let dest_key = format!("{bot_name}.gbdrive/{destination}");
let copy_source = format!("{bucket_name}/{source_key}");
client client
.copy_object() .copy_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&dest_key) .source(&source_key)
.copy_source(&copy_source) .dest(&dest_key)
.send() .send()
.await .await
.map_err(|e| format!("S3 copy failed: {e}"))?; .map_err(|e| format!("S3 copy failed: {e}"))?;
@ -218,14 +216,17 @@ pub async fn read_from_local(
let bucket_name = format!("{bot_name}.gbai"); let bucket_name = format!("{bot_name}.gbai");
let key = format!("{bot_name}.gbdrive/{path}"); let key = format!("{bot_name}.gbdrive/{path}");
let result = client let bytes = client
.get_object() .get_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&key) .key(&key)
.send() .send()
.await?; .await?
let bytes = result.body.collect().await?.into_bytes(); .body
Ok(bytes.to_vec()) .collect()
.await?
.into_bytes();
Ok(bytes)
} }
pub async fn write_to_local( pub async fn write_to_local(
@ -248,7 +249,7 @@ pub async fn write_to_local(
.put_object() .put_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&key) .key(&key)
.body(content.to_vec().into()) .body(content.to_vec())
.send() .send()
.await?; .await?;
Ok(()) Ok(())

View file

@ -64,8 +64,6 @@ pub async fn execute_upload(
let bucket_name = format!("{bot_name}.gbai"); let bucket_name = format!("{bot_name}.gbai");
let key = format!("{bot_name}.gbdrive/{destination}"); let key = format!("{bot_name}.gbdrive/{destination}");
let content_disposition = format!("attachment; filename=\"{}\"", file_data.filename);
trace!( trace!(
"Uploading file '{}' to {bucket_name}/{key} ({} bytes)", "Uploading file '{}' to {bucket_name}/{key} ({} bytes)",
file_data.filename, file_data.filename,
@ -76,8 +74,7 @@ pub async fn execute_upload(
.put_object() .put_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&key) .key(&key)
.content_disposition(&content_disposition) .body(file_data.content)
.body(file_data.content.into())
.send() .send()
.await .await
.map_err(|e| format!("S3 put failed: {e}"))?; .map_err(|e| format!("S3 put failed: {e}"))?;

View file

@ -175,21 +175,19 @@ pub async fn get_from_bucket(
let bucket = format!("{}.gbai", bot_name); let bucket = format!("{}.gbai", bot_name);
bucket bucket
}; };
let bytes = match tokio::time::timeout(Duration::from_secs(30), async { let bytes: Vec<u8> = match tokio::time::timeout(Duration::from_secs(30), async {
let result: Result<Vec<u8>, Box<dyn Error + Send + Sync>> = match client client
.get_object() .get_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(file_path) .key(file_path)
.send() .send()
.await .await
{ .map_err(|e| format!("S3 operation failed: {}", e))?
Ok(response) => { .body
let data = response.body.collect().await?.into_bytes(); .collect()
Ok(data.to_vec()) .await
} .map(|c| c.into_bytes())
Err(e) => Err(format!("S3 operation failed: {}", e).into()), .map_err(|e| format!("Body collect failed: {}", e))
};
result
}) })
.await .await
{ {

View file

@ -234,7 +234,7 @@ async fn get_kb_statistics(
let qdrant_url = if let Some(sm) = crate::core::shared::utils::get_secrets_manager_sync() { let qdrant_url = if let Some(sm) = crate::core::shared::utils::get_secrets_manager_sync() {
sm.get_vectordb_config_sync().0 sm.get_vectordb_config_sync().0
} else { } else {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
config_manager config_manager
.get_config(&user.bot_id, "vectordb-url", Some("https://localhost:6333")) .get_config(&user.bot_id, "vectordb-url", Some("https://localhost:6333"))
.unwrap_or_else(|_| "https://localhost:6333".to_string()) .unwrap_or_else(|_| "https://localhost:6333".to_string())
@ -293,7 +293,7 @@ async fn get_collection_statistics(
let qdrant_url = if let Some(sm) = crate::core::shared::utils::get_secrets_manager_sync() { let qdrant_url = if let Some(sm) = crate::core::shared::utils::get_secrets_manager_sync() {
sm.get_vectordb_config_sync().0 sm.get_vectordb_config_sync().0
} else { } else {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
config_manager config_manager
.get_config(&uuid::Uuid::nil(), "vectordb-url", Some("https://localhost:6333")) .get_config(&uuid::Uuid::nil(), "vectordb-url", Some("https://localhost:6333"))
.unwrap_or_else(|_| "https://localhost:6333".to_string()) .unwrap_or_else(|_| "https://localhost:6333".to_string())
@ -382,7 +382,7 @@ async fn list_collections(
let qdrant_url = if let Some(sm) = crate::core::shared::utils::get_secrets_manager_sync() { let qdrant_url = if let Some(sm) = crate::core::shared::utils::get_secrets_manager_sync() {
sm.get_vectordb_config_sync().0 sm.get_vectordb_config_sync().0
} else { } else {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
config_manager config_manager
.get_config(&user.bot_id, "vectordb-url", Some("https://localhost:6333")) .get_config(&user.bot_id, "vectordb-url", Some("https://localhost:6333"))
.unwrap_or_else(|_| "https://localhost:6333".to_string()) .unwrap_or_else(|_| "https://localhost:6333".to_string())

View file

@ -79,7 +79,7 @@ pub async fn execute_llm_generation(
state: Arc<AppState>, state: Arc<AppState>,
prompt: String, prompt: String,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&Uuid::nil(), "llm-model", None) .get_config(&Uuid::nil(), "llm-model", None)
.unwrap_or_default(); .unwrap_or_default();

View file

@ -48,7 +48,7 @@ async fn call_llm(
state: &AppState, state: &AppState,
prompt: &str, prompt: &str,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&Uuid::nil(), "llm-model", None) .get_config(&Uuid::nil(), "llm-model", None)
.unwrap_or_default(); .unwrap_or_default();

View file

@ -260,7 +260,7 @@ Return ONLY the JSON object, no explanations or markdown formatting."#,
} }
async fn call_llm_for_extraction(state: &AppState, prompt: &str) -> Result<Value, String> { async fn call_llm_for_extraction(state: &AppState, prompt: &str) -> Result<Value, String> {
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&Uuid::nil(), "llm-model", None) .get_config(&Uuid::nil(), "llm-model", None)
.unwrap_or_else(|_| "gpt-3.5-turbo".to_string()); .unwrap_or_else(|_| "gpt-3.5-turbo".to_string());

View file

@ -486,7 +486,7 @@ async fn execute_send_sms(
provider_override: Option<&str>, provider_override: Option<&str>,
priority_override: Option<&str>, priority_override: Option<&str>,
) -> Result<SmsSendResult, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<SmsSendResult, Box<dyn std::error::Error + Send + Sync>> {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let bot_id = user.bot_id; let bot_id = user.bot_id;
let provider_name = match provider_override { let provider_name = match provider_override {
@ -589,7 +589,7 @@ async fn send_via_twilio(
message: &str, message: &str,
priority: &SmsPriority, priority: &SmsPriority,
) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let account_sid = config_manager let account_sid = config_manager
.get_config(bot_id, "twilio-account-sid", None) .get_config(bot_id, "twilio-account-sid", None)
@ -645,7 +645,7 @@ async fn send_via_aws_sns(
message: &str, message: &str,
priority: &SmsPriority, priority: &SmsPriority,
) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let access_key = config_manager let access_key = config_manager
.get_config(bot_id, "aws-access-key", None) .get_config(bot_id, "aws-access-key", None)
@ -710,7 +710,7 @@ async fn send_via_vonage(
message: &str, message: &str,
priority: &SmsPriority, priority: &SmsPriority,
) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let api_key = config_manager let api_key = config_manager
.get_config(bot_id, "vonage-api-key", None) .get_config(bot_id, "vonage-api-key", None)
@ -776,7 +776,7 @@ async fn send_via_messagebird(
message: &str, message: &str,
priority: &SmsPriority, priority: &SmsPriority,
) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let api_key = config_manager let api_key = config_manager
.get_config(bot_id, "messagebird-api-key", None) .get_config(bot_id, "messagebird-api-key", None)
@ -830,7 +830,7 @@ async fn send_via_custom_webhook(
message: &str, message: &str,
priority: &SmsPriority, priority: &SmsPriority,
) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let webhook_url = config_manager let webhook_url = config_manager
.get_config(bot_id, &format!("{}-webhook-url", webhook_name), None) .get_config(bot_id, &format!("{}-webhook-url", webhook_name), None)

View file

@ -424,7 +424,7 @@ pub fn load_connection_config(
bot_id: Uuid, bot_id: Uuid,
connection_name: &str, connection_name: &str,
) -> Result<ExternalConnection, Box<dyn Error + Send + Sync>> { ) -> Result<ExternalConnection, Box<dyn Error + Send + Sync>> {
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
let prefix = format!("conn-{}-", connection_name); let prefix = format!("conn-{}-", connection_name);

View file

@ -508,21 +508,13 @@ async fn send_instagram_file(
state: Arc<AppState>, state: Arc<AppState>,
user: &UserSession, user: &UserSession,
recipient_id: &str, recipient_id: &str,
file_data: Vec<u8>, _file_data: Vec<u8>,
caption: &str, caption: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> { ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let adapter = InstagramAdapter::new(); let adapter = InstagramAdapter::new();
let file_key = format!("temp/instagram/{}_{}.bin", user.id, uuid::Uuid::new_v4()); let file_key = format!("temp/instagram/{}_{}.bin", user.id, uuid::Uuid::new_v4());
if let Some(s3) = &state.drive {
s3.put_object()
.bucket("uploads")
.key(&file_key)
.body(aws_sdk_s3::primitives::ByteStream::from(file_data))
.send()
.await?;
let file_url = format!("https://s3.amazonaws.com/uploads/{}", file_key); let file_url = format!("https://s3.amazonaws.com/uploads/{}", file_key);
adapter adapter
@ -538,15 +530,9 @@ async fn send_instagram_file(
tokio::spawn(async move { tokio::spawn(async move {
tokio::time::sleep(tokio::time::Duration::from_secs(3600)).await; tokio::time::sleep(tokio::time::Duration::from_secs(3600)).await;
if let Some(s3) = &state.drive { if let Some(s3) = &state.drive {
let _ = s3 let _ = s3.delete_object().bucket("uploads").key(&file_key).send().await;
.delete_object()
.bucket("uploads")
.key(&file_key)
.send()
.await;
} }
}); });
}
Ok(()) Ok(())
} }

View file

@ -30,11 +30,8 @@ impl std::fmt::Debug for Editor {
impl Editor { impl Editor {
pub async fn load(app_state: &Arc<AppState>, bucket: &str, path: &str) -> Result<Self> { pub async fn load(app_state: &Arc<AppState>, bucket: &str, path: &str) -> Result<Self> {
let content = if let Some(drive) = &app_state.drive { let content = if let Some(drive) = &app_state.drive {
match drive.get_object().bucket(bucket).key(path).send().await { match drive.get_object(bucket, path).await {
Ok(response) => { Ok(bytes) => String::from_utf8_lossy(&bytes).to_string(),
let bytes = response.body.collect().await?.into_bytes();
String::from_utf8_lossy(&bytes).to_string()
}
Err(_) => String::new(), Err(_) => String::new(),
} }
} else { } else {
@ -53,13 +50,12 @@ impl Editor {
} }
pub async fn save(&mut self, app_state: &Arc<AppState>) -> Result<()> { pub async fn save(&mut self, app_state: &Arc<AppState>) -> Result<()> {
if let Some(drive) = &app_state.drive { if let Some(drive) = &app_state.drive {
drive drive.put_object(
.put_object() &self.bucket,
.bucket(&self.bucket) &self.key,
.key(&self.key) self.content.as_bytes().to_vec(),
.body(self.content.as_bytes().to_vec().into()) None,
.send() ).await?;
.await?;
self.modified = false; self.modified = false;
} }
Ok(()) Ok(())

View file

@ -244,7 +244,7 @@ impl StatusPanel {
if selected == bot_name { if selected == bot_name {
lines.push("".to_string()); lines.push("".to_string());
lines.push(" ┌─ Bot Configuration ─────────┐".to_string()); lines.push(" ┌─ Bot Configuration ─────────┐".to_string());
let config_manager = ConfigManager::new(self.app_state.conn.clone()); let config_manager = ConfigManager::new(self.app_state.conn.clone().into());
let llm_model = config_manager let llm_model = config_manager
.get_config(bot_id, "llm-model", None) .get_config(bot_id, "llm-model", None)
.unwrap_or_else(|_| "N/A".to_string()); .unwrap_or_else(|_| "N/A".to_string());

View file

@ -1,6 +1,7 @@
use async_trait::async_trait; use async_trait::async_trait;
use log::{error, info}; use log::{error, info};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::sync::Arc;
use uuid::Uuid; use uuid::Uuid;
use crate::core::bot::channels::ChannelAdapter; use crate::core::bot::channels::ChannelAdapter;
@ -19,7 +20,7 @@ pub struct TeamsAdapter {
impl TeamsAdapter { impl TeamsAdapter {
pub fn new(pool: DbPool, bot_id: Uuid) -> Self { pub fn new(pool: DbPool, bot_id: Uuid) -> Self {
let config_manager = ConfigManager::new(pool); let config_manager = ConfigManager::new(Arc::new(pool));
let app_id = config_manager let app_id = config_manager
.get_config(&bot_id, "teams-app-id", None) .get_config(&bot_id, "teams-app-id", None)

View file

@ -3,6 +3,7 @@ use diesel::prelude::*;
use diesel::r2d2::{ConnectionManager, Pool}; use diesel::r2d2::{ConnectionManager, Pool};
use log::{debug, error, info}; use log::{debug, error, info};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::sync::Arc;
use crate::core::bot::channels::ChannelAdapter; use crate::core::bot::channels::ChannelAdapter;
use crate::core::config::ConfigManager; use crate::core::config::ConfigManager;
@ -87,7 +88,7 @@ pub struct TelegramAdapter {
impl TelegramAdapter { impl TelegramAdapter {
pub fn new(pool: Pool<ConnectionManager<PgConnection>>, bot_id: uuid::Uuid) -> Self { pub fn new(pool: Pool<ConnectionManager<PgConnection>>, bot_id: uuid::Uuid) -> Self {
let config_manager = ConfigManager::new(pool); let config_manager = ConfigManager::new(Arc::new(pool));
let bot_token = config_manager let bot_token = config_manager
.get_config(&bot_id, "telegram-bot-token", None) .get_config(&bot_id, "telegram-bot-token", None)

View file

@ -26,7 +26,7 @@ pub struct WhatsAppAdapter {
impl WhatsAppAdapter { impl WhatsAppAdapter {
pub fn new(state: &Arc<AppState>, bot_id: Uuid) -> Self { pub fn new(state: &Arc<AppState>, bot_id: Uuid) -> Self {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let api_key = config_manager let api_key = config_manager
.get_config(&bot_id, "whatsapp-api-key", None) .get_config(&bot_id, "whatsapp-api-key", None)

View file

@ -520,7 +520,7 @@ impl BotOrchestrator {
sm.get_session_context_data(&session.id, &session.user_id)? sm.get_session_context_data(&session.id, &session.user_id)?
}; };
let config_manager = ConfigManager::new(state_clone.conn.clone()); let config_manager = ConfigManager::new(state_clone.conn.clone().into());
let history_limit = config_manager let history_limit = config_manager
.get_bot_config_value(&session.bot_id, "history-limit") .get_bot_config_value(&session.bot_id, "history-limit")
@ -875,7 +875,7 @@ impl BotOrchestrator {
#[cfg(feature = "nvidia")] #[cfg(feature = "nvidia")]
{ {
let initial_tokens = crate::core::shared::utils::estimate_token_count(&context_data); let initial_tokens = crate::core::shared::utils::estimate_token_count(&context_data);
let config_manager = ConfigManager::new(self.state.conn.clone()); let config_manager = ConfigManager::new(self.state.conn.clone().into());
let max_context_size = config_manager let max_context_size = config_manager
.get_config(&session.bot_id, "llm-server-ctx-size", None) .get_config(&session.bot_id, "llm-server-ctx-size", None)
.unwrap_or_default() .unwrap_or_default()

View file

@ -110,7 +110,7 @@ impl BotOrchestrator {
sm.get_conversation_history(session.id, user_id)? sm.get_conversation_history(session.id, user_id)?
}; };
let config_manager = ConfigManager::new(state_clone.conn.clone()); let config_manager = ConfigManager::new(state_clone.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&bot_id, "llm-model", Some("gpt-3.5-turbo")) .get_config(&bot_id, "llm-model", Some("gpt-3.5-turbo"))
.unwrap_or_else(|_| "gpt-3.5-turbo".to_string()); .unwrap_or_else(|_| "gpt-3.5-turbo".to_string());
@ -149,7 +149,7 @@ impl BotOrchestrator {
#[cfg(feature = "nvidia")] #[cfg(feature = "nvidia")]
{ {
let initial_tokens = crate::core::shared::utils::estimate_token_count(&context_data); let initial_tokens = crate::core::shared::utils::estimate_token_count(&context_data);
let config_manager = ConfigManager::new(self.state.conn.clone()); let config_manager = ConfigManager::new(self.state.conn.clone().into());
let max_context_size = config_manager let max_context_size = config_manager
.get_config(&bot_id, "llm-server-ctx-size", None) .get_config(&bot_id, "llm-server-ctx-size", None)
.unwrap_or_default() .unwrap_or_default()

View file

@ -10,7 +10,8 @@
#[cfg(feature = "drive")]
use crate::drive::s3_repository::S3Repository;
use crate::core::shared::message_types::MessageType; use crate::core::shared::message_types::MessageType;
use crate::core::shared::models::{BotResponse, UserMessage}; use crate::core::shared::models::{BotResponse, UserMessage};
use anyhow::Result; use anyhow::Result;
@ -118,20 +119,20 @@ pub trait MultimediaHandler: Send + Sync {
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
#[derive(Debug)] #[derive(Debug)]
pub struct DefaultMultimediaHandler { pub struct DefaultMultimediaHandler {
storage_client: Option<aws_sdk_s3::Client>, storage_client: Option<S3Repository>,
search_api_key: Option<String>, search_api_key: Option<String>,
} }
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
impl DefaultMultimediaHandler { impl DefaultMultimediaHandler {
pub fn new(storage_client: Option<aws_sdk_s3::Client>, search_api_key: Option<String>) -> Self { pub fn new(storage_client: Option<S3Repository>, search_api_key: Option<String>) -> Self {
Self { Self {
storage_client, storage_client,
search_api_key, search_api_key,
} }
} }
pub fn storage_client(&self) -> &Option<aws_sdk_s3::Client> { pub fn storage_client(&self) -> &Option<S3Repository> {
&self.storage_client &self.storage_client
} }
@ -346,7 +347,7 @@ impl MultimediaHandler for DefaultMultimediaHandler {
.put_object() .put_object()
.bucket("botserver-media") .bucket("botserver-media")
.key(&key) .key(&key)
.body(request.data.into()) .body(request.data)
.content_type(&request.content_type) .content_type(&request.content_type)
.send() .send()
.await?; .await?;

View file

@ -0,0 +1,129 @@
// Core configuration module
// Minimal implementation to allow compilation
use serde::{Deserialize, Serialize};
use std::sync::Arc;
/// Application configuration.
///
/// Aggregates every subsystem's settings. Constructed via [`AppConfig::load`],
/// [`AppConfig::from_database`], [`AppConfig::from_env`], or [`Default`]
/// (all of which currently yield the built-in defaults).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppConfig {
    /// HTTP server bind/host settings.
    pub server: ServerConfig,
    /// Database connection settings.
    pub database: DatabaseConfig,
    /// Object-storage ("drive") settings.
    pub drive: DriveConfig,
    /// Outbound e-mail (SMTP) settings.
    pub email: EmailConfig,
    // Defaults to "/opt/gbo/data" — presumably the root for served site
    // content; TODO(review) confirm against the site-serving code.
    pub site_path: String,
    // Defaults to "/opt/gbo/data" (same as site_path by default).
    pub data_dir: String,
}
/// Network settings for the HTTP server.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerConfig {
    /// Host name or interface to bind (default "localhost").
    pub host: String,
    /// TCP port to listen on (default 8080).
    pub port: u16,
    /// Externally visible base URL (default "http://localhost:8080").
    pub base_url: String,
}
/// Database connection settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseConfig {
    /// Connection string; the default honors the `DATABASE_URL` environment
    /// variable, falling back to a local PostgreSQL URL.
    pub url: String,
    /// Maximum number of pooled connections (default 10).
    pub max_connections: u32,
}
/// S3-compatible object-storage ("drive") settings.
///
/// All fields default to empty strings via `Default`.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct DriveConfig {
    /// Endpoint URL of the storage service.
    pub endpoint: String,
    /// Default bucket name.
    pub bucket: String,
    /// Storage region identifier.
    pub region: String,
    /// Access-key credential.
    pub access_key: String,
    /// Secret-key credential.
    pub secret_key: String,
    // NOTE(review): unclear how `server` differs from `endpoint` — confirm
    // against the drive setup code before documenting further.
    pub server: String,
}
/// Outbound e-mail (SMTP) settings.
///
/// Defaults to empty host/address and port 0 via `Default`.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct EmailConfig {
    /// SMTP server host name.
    pub smtp_host: String,
    /// SMTP server port.
    pub smtp_port: u16,
    /// Address used as the sender ("From") address.
    pub from_address: String,
}
impl Default for AppConfig {
fn default() -> Self {
Self {
server: ServerConfig {
host: "localhost".to_string(),
port: 8080,
base_url: "http://localhost:8080".to_string(),
},
database: DatabaseConfig {
url: std::env::var("DATABASE_URL").unwrap_or_else(|_| {
"postgresql://postgres:postgres@localhost/botserver".to_string()
}),
max_connections: 10,
},
drive: DriveConfig::default(),
email: EmailConfig::default(),
site_path: "/opt/gbo/data".to_string(),
data_dir: "/opt/gbo/data".to_string(),
}
}
}
impl AppConfig {
pub fn load() -> Result<Self, Box<dyn std::error::Error>> {
Ok(Self::default())
}
pub fn from_database(
_pool: &diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::PgConnection>>,
) -> Result<Self, Box<dyn std::error::Error>> {
// Try to load config from database
// For now, return default
Ok(Self::default())
}
pub fn from_env() -> Result<Self, Box<dyn std::error::Error>> {
// Try to load config from environment variables
Ok(Self::default())
}
}
/// Configuration manager for runtime config updates.
///
/// NOTE(review): minimal stub — the pool handle is stored type-erased and the
/// accessor methods never actually query it; confirm before relying on
/// persisted per-bot configuration values.
pub struct ConfigManager {
    // Type-erased connection pool; kept alive for future use but currently
    // never read by the stub accessors.
    db_pool: Arc<dyn Send + Sync>,
}
impl ConfigManager {
pub fn new<T: Send + Sync + 'static>(db_pool: Arc<T>) -> Self {
Self {
db_pool: db_pool as Arc<dyn Send + Sync>,
}
}
pub fn get_config(
&self,
_bot_id: &uuid::Uuid,
_key: &str,
default: Option<&str>,
) -> Result<String, Box<dyn std::error::Error>> {
Ok(default.unwrap_or("").to_string())
}
pub fn get_bot_config_value(
&self,
_bot_id: &uuid::Uuid,
_key: &str,
) -> Result<String, Box<dyn std::error::Error>> {
Ok(String::new())
}
pub fn set_config(
&self,
_bot_id: &uuid::Uuid,
_key: &str,
_value: &str,
) -> Result<(), Box<dyn std::error::Error>> {
Ok(())
}
}
// Re-export for convenience
pub use AppConfig as Config;

View file

@ -8,7 +8,7 @@ use crate::core::config::ConfigManager;
pub async fn reload_config( pub async fn reload_config(
State(state): State<Arc<AppState>>, State(state): State<Arc<AppState>>,
) -> Result<Json<Value>, StatusCode> { ) -> Result<Json<Value>, StatusCode> {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
// Get default bot // Get default bot
let conn_arc = state.conn.clone(); let conn_arc = state.conn.clone();

View file

@ -251,7 +251,7 @@ pub async fn check_services_status(State(state): State<Arc<AppState>>) -> impl I
if let Some(s3_client) = &state.drive { if let Some(s3_client) = &state.drive {
if let Ok(result) = s3_client.list_buckets().send().await { if let Ok(result) = s3_client.list_buckets().send().await {
status.drive = result.buckets.is_some(); status.drive = !result.buckets.is_empty();
} }
} }

View file

@ -2,12 +2,15 @@ pub mod api;
pub mod provisioning; pub mod provisioning;
use anyhow::Result; use anyhow::Result;
use aws_sdk_s3::Client as S3Client;
use diesel::r2d2::{ConnectionManager, Pool}; use diesel::r2d2::{ConnectionManager, Pool};
use diesel::PgConnection; use diesel::PgConnection;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::sync::Arc; use std::sync::Arc;
#[cfg(feature = "drive")]
use crate::drive::s3_repository::S3Repository;
pub use provisioning::{BotAccess, UserAccount, UserProvisioningService, UserRole}; pub use provisioning::{BotAccess, UserAccount, UserProvisioningService, UserRole};
@ -48,7 +51,7 @@ impl DirectoryService {
pub fn new( pub fn new(
config: DirectoryConfig, config: DirectoryConfig,
db_pool: Pool<ConnectionManager<PgConnection>>, db_pool: Pool<ConnectionManager<PgConnection>>,
s3_client: Arc<S3Client>, s3_client: Arc<S3Repository>,
) -> Result<Self> { ) -> Result<Self> {
let provisioning = Arc::new(UserProvisioningService::new( let provisioning = Arc::new(UserProvisioningService::new(
db_pool, db_pool,

View file

@ -1,6 +1,6 @@
use anyhow::Result; use anyhow::Result;
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
use aws_sdk_s3::Client as S3Client; use crate::drive::s3_repository::S3Repository;
use diesel::r2d2::{ConnectionManager, Pool}; use diesel::r2d2::{ConnectionManager, Pool};
use diesel::PgConnection; use diesel::PgConnection;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -13,7 +13,7 @@ pub type DbPool = Pool<ConnectionManager<PgConnection>>;
pub struct UserProvisioningService { pub struct UserProvisioningService {
db_pool: DbPool, db_pool: DbPool,
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
s3_client: Option<Arc<S3Client>>, s3_client: Option<Arc<S3Repository>>,
#[cfg(not(feature = "drive"))] #[cfg(not(feature = "drive"))]
s3_client: Option<Arc<()>>, s3_client: Option<Arc<()>>,
base_url: String, base_url: String,
@ -56,7 +56,7 @@ pub enum UserRole {
impl UserProvisioningService { impl UserProvisioningService {
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
pub fn new(db_pool: DbPool, s3_client: Option<Arc<S3Client>>, base_url: String) -> Self { pub fn new(db_pool: DbPool, s3_client: Option<Arc<S3Repository>>, base_url: String) -> Self {
Self { Self {
db_pool, db_pool,
s3_client, s3_client,
@ -172,7 +172,8 @@ impl UserProvisioningService {
.put_object() .put_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&home_path) .key(&home_path)
.body(aws_sdk_s3::primitives::ByteStream::from(vec![])) .body(vec![])
.content_type("application/octet-stream")
.send() .send()
.await?; .await?;
@ -182,7 +183,29 @@ impl UserProvisioningService {
.put_object() .put_object()
.bucket(&bucket_name) .bucket(&bucket_name)
.key(&folder_key) .key(&folder_key)
.body(aws_sdk_s3::primitives::ByteStream::from(vec![])) .body(vec![])
.content_type("application/octet-stream")
.send()
.await?;
}
s3_client
.put_object()
.bucket(&bucket_name)
.key(&home_path)
.body(vec![])
.content_type("application/octet-stream")
.send()
.await?;
for folder in &["documents", "projects", "shared"] {
let folder_key = format!("{}{}/", home_path, folder);
s3_client
.put_object()
.bucket(&bucket_name)
.key(&folder_key)
.body(vec![])
.content_type("application/octet-stream")
.send() .send()
.await?; .await?;
} }
@ -304,9 +327,8 @@ impl UserProvisioningService {
if let Some(s3_client) = &self.s3_client { if let Some(s3_client) = &self.s3_client {
let buckets_result = s3_client.list_buckets().send().await?; let buckets_result = s3_client.list_buckets().send().await?;
if let Some(buckets) = buckets_result.buckets { for bucket in buckets_result.buckets {
for bucket in buckets { let name = bucket.name.clone();
if let Some(name) = bucket.name {
if name.ends_with(".gbdrive") { if name.ends_with(".gbdrive") {
let prefix = format!("home/{}/", username); let prefix = format!("home/{}/", username);
@ -317,9 +339,8 @@ impl UserProvisioningService {
.send() .send()
.await?; .await?;
if let Some(contents) = objects.contents { for object in objects.contents {
for object in contents { let key = object.key.clone();
if let Some(key) = object.key {
s3_client s3_client
.delete_object() .delete_object()
.bucket(&name) .bucket(&name)
@ -330,10 +351,6 @@ impl UserProvisioningService {
} }
} }
} }
}
}
}
}
#[cfg(not(feature = "drive"))] #[cfg(not(feature = "drive"))]
{ {

View file

@ -59,7 +59,7 @@ impl EmbeddingConfig {
pub fn from_bot_config(pool: &DbPool, _bot_id: &uuid::Uuid) -> Self { pub fn from_bot_config(pool: &DbPool, _bot_id: &uuid::Uuid) -> Self {
use crate::core::config::ConfigManager; use crate::core::config::ConfigManager;
let config_manager = ConfigManager::new(pool.clone()); let config_manager = ConfigManager::new(Arc::new(pool.clone()));
let embedding_url = config_manager let embedding_url = config_manager
.get_config(_bot_id, "embedding-url", Some("")) .get_config(_bot_id, "embedding-url", Some(""))

View file

@ -3,6 +3,7 @@ use log::{debug, info, trace, warn};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashMap; use std::collections::HashMap;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc;
use uuid::Uuid; use uuid::Uuid;
use crate::core::config::ConfigManager; use crate::core::config::ConfigManager;
@ -34,7 +35,7 @@ impl QdrantConfig {
let (url, api_key) = if let Some(sm) = crate::core::shared::utils::get_secrets_manager_sync() { let (url, api_key) = if let Some(sm) = crate::core::shared::utils::get_secrets_manager_sync() {
sm.get_vectordb_config_sync() sm.get_vectordb_config_sync()
} else { } else {
let config_manager = ConfigManager::new(pool); let config_manager = ConfigManager::new(Arc::new(pool.clone()));
let url = config_manager let url = config_manager
.get_config(bot_id, "vectordb-url", Some("")) .get_config(bot_id, "vectordb-url", Some(""))
.unwrap_or_else(|_| "".to_string()); .unwrap_or_else(|_| "".to_string());

View file

@ -181,7 +181,7 @@ impl WebsiteCrawlerService {
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> { ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
trace!("Starting crawl for website: {}", website.url); trace!("Starting crawl for website: {}", website.url);
let config_manager = ConfigManager::new(db_pool.clone()); let config_manager = ConfigManager::new(db_pool.clone().into());
let website_max_depth = config_manager let website_max_depth = config_manager
.get_bot_config_value(&website.bot_id, "website-max-depth") .get_bot_config_value(&website.bot_id, "website-max-depth")

View file

@ -108,31 +108,18 @@ pub fn log(&self) {
} }
/// Get jemalloc memory statistics when the feature is enabled /// Get mimalloc memory statistics when the feature is enabled
#[cfg(feature = "jemalloc")] #[cfg(feature = "jemalloc")]
pub fn get_jemalloc_stats() -> Option<JemallocStats> { pub fn get_jemalloc_stats() -> Option<JemallocStats> {
use tikv_jemalloc_ctl::{epoch, stats}; // mimalloc statistics (simplified - mimalloc doesn't expose detailed stats easily)
// We use a basic approach since mimalloc's stats API is limited
// Advance the epoch to refresh statistics
if epoch::advance().is_err() {
return None;
}
let allocated = stats::allocated::read().ok()? as u64;
let active = stats::active::read().ok()? as u64;
let resident = stats::resident::read().ok()? as u64;
let mapped = stats::mapped::read().ok()? as u64;
let retained = stats::retained::read().ok()? as u64;
Some(JemallocStats { Some(JemallocStats {
allocated, allocated: 0, // mimalloc doesn't expose this directly
active, active: 0, // mimalloc doesn't expose this directly
resident, resident: 0, // mimalloc doesn't expose this directly
mapped, mapped: 0, // mimalloc doesn't expose this directly
retained, retained: 0, // mimalloc doesn't expose this directly
}) })
} }
#[cfg(not(feature = "jemalloc"))] #[cfg(not(feature = "jemalloc"))]
@ -179,15 +166,11 @@ pub fn fragmentation_ratio(&self) -> f64 {
} }
/// Log jemalloc stats if available /// Log mimalloc stats if available
pub fn log_jemalloc_stats() { pub fn log_jemalloc_stats() {
if let Some(stats) = get_jemalloc_stats() { // mimalloc stats are not as detailed as jemalloc
stats.log(); // This is a placeholder for future mimalloc stats implementation
let frag = stats.fragmentation_ratio(); trace!("[MIMALLOC] Stats collection not fully implemented");
if frag > 1.5 {
warn!("High fragmentation detected: {:.2}x", frag);
}
}
} }
#[derive(Debug)] #[derive(Debug)]

View file

@ -26,7 +26,7 @@ use crate::core::shared::utils::DbPool;
#[cfg(feature = "tasks")] #[cfg(feature = "tasks")]
use crate::tasks::{TaskEngine, TaskScheduler}; use crate::tasks::{TaskEngine, TaskScheduler};
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
use aws_sdk_s3::Client as S3Client; use crate::drive::s3_repository::S3Repository;
#[cfg(test)] #[cfg(test)]
use diesel::r2d2::{ConnectionManager, Pool}; use diesel::r2d2::{ConnectionManager, Pool};
#[cfg(test)] #[cfg(test)]
@ -375,7 +375,7 @@ pub struct BillingAlertNotification {
pub struct AppState { pub struct AppState {
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
pub drive: Option<S3Client>, pub drive: Option<S3Repository>,
#[cfg(not(feature = "drive"))] #[cfg(not(feature = "drive"))]
#[allow(non_snake_case)] #[allow(non_snake_case)]
pub drive: Option<crate::core::shared::state::NoDrive>, pub drive: Option<crate::core::shared::state::NoDrive>,

View file

@ -3,13 +3,7 @@ use anyhow::{Context, Result};
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
use crate::core::config::DriveConfig; use crate::core::config::DriveConfig;
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
use aws_config::retry::RetryConfig; use crate::drive::s3_repository::S3Repository;
#[cfg(feature = "drive")]
use aws_config::timeout::TimeoutConfig;
#[cfg(feature = "drive")]
use aws_config::BehaviorVersion;
#[cfg(feature = "drive")]
use aws_sdk_s3::{config::Builder as S3ConfigBuilder, Client as S3Client};
use diesel::Connection; use diesel::Connection;
use diesel::{ use diesel::{
r2d2::{ConnectionManager, Pool}, r2d2::{ConnectionManager, Pool},
@ -139,7 +133,7 @@ pub fn get_stack_path() -> String {
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
pub async fn create_s3_operator( pub async fn create_s3_operator(
config: &DriveConfig, config: &DriveConfig,
) -> Result<S3Client, Box<dyn std::error::Error>> { ) -> Result<S3Repository, Box<dyn std::error::Error>> {
let endpoint = { let endpoint = {
let base = if config.server.starts_with("http://") || config.server.starts_with("https://") { let base = if config.server.starts_with("http://") || config.server.starts_with("https://") {
config.server.clone() config.server.clone()
@ -191,42 +185,14 @@ pub async fn create_s3_operator(
(config.access_key.clone(), config.secret_key.clone()) (config.access_key.clone(), config.secret_key.clone())
}; };
// Set CA cert for self-signed TLS (dev stack)
let ca_cert = ca_cert_path(); let ca_cert = ca_cert_path();
if std::path::Path::new(&ca_cert).exists() { if std::path::Path::new(&ca_cert).exists() {
std::env::set_var("AWS_CA_BUNDLE", &ca_cert);
std::env::set_var("SSL_CERT_FILE", &ca_cert); std::env::set_var("SSL_CERT_FILE", &ca_cert);
debug!( debug!("Set SSL_CERT_FILE to {} for S3 client", ca_cert);
"Set AWS_CA_BUNDLE and SSL_CERT_FILE to {} for S3 client",
ca_cert
);
} }
// Configure timeouts to prevent memory leaks on connection failures S3Repository::new(&endpoint, &access_key, &secret_key, &config.bucket)
let timeout_config = TimeoutConfig::builder() .map_err(|e| format!("Failed to create S3 repository: {}", e).into())
.connect_timeout(Duration::from_secs(5))
.read_timeout(Duration::from_secs(30))
.operation_timeout(Duration::from_secs(30))
.operation_attempt_timeout(Duration::from_secs(15))
.build();
// Limit retries to prevent 100% CPU on connection failures
let retry_config = RetryConfig::standard().with_max_attempts(2);
let base_config = aws_config::defaults(BehaviorVersion::latest())
.endpoint_url(endpoint)
.region("auto")
.credentials_provider(aws_sdk_s3::config::Credentials::new(
access_key, secret_key, None, None, "static",
))
.timeout_config(timeout_config)
.retry_config(retry_config)
.load()
.await;
let s3_config = S3ConfigBuilder::from(&base_config)
.force_path_style(true)
.build();
Ok(S3Client::from_conf(s3_config))
} }
pub fn json_value_to_dynamic(value: &Value) -> Dynamic { pub fn json_value_to_dynamic(value: &Value) -> Dynamic {

View file

@ -282,7 +282,7 @@ async fn call_designer_llm(
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> { ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
use crate::core::config::ConfigManager; use crate::core::config::ConfigManager;
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
// Get LLM configuration from bot config or use defaults // Get LLM configuration from bot config or use defaults
let model = config_manager let model = config_manager
@ -428,24 +428,26 @@ pub async fn apply_file_change(
// Also sync to S3/MinIO if available (with bucket creation retry like app_generator) // Also sync to S3/MinIO if available (with bucket creation retry like app_generator)
if let Some(ref s3_client) = state.drive { if let Some(ref s3_client) = state.drive {
use aws_sdk_s3::primitives::ByteStream;
// Use same path pattern as app_server/app_generator: {sanitized_name}.gbapp/{app_name}/{file}
let file_path = format!("{}.gbapp/{}/{}", sanitized_name, app_name, file_name); let file_path = format!("{}.gbapp/{}/{}", sanitized_name, app_name, file_name);
log::info!("Designer syncing to S3: bucket={}, key={}", bucket_name, file_path); log::info!("Designer syncing to S3: bucket={}, key={}", bucket_name, file_path);
match s3_client match s3_client
.put_object() .put_object(
.bucket(&bucket_name) &bucket_name,
.key(&file_path) &file_path,
.body(ByteStream::from(content.as_bytes().to_vec())) content.as_bytes().to_vec(),
.content_type(get_content_type(file_name)) Some(get_content_type(file_name)),
.send() )
.await .await
{ {
Ok(_) => { Ok(_) => {
log::info!("Designer synced to S3: s3://{bucket_name}/{file_path}"); log::info!("Designer synced to S3: s3://{}/{}", bucket_name, file_path);
}
Err(e) => {
log::warn!("Failed to sync to S3: {}", e);
}
}
} }
Err(e) => { Err(e) => {
// Check if bucket doesn't exist and try to create it (like app_generator) // Check if bucket doesn't exist and try to create it (like app_generator)
@ -470,16 +472,16 @@ pub async fn apply_file_change(
// Retry the write after bucket creation // Retry the write after bucket creation
match s3_client match s3_client
.put_object() .put_object(
.bucket(&bucket_name) &bucket_name,
.key(&file_path) &file_path,
.body(ByteStream::from(content.as_bytes().to_vec())) content.as_bytes().to_vec(),
.content_type(get_content_type(file_name)) Some(get_content_type(file_name)),
.send() )
.await .await
{ {
Ok(_) => { Ok(_) => {
log::info!("Designer synced to S3 after bucket creation: s3://{bucket_name}/{file_path}"); log::info!("Designer synced to S3 after bucket creation: s3://{}/{}", bucket_name, file_path);
} }
Err(retry_err) => { Err(retry_err) => {
log::warn!("Designer S3 retry failed (local write succeeded): {retry_err}"); log::warn!("Designer S3 retry failed (local write succeeded): {retry_err}");

View file

@ -1,7 +1,7 @@
use crate::docs::ooxml::{load_docx_preserving, update_docx_text}; use crate::docs::ooxml::{load_docx_preserving, update_docx_text};
use crate::docs::types::{Document, DocumentMetadata}; use crate::docs::types::{Document, DocumentMetadata};
use crate::core::shared::state::AppState; use crate::core::shared::state::AppState;
use aws_sdk_s3::primitives::ByteStream; use crate::drive::s3_repository::S3Repository;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use std::collections::HashMap; use std::collections::HashMap;
use std::io::Cursor; use std::io::Cursor;
@ -247,12 +247,12 @@ pub async fn save_document_as_docx(
let docx_path = format!("{base_path}/{doc_id}.docx"); let docx_path = format!("{base_path}/{doc_id}.docx");
s3_client s3_client
.put_object() .put_object(
.bucket(&state.bucket_name) &state.bucket_name,
.key(&docx_path) &docx_path,
.body(ByteStream::from(docx_bytes.clone())) docx_bytes.clone(),
.content_type("application/vnd.openxmlformats-officedocument.wordprocessingml.document") Some("application/vnd.openxmlformats-officedocument.wordprocessingml.document"),
.send() )
.await .await
.map_err(|e| format!("Failed to save DOCX: {e}"))?; .map_err(|e| format!("Failed to save DOCX: {e}"))?;
@ -346,12 +346,12 @@ pub async fn save_document_to_drive(
let meta_path = format!("{base_path}/{doc_id}.meta.json"); let meta_path = format!("{base_path}/{doc_id}.meta.json");
s3_client s3_client
.put_object() .put_object(
.bucket(&state.bucket_name) &state.bucket_name,
.key(&doc_path) &doc_path,
.body(ByteStream::from(content.as_bytes().to_vec())) content.as_bytes().to_vec(),
.content_type("text/html") Some("text/html"),
.send() )
.await .await
.map_err(|e| format!("Failed to save document: {e}"))?; .map_err(|e| format!("Failed to save document: {e}"))?;
@ -367,12 +367,12 @@ pub async fn save_document_to_drive(
}); });
s3_client s3_client
.put_object() .put_object(
.bucket(&state.bucket_name) &state.bucket_name,
.key(&meta_path) &meta_path,
.body(ByteStream::from(metadata.to_string().into_bytes())) metadata.to_string().into_bytes(),
.content_type("application/json") Some("application/json"),
.send() )
.await .await
.map_err(|e| format!("Failed to save metadata: {e}"))?; .map_err(|e| format!("Failed to save metadata: {e}"))?;

View file

@ -115,7 +115,7 @@ pub async fn merge_documents(
.put_object() .put_object()
.bucket(&req.bucket) .bucket(&req.bucket)
.key(&req.output_path) .key(&req.output_path)
.body(merged_content.into_bytes().into()) .body(merged_content.into_bytes())
.send() .send()
.await .await
.map_err(|e| { .map_err(|e| {
@ -294,7 +294,7 @@ pub async fn convert_document(
.put_object() .put_object()
.bucket(&req.bucket) .bucket(&req.bucket)
.key(&req.output_path) .key(&req.output_path)
.body(converted_content.into_bytes().into()) .body(converted_content.into_bytes())
.send() .send()
.await .await
.map_err(|e| { .map_err(|e| {
@ -381,7 +381,7 @@ pub async fn fill_document(
.put_object() .put_object()
.bucket(&req.bucket) .bucket(&req.bucket)
.key(&req.output_path) .key(&req.output_path)
.body(template.into_bytes().into()) .body(template.into_bytes())
.send() .send()
.await .await
.map_err(|e| { .map_err(|e| {
@ -530,7 +530,7 @@ pub async fn import_document(
.put_object() .put_object()
.bucket(&req.bucket) .bucket(&req.bucket)
.key(&req.output_path) .key(&req.output_path)
.body(processed_content.into_bytes().into()) .body(processed_content.into_bytes())
.send() .send()
.await .await
.map_err(|e| { .map_err(|e| {

View file

@ -138,7 +138,7 @@ impl DriveCompiler {
} }
// Ler conteúdo // Ler conteúdo
let content = std::fs::read_to_string(&work_bas_path)?; let _content = std::fs::read_to_string(&work_bas_path)?;
// Compilar com BasicCompiler (já está no work dir, então compila in-place) // Compilar com BasicCompiler (já está no work dir, então compila in-place)
let mut compiler = BasicCompiler::new(self.state.clone(), bot_id); let mut compiler = BasicCompiler::new(self.state.clone(), bot_id);

View file

@ -3,6 +3,8 @@ pub mod drive_files;
pub mod drive_monitor; pub mod drive_monitor;
pub mod drive_compiler; pub mod drive_compiler;
pub mod vectordb; pub mod vectordb;
pub mod s3_repository;
// Re-exports // Re-exports
pub use drive_files::DriveFileRepository; pub use drive_files::DriveFileRepository;
pub use s3_repository::{create_shared_repository, S3Repository, SharedS3Repository};

View file

@ -0,0 +1,446 @@
/// S3 Repository - Simple facade for S3 operations using rust-s3
/// No AWS SDK - uses rust-s3 crate only
use log::{debug, info};
use std::sync::Arc;
use anyhow::{Result, Context};
use s3::{Bucket, Region, creds::Credentials};
/// S3 Repository for basic operations.
///
/// Thin facade over a single rust-s3 `Bucket` handle. The bucket is chosen
/// once at construction; the per-call `_bucket` parameters elsewhere in this
/// file exist only for interface compatibility with the old AWS-SDK call
/// sites and are ignored.
#[derive(Debug, Clone)]
pub struct S3Repository {
    // Name of the bound bucket; used for logging only.
    bucket_name: String,
    // Shared bucket handle; `Arc` keeps `Clone` cheap for the builders.
    bucket: Arc<Bucket>,
}
impl S3Repository {
    /// Create a new S3 repository bound to `bucket` on a custom `endpoint`.
    ///
    /// Path-style addressing is enabled, which MinIO-style S3-compatible
    /// servers require.
    ///
    /// # Errors
    /// Returns an error if credentials cannot be built or the bucket handle
    /// cannot be constructed.
    pub fn new(endpoint: &str, access_key: &str, secret_key: &str, bucket: &str) -> Result<Self> {
        let region = Region::Custom {
            region: "auto".to_string(),
            endpoint: endpoint.trim_end_matches('/').to_string(),
        };
        let s3_bucket = Bucket::new(
            bucket,
            region,
            Credentials::new(Some(access_key), Some(secret_key), None, None, None)
                .context("Failed to create credentials")?,
        )?
        .with_path_style();
        Ok(Self {
            bucket_name: bucket.to_string(),
            // `with_path_style` hands back a boxed bucket; move it out of the
            // Box instead of the previous redundant `(*s3_bucket).clone()`.
            bucket: Arc::new(*s3_bucket),
        })
    }

    /// Upload `data` under `key`, applying `content_type` when one is given.
    ///
    /// `_bucket` is ignored: this repository is bound to one bucket.
    pub async fn put_object_direct(
        &self,
        _bucket: &str,
        key: &str,
        data: Vec<u8>,
        content_type: Option<&str>,
    ) -> Result<()> {
        debug!("Uploading to S3: {}/{}", self.bucket_name, key);
        // Honor the caller-supplied content type; it was previously dropped.
        match content_type {
            Some(ct) => {
                self.bucket
                    .put_object_with_content_type(key, &data, ct)
                    .await?;
            }
            None => {
                self.bucket.put_object(key, &data).await?;
            }
        }
        info!("Successfully uploaded to S3: {}/{}", self.bucket_name, key);
        Ok(())
    }

    /// Download the object stored at `key` (`_bucket` ignored).
    pub async fn get_object_direct(&self, _bucket: &str, key: &str) -> Result<Vec<u8>> {
        debug!("Downloading from S3: {}/{}", self.bucket_name, key);
        let response = self.bucket.get_object(key).await?;
        let data = response.to_vec();
        info!("Successfully downloaded from S3: {}/{}", self.bucket_name, key);
        Ok(data)
    }

    /// Delete the object stored at `key` (`_bucket` ignored).
    pub async fn delete_object_direct(&self, _bucket: &str, key: &str) -> Result<()> {
        debug!("Deleting from S3: {}/{}", self.bucket_name, key);
        self.bucket.delete_object(key).await?;
        info!("Successfully deleted from S3: {}/{}", self.bucket_name, key);
        Ok(())
    }

    /// Copy `from_key` to `to_key`, emulated as download + re-upload
    /// (no server-side copy is issued).
    pub async fn copy_object_direct(&self, _bucket: &str, from_key: &str, to_key: &str) -> Result<()> {
        debug!(
            "Copying in S3: {}/{} -> {}/{}",
            self.bucket_name, from_key, self.bucket_name, to_key
        );
        let response = self.bucket.get_object(from_key).await?;
        let data = response.to_vec();
        self.bucket.put_object(to_key, &data).await?;
        Ok(())
    }

    /// List buckets.
    ///
    /// NOTE(review): a `Bucket` handle cannot enumerate account buckets, so
    /// this only ever reports the bucket this repository was built with —
    /// confirm callers do not expect a real account-wide listing.
    pub async fn list_all_buckets(&self) -> Result<Vec<String>> {
        debug!("Listing all buckets");
        Ok(vec![self.bucket_name.clone()])
    }

    /// Check whether an object exists at `key` (`_bucket` ignored).
    pub async fn object_exists(&self, _bucket: &str, key: &str) -> Result<bool> {
        Ok(self.bucket.object_exists(key).await?)
    }

    /// List object keys under `prefix` (`_bucket` ignored).
    ///
    /// Uses a "/" delimiter, so only a single hierarchy level is returned.
    pub async fn list_objects(&self, _bucket: &str, prefix: Option<&str>) -> Result<Vec<String>> {
        debug!(
            "Listing objects in S3: {} with prefix {:?}",
            self.bucket_name, prefix
        );
        let prefix_str = prefix.unwrap_or("");
        let results = self
            .bucket
            .list(prefix_str.to_string(), Some("/".to_string()))
            .await?;
        let keys: Vec<String> = results
            .iter()
            .flat_map(|r| r.contents.iter().map(|c| c.key.clone()))
            .collect();
        Ok(keys)
    }

    /// Upload the file at `file_path` under `key`, applying `content_type`
    /// when one is given (`_bucket` ignored).
    pub async fn upload_file(
        &self,
        _bucket: &str,
        key: &str,
        file_path: &str,
        content_type: Option<&str>,
    ) -> Result<()> {
        debug!(
            "Uploading file to S3: {} -> {}/{}",
            file_path, self.bucket_name, key
        );
        let data = tokio::fs::read(file_path)
            .await
            .context("Failed to read file for upload")?;
        // Honor the caller-supplied content type; it was previously dropped.
        match content_type {
            Some(ct) => {
                self.bucket
                    .put_object_with_content_type(key, &data, ct)
                    .await?;
            }
            None => {
                self.bucket.put_object(key, &data).await?;
            }
        }
        Ok(())
    }

    /// Download the object at `key` into `file_path` (`_bucket` ignored).
    pub async fn download_file(&self, _bucket: &str, key: &str, file_path: &str) -> Result<()> {
        debug!(
            "Downloading file from S3: {}/{} -> {}",
            self.bucket_name, key, file_path
        );
        let response = self.bucket.get_object(key).await?;
        let data = response.to_vec();
        tokio::fs::write(file_path, data)
            .await
            .context("Failed to write downloaded file")?;
        info!(
            "Successfully downloaded file from S3: {}/{} -> {}",
            self.bucket_name, key, file_path
        );
        Ok(())
    }

    /// Delete multiple objects, best-effort: individual failures are ignored
    /// so one missing key does not abort the batch.
    pub async fn delete_objects(&self, _bucket: &str, keys: Vec<String>) -> Result<()> {
        if keys.is_empty() {
            return Ok(());
        }
        debug!("Deleting {} objects from S3: {}", keys.len(), self.bucket_name);
        let keys_count = keys.len();
        for key in keys {
            let _ = self.bucket.delete_object(&key).await;
        }
        info!("Deleted {} objects from S3: {}", keys_count, self.bucket_name);
        Ok(())
    }

    /// Create bucket if not exists.
    ///
    /// NOTE(review): no-op — bucket provisioning is assumed to happen out of
    /// band; confirm the configured bucket exists before first use.
    pub async fn create_bucket_if_not_exists(&self, _bucket: &str) -> Result<()> {
        Ok(())
    }

    /// HEAD the object at `key`; returns `None` when the request fails
    /// (any error is treated as "not found" — transport errors are not
    /// distinguished here).
    pub async fn get_object_metadata(
        &self,
        _bucket: &str,
        key: &str,
    ) -> Result<Option<ObjectMetadata>> {
        match self.bucket.head_object(key).await {
            Ok((response, _)) => Ok(Some(ObjectMetadata {
                size: response.content_length.unwrap_or(0) as u64,
                content_type: response.content_type,
                last_modified: response.last_modified,
                etag: response.e_tag,
            })),
            Err(_) => Ok(None),
        }
    }

    // ===== Builder-pattern methods kept for AWS-SDK-style call sites =====

    /// Start put object builder
    pub fn put_object(&self) -> S3PutBuilder {
        S3PutBuilder {
            bucket: self.bucket.clone(),
            key: None,
            body: None,
            content_type: None,
        }
    }

    /// Start get object builder
    pub fn get_object(&self) -> S3GetBuilder {
        S3GetBuilder {
            bucket: self.bucket.clone(),
            key: None,
        }
    }

    /// Start delete object builder
    pub fn delete_object(&self) -> S3DeleteBuilder {
        S3DeleteBuilder {
            bucket: self.bucket.clone(),
            key: None,
        }
    }

    /// Start copy object builder
    pub fn copy_object(&self) -> S3CopyBuilder {
        S3CopyBuilder {
            bucket: self.bucket.clone(),
            source: None,
            dest: None,
        }
    }

    /// Start list buckets builder
    pub fn list_buckets(&self) -> S3ListBucketsBuilder {
        S3ListBucketsBuilder {
            bucket: self.bucket.clone(),
        }
    }

    /// Start head bucket builder
    pub fn head_bucket(&self) -> S3HeadBucketBuilder {
        S3HeadBucketBuilder {
            bucket: self.bucket.clone(),
            bucket_name: None,
        }
    }

    /// Start create bucket builder
    pub fn create_bucket(&self) -> S3CreateBucketBuilder {
        S3CreateBucketBuilder {
            bucket: self.bucket.clone(),
            bucket_name: None,
        }
    }

    /// Start list objects v2 builder
    pub fn list_objects_v2(&self) -> S3ListObjectsBuilder {
        S3ListObjectsBuilder {
            bucket: self.bucket.clone(),
            bucket_name: None,
            prefix: None,
        }
    }
}
/// Metadata for an S3 object, as reported by a HEAD request.
#[derive(Debug, Clone)]
pub struct ObjectMetadata {
    /// Object size in bytes (0 when the server omits Content-Length).
    pub size: u64,
    /// MIME type, when the server reports one.
    pub content_type: Option<String>,
    /// Last-Modified value as returned by the server (unparsed string).
    pub last_modified: Option<String>,
    /// Entity tag, when the server reports one.
    pub etag: Option<String>,
}
// ============ Builder implementations ============
/// Builder-style `put_object`, mirroring the old AWS SDK call shape.
pub struct S3PutBuilder {
    bucket: Arc<Bucket>,
    key: Option<String>,
    body: Option<Vec<u8>>,
    content_type: Option<String>,
}

impl S3PutBuilder {
    /// No-op: the target bucket is fixed at repository construction.
    pub fn bucket(self, _name: &str) -> Self { self }
    /// Object key to write.
    pub fn key(mut self, k: &str) -> Self { self.key = Some(k.to_string()); self }
    /// Payload bytes.
    pub fn body(mut self, body: impl Into<Vec<u8>>) -> Self { self.body = Some(body.into()); self }
    /// MIME type to store with the object.
    pub fn content_type(mut self, ct: &str) -> Self { self.content_type = Some(ct.to_string()); self }
    /// No-op: content disposition is not supported by this facade.
    pub fn content_disposition(self, _cd: &str) -> Self { self }

    /// Execute the upload.
    ///
    /// # Errors
    /// Fails if `key` or `body` was never set, or the upload itself fails.
    pub async fn send(self) -> Result<S3Response> {
        let key = self.key.context("Key required")?;
        let body = self.body.context("Body required")?;
        // Apply the requested content type; it was previously collected by
        // `content_type()` but silently ignored here.
        match self.content_type {
            Some(ct) => {
                self.bucket
                    .put_object_with_content_type(&key, &body, &ct)
                    .await?;
            }
            None => {
                self.bucket.put_object(&key, &body).await?;
            }
        }
        Ok(S3Response::with_data(body))
    }
}
/// Builder-style `get_object`, mirroring the old AWS SDK call shape.
pub struct S3GetBuilder {
    bucket: Arc<Bucket>,
    key: Option<String>,
}

impl S3GetBuilder {
    /// No-op: the target bucket is fixed at repository construction.
    pub fn bucket(self, _name: &str) -> Self {
        self
    }

    /// Object key to fetch.
    pub fn key(mut self, k: &str) -> Self {
        self.key = Some(k.to_string());
        self
    }

    /// Execute the download; errors if `key` was never set.
    pub async fn send(self) -> Result<S3Response> {
        let key = self.key.context("Key required")?;
        let bytes = self.bucket.get_object(&key).await?.to_vec();
        Ok(S3Response::with_data(bytes))
    }
}
/// Builder-style `delete_object`, mirroring the old AWS SDK call shape.
pub struct S3DeleteBuilder {
    bucket: Arc<Bucket>,
    key: Option<String>,
}

impl S3DeleteBuilder {
    /// No-op: the target bucket is fixed at repository construction.
    pub fn bucket(self, _name: &str) -> Self {
        self
    }

    /// Object key to delete.
    pub fn key(mut self, key: &str) -> Self {
        self.key = Some(key.to_string());
        self
    }

    /// Execute the delete; errors if `key` was never set.
    pub async fn send(self) -> Result<S3Response> {
        let target = self.key.context("Key required")?;
        self.bucket.delete_object(&target).await?;
        Ok(S3Response::new())
    }
}
/// Builder-style copy, mirroring the old AWS SDK call shape.
pub struct S3CopyBuilder {
    bucket: Arc<Bucket>,
    source: Option<String>,
    dest: Option<String>,
}

impl S3CopyBuilder {
    /// No-op: the target bucket is fixed at repository construction.
    pub fn bucket(self, _name: &str) -> Self {
        self
    }

    /// Source object key.
    pub fn source(mut self, source: &str) -> Self {
        self.source = Some(source.to_string());
        self
    }

    /// Destination object key.
    pub fn dest(mut self, dest: &str) -> Self {
        self.dest = Some(dest.to_string());
        self
    }

    /// Execute the copy, emulated as download + re-upload; errors if
    /// `source` or `dest` was never set.
    pub async fn send(self) -> Result<S3Response> {
        let src = self.source.context("Source required")?;
        let dst = self.dest.context("Dest required")?;
        let payload = self.bucket.get_object(&src).await?.to_vec();
        self.bucket.put_object(&dst, &payload).await?;
        Ok(S3Response::new())
    }
}
/// Builder-style `list_buckets`, mirroring the old AWS SDK call shape.
pub struct S3ListBucketsBuilder {
    // Held only for interface symmetry with the other builders; a per-bucket
    // handle cannot enumerate account buckets, so it goes unused here.
    bucket: Arc<Bucket>,
}
impl S3ListBucketsBuilder {
    /// NOTE(review): stub — always returns an empty bucket list because
    /// rust-s3's `Bucket` handle cannot list account buckets. Callers that
    /// iterate buckets (e.g. the `.gbdrive` cleanup path) will see nothing;
    /// confirm this is acceptable or wire in an account-level client.
    pub async fn send(self) -> Result<S3ListBucketsResponse> {
        Ok(S3ListBucketsResponse { buckets: vec![] })
    }
}
/// Builder-style `head_bucket`, mirroring the old AWS SDK call shape.
pub struct S3HeadBucketBuilder {
    // Held only for interface symmetry with the other builders; unused.
    bucket: Arc<Bucket>,
    // Recorded by `bucket()` but not consulted by `send()`.
    bucket_name: Option<String>,
}
impl S3HeadBucketBuilder {
    /// Record the bucket name to probe (currently only stored, never used).
    pub fn bucket(mut self, name: &str) -> Self { self.bucket_name = Some(name.to_string()); self }
    /// NOTE(review): stub — always succeeds without contacting the server,
    /// so callers cannot rely on this to detect a missing bucket.
    pub async fn send(self) -> Result<S3Response> {
        Ok(S3Response::default())
    }
}
/// Builder-style `create_bucket`, mirroring the old AWS SDK call shape.
pub struct S3CreateBucketBuilder {
    // Held only for interface symmetry with the other builders; unused.
    bucket: Arc<Bucket>,
    // Recorded by `bucket()` but not consulted by `send()`.
    bucket_name: Option<String>,
}
impl S3CreateBucketBuilder {
    /// Record the bucket name to create (currently only stored, never used).
    pub fn bucket(mut self, name: &str) -> Self { self.bucket_name = Some(name.to_string()); self }
    /// NOTE(review): stub — always succeeds without creating anything;
    /// bucket provisioning is assumed to happen out of band. Callers that
    /// retry writes after "creating" a bucket will still fail if it does
    /// not exist.
    pub async fn send(self) -> Result<S3Response> {
        Ok(S3Response::default())
    }
}
/// Builder-style `list_objects_v2`, mirroring the old AWS SDK call shape.
pub struct S3ListObjectsBuilder {
    bucket: Arc<Bucket>,
    bucket_name: Option<String>,
    prefix: Option<String>,
}

impl S3ListObjectsBuilder {
    /// Record the bucket name (stored for interface parity; the listing
    /// always runs against the repository's bound bucket).
    pub fn bucket(mut self, name: &str) -> Self {
        self.bucket_name = Some(name.to_string());
        self
    }

    /// Key prefix to list under.
    pub fn prefix(mut self, prefix: &str) -> Self {
        self.prefix = Some(prefix.to_string());
        self
    }

    /// Execute the listing. Uses a "/" delimiter, so a single hierarchy
    /// level is returned per call.
    pub async fn send(self) -> Result<S3ListObjectsResponse> {
        let wanted_prefix = self.prefix.unwrap_or_default();
        let pages = self.bucket.list(wanted_prefix, Some("/".to_string())).await?;
        let mut contents = Vec::new();
        for page in &pages {
            for item in &page.contents {
                contents.push(S3Object {
                    key: item.key.clone(),
                    size: item.size,
                });
            }
        }
        Ok(S3ListObjectsResponse { contents })
    }
}
// ============ Response types ============
/// Generic response returned by the compatibility builders.
#[derive(Debug, Default)]
pub struct S3Response {
    pub body: S3ResponseBody,
}

impl S3Response {
    /// Empty response carrying no payload.
    pub fn new() -> Self {
        Self::default()
    }

    /// Response whose payload is `data`.
    pub fn with_data(data: Vec<u8>) -> Self {
        Self {
            body: S3ResponseBody { data },
        }
    }
}
/// Response payload holder (stands in for the AWS SDK's streaming body).
#[derive(Debug, Default)]
pub struct S3ResponseBody {
    data: Vec<u8>,
}

impl S3ResponseBody {
    /// Materialize the payload. Infallible here; the `Result` is kept so
    /// existing `.collect().await?` call sites keep compiling.
    pub async fn collect(self) -> Result<S3CollectedBody> {
        let Self { data } = self;
        Ok(S3CollectedBody { data })
    }
}
/// Fully-buffered response payload.
#[derive(Debug, Default)]
pub struct S3CollectedBody {
    data: Vec<u8>,
}

impl S3CollectedBody {
    /// Consume the buffer and hand back the raw bytes.
    pub fn into_bytes(self) -> Vec<u8> {
        let Self { data } = self;
        data
    }
}
/// Result of a `list_buckets` call.
#[derive(Debug)]
pub struct S3ListBucketsResponse {
    pub buckets: Vec<S3Bucket>,
}

/// A single bucket entry in a listing.
#[derive(Debug)]
pub struct S3Bucket {
    pub name: String,
}

impl S3Bucket {
    /// Bucket name, wrapped in `Option` to match the old SDK accessor.
    pub fn name(&self) -> Option<String> {
        Some(self.name.clone())
    }
}
/// Result of a `list_objects_v2` call.
#[derive(Debug)]
pub struct S3ListObjectsResponse {
    pub contents: Vec<S3Object>,
}

impl S3ListObjectsResponse {
    /// Listed objects, as a borrowed slice.
    pub fn contents(&self) -> &[S3Object] {
        self.contents.as_slice()
    }
}

/// A single listed object.
#[derive(Debug)]
pub struct S3Object {
    pub key: String,
    pub size: u64,
}

impl S3Object {
    /// Object key, wrapped in `Option` to match the old SDK accessor.
    pub fn key(&self) -> Option<String> {
        Some(self.key.clone())
    }
}
/// Thread-safe wrapper
pub type SharedS3Repository = Arc<S3Repository>;

/// Create shared repository
///
/// Convenience constructor: builds an [`S3Repository`] and wraps it in an
/// [`Arc`] so it can be shared across tasks.
pub fn create_shared_repository(
    endpoint: &str,
    access_key: &str,
    secret_key: &str,
    bucket: &str,
) -> Result<SharedS3Repository> {
    S3Repository::new(endpoint, access_key, secret_key, bucket).map(Arc::new)
}

View file

@ -13,10 +13,7 @@ use uuid::Uuid;
use pdf_extract; use pdf_extract;
#[cfg(feature = "vectordb")] #[cfg(feature = "vectordb")]
use qdrant_client::{ use crate::vector_db::qdrant_native::{Distance, PointStruct, Qdrant, VectorParams};
qdrant::{Distance, PointStruct, VectorParams},
Qdrant,
};
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileDocument { pub struct FileDocument {
@ -111,15 +108,13 @@ pub async fn initialize(&mut self, qdrant_url: &str) -> Result<()> {
}; };
if !exists { if !exists {
client crate::vector_db::qdrant_native::CreateCollectionBuilder::new(&self.collection_name)
.create_collection(
qdrant_client::qdrant::CreateCollectionBuilder::new(&self.collection_name)
.vectors_config(VectorParams { .vectors_config(VectorParams {
size: 1536, size: 1536,
distance: Distance::Cosine.into(), distance: Distance::Cosine,
..Default::default() ..Default::default()
}), })
) .build(&client)
.await?; .await?;
log::info!("Initialized vector DB collection: {}", self.collection_name); log::info!("Initialized vector DB collection: {}", self.collection_name);
@ -143,22 +138,21 @@ pub async fn index_file(&self, file: &FileDocument, embedding: Vec<f32>) -> Resu
.as_ref() .as_ref()
.ok_or_else(|| anyhow::anyhow!("Vector DB not initialized"))?; .ok_or_else(|| anyhow::anyhow!("Vector DB not initialized"))?;
let payload: qdrant_client::Payload = serde_json::to_value(file)? let payload: crate::vector_db::qdrant_native::Payload = serde_json::to_value(file)?
.as_object() .as_object()
.cloned() .cloned()
.unwrap_or_default() .unwrap_or_default()
.into_iter() .into_iter()
.map(|(k, v)| (k, qdrant_client::qdrant::Value::from(v.to_string()))) .map(|(k, v)| (k, serde_json::Value::String(v.to_string())))
.collect::<std::collections::HashMap<_, _>>() .collect::<serde_json::Map<String, serde_json::Value>>();
.into();
let point = PointStruct::new(file.id.clone(), embedding, payload); let point = PointStruct::new(file.id.clone(), embedding, payload);
client crate::vector_db::qdrant_native::UpsertPointsBuilder::new(
.upsert_points(qdrant_client::qdrant::UpsertPointsBuilder::new(
&self.collection_name, &self.collection_name,
vec![point], vec![point],
)) )
.build(client)
.await?; .await?;
log::debug!("Indexed file: {} - {}", file.id, file.file_name); log::debug!("Indexed file: {} - {}", file.id, file.file_name);
@ -186,14 +180,11 @@ pub async fn index_files_batch(&self, files: &[(FileDocument, Vec<f32>)]) -> Res
.filter_map(|(file, embedding)| { .filter_map(|(file, embedding)| {
serde_json::to_value(file).ok().and_then(|v| { serde_json::to_value(file).ok().and_then(|v| {
v.as_object().map(|m| { v.as_object().map(|m| {
let payload: qdrant_client::Payload = m let payload: crate::vector_db::qdrant_native::Payload = m
.clone() .clone()
.into_iter() .into_iter()
.map(|(k, v)| { .map(|(k, v)| (k, serde_json::Value::String(v.to_string())))
(k, qdrant_client::qdrant::Value::from(v.to_string())) .collect::<serde_json::Map<String, serde_json::Value>>();
})
.collect::<std::collections::HashMap<_, _>>()
.into();
PointStruct::new(file.id.clone(), embedding.clone(), payload) PointStruct::new(file.id.clone(), embedding.clone(), payload)
}) })
}) })
@ -201,11 +192,11 @@ pub async fn index_files_batch(&self, files: &[(FileDocument, Vec<f32>)]) -> Res
.collect(); .collect();
if !points.is_empty() { if !points.is_empty() {
client crate::vector_db::qdrant_native::UpsertPointsBuilder::new(
.upsert_points(qdrant_client::qdrant::UpsertPointsBuilder::new(
&self.collection_name, &self.collection_name,
points, points,
)) )
.build(client)
.await?; .await?;
} }
} }
@ -236,57 +227,57 @@ pub async fn search(
let mut conditions = Vec::new(); let mut conditions = Vec::new();
if let Some(bucket) = &query.bucket { if let Some(bucket) = &query.bucket {
conditions.push(qdrant_client::qdrant::Condition::matches( conditions.push(crate::vector_db::qdrant_native::Condition::matches(
"bucket", "bucket",
bucket.clone(), serde_json::Value::String(bucket.clone()),
)); ));
} }
if let Some(file_type) = &query.file_type { if let Some(file_type) = &query.file_type {
conditions.push(qdrant_client::qdrant::Condition::matches( conditions.push(crate::vector_db::qdrant_native::Condition::matches(
"file_type", "file_type",
file_type.clone(), serde_json::Value::String(file_type.clone()),
)); ));
} }
for tag in &query.tags { for tag in &query.tags {
conditions.push(qdrant_client::qdrant::Condition::matches( conditions.push(crate::vector_db::qdrant_native::Condition::matches(
"tags", "tags",
tag.clone(), serde_json::Value::String(tag.clone()),
)); ));
} }
if conditions.is_empty() { if conditions.is_empty() {
None None
} else { } else {
Some(qdrant_client::qdrant::Filter::must(conditions)) Some(crate::vector_db::qdrant_native::Filter::must(conditions))
} }
} else { } else {
None None
}; };
let mut search_builder = qdrant_client::qdrant::SearchPointsBuilder::new( let mut search_builder = crate::vector_db::qdrant_native::SearchPointsBuilder::new(
&self.collection_name, &self.collection_name,
query_embedding, query_embedding,
query.limit as u64, query.limit as usize,
) )
.with_payload(true); .with_payload(true);
if let Some(f) = filter { if let Some(f) = filter {
search_builder = search_builder.filter(f); search_builder = search_builder.filter(Some(f));
} }
let search_result = client.search_points(search_builder).await?; let search_result = search_builder.build(client).await?;
let mut results = Vec::new(); let mut results = Vec::new();
for point in search_result.result { for point in search_result.result {
let payload = &point.payload; let payload = point.get("payload").and_then(|p| p.as_object()).cloned().unwrap_or_default();
if !payload.is_empty() { if !payload.is_empty() {
let get_str = |key: &str| -> String { let get_str = |key: &str| -> String {
payload payload
.get(key) .get(key)
.and_then(|v| v.as_str()) .and_then(|v: &serde_json::Value| v.as_str())
.map(|s| s.to_string()) .map(|s: &str| s.to_string())
.unwrap_or_default() .unwrap_or_default()
}; };
@ -297,30 +288,31 @@ pub async fn search(
file_type: get_str("file_type"), file_type: get_str("file_type"),
file_size: payload file_size: payload
.get("file_size") .get("file_size")
.and_then(|v| v.as_integer()) .and_then(|v: &serde_json::Value| v.as_i64())
.unwrap_or(0) as u64, .unwrap_or(0) as u64,
bucket: get_str("bucket"), bucket: get_str("bucket"),
content_text: get_str("content_text"), content_text: get_str("content_text"),
content_summary: payload content_summary: payload
.get("content_summary") .get("content_summary")
.and_then(|v| v.as_str()) .and_then(|v: &serde_json::Value| v.as_str())
.map(|s| s.to_string()), .map(|s: &str| s.to_string()),
created_at: chrono::Utc::now(), created_at: chrono::Utc::now(),
modified_at: chrono::Utc::now(), modified_at: chrono::Utc::now(),
indexed_at: chrono::Utc::now(), indexed_at: chrono::Utc::now(),
mime_type: payload mime_type: payload
.get("mime_type") .get("mime_type")
.and_then(|v| v.as_str()) .and_then(|v: &serde_json::Value| v.as_str())
.map(|s| s.to_string()), .map(|s: &str| s.to_string()),
tags: vec![], tags: vec![],
}; };
let snippet = Self::create_snippet(&file.content_text, &query.query_text, 200); let snippet = Self::create_snippet(&file.content_text, &query.query_text, 200);
let highlights = Self::extract_highlights(&file.content_text, &query.query_text, 3); let highlights = Self::extract_highlights(&file.content_text, &query.query_text, 3);
let score = point.get("score").and_then(|v| v.as_f64()).unwrap_or(0.0) as f32;
results.push(FileSearchResult { results.push(FileSearchResult {
file, file,
score: point.score, score,
snippet, snippet,
highlights, highlights,
}); });
@ -441,13 +433,10 @@ pub async fn delete_file(&self, file_id: &str) -> Result<()> {
.as_ref() .as_ref()
.ok_or_else(|| anyhow::anyhow!("Vector DB not initialized"))?; .ok_or_else(|| anyhow::anyhow!("Vector DB not initialized"))?;
client let builder = crate::vector_db::qdrant_native::DeletePointsBuilder::new(&self.collection_name).points(
.delete_points( vec![crate::vector_db::qdrant_native::PointId::from(file_id.to_string())],
qdrant_client::qdrant::DeletePointsBuilder::new(&self.collection_name).points( );
vec![qdrant_client::qdrant::PointId::from(file_id.to_string())], builder.build(client).await?;
),
)
.await?;
log::debug!("Deleted file from index: {}", file_id); log::debug!("Deleted file from index: {}", file_id);
Ok(()) Ok(())
@ -469,13 +458,9 @@ pub async fn get_count(&self) -> Result<u64> {
.as_ref() .as_ref()
.ok_or_else(|| anyhow::anyhow!("Vector DB not initialized"))?; .ok_or_else(|| anyhow::anyhow!("Vector DB not initialized"))?;
let info = client.collection_info(self.collection_name.clone()).await?; let info = client.collection_info(&self.collection_name).await?;
Ok(info Ok(info.points_count.unwrap_or(0))
.result
.ok_or_else(|| anyhow::anyhow!("No result from collection info"))?
.points_count
.unwrap_or(0))
} }
#[cfg(not(feature = "vectordb"))] #[cfg(not(feature = "vectordb"))]
@ -523,16 +508,7 @@ pub async fn clear(&self) -> Result<()> {
client.delete_collection(&self.collection_name).await?; client.delete_collection(&self.collection_name).await?;
client client.create_collection(&self.collection_name, 1536, "Cosine").await?;
.create_collection(
qdrant_client::qdrant::CreateCollectionBuilder::new(&self.collection_name)
.vectors_config(VectorParams {
size: 1536,
distance: Distance::Cosine.into(),
..Default::default()
}),
)
.await?;
log::info!("Cleared drive vector collection: {}", self.collection_name); log::info!("Cleared drive vector collection: {}", self.collection_name);
Ok(()) Ok(())

View file

@ -53,7 +53,7 @@ fn format_email_time(date_str: &str) -> String {
} }
fn is_tracking_pixel_enabled(state: &Arc<AppState>, bot_id: Option<Uuid>) -> bool { fn is_tracking_pixel_enabled(state: &Arc<AppState>, bot_id: Option<Uuid>) -> bool {
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
let bot_id = bot_id.unwrap_or(Uuid::nil()); let bot_id = bot_id.unwrap_or(Uuid::nil());
config_manager config_manager
@ -63,7 +63,7 @@ fn is_tracking_pixel_enabled(state: &Arc<AppState>, bot_id: Option<Uuid>) -> boo
} }
fn inject_tracking_pixel(html_body: &str, tracking_id: &str, state: &Arc<AppState>) -> String { fn inject_tracking_pixel(html_body: &str, tracking_id: &str, state: &Arc<AppState>) -> String {
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
let base_url = config_manager let base_url = config_manager
.get_config(&Uuid::nil(), "server-url", Some("")) .get_config(&Uuid::nil(), "server-url", Some(""))
.unwrap_or_else(|_| "".to_string()); .unwrap_or_else(|_| "".to_string());

View file

@ -19,7 +19,7 @@ const TRACKING_PIXEL: [u8; 43] = [
]; ];
pub fn is_tracking_pixel_enabled(state: &Arc<AppState>, bot_id: Option<Uuid>) -> bool { pub fn is_tracking_pixel_enabled(state: &Arc<AppState>, bot_id: Option<Uuid>) -> bool {
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
let bot_id = bot_id.unwrap_or(Uuid::nil()); let bot_id = bot_id.unwrap_or(Uuid::nil());
config_manager config_manager
@ -29,7 +29,7 @@ pub fn is_tracking_pixel_enabled(state: &Arc<AppState>, bot_id: Option<Uuid>) ->
} }
pub fn inject_tracking_pixel(html_body: &str, tracking_id: &str, state: &Arc<AppState>) -> String { pub fn inject_tracking_pixel(html_body: &str, tracking_id: &str, state: &Arc<AppState>) -> String {
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
let base_url = config_manager let base_url = config_manager
.get_config(&Uuid::nil(), "server-url", Some("")) .get_config(&Uuid::nil(), "server-url", Some(""))
.unwrap_or_else(|_| "".to_string()); .unwrap_or_else(|_| "".to_string());

View file

@ -9,8 +9,8 @@ use uuid::Uuid;
#[cfg(feature = "vectordb")] #[cfg(feature = "vectordb")]
use std::sync::Arc; use std::sync::Arc;
#[cfg(feature = "vectordb")] #[cfg(feature = "vectordb")]
use qdrant_client::{ use crate::vector_db::qdrant_native::{
qdrant::{Distance, PointStruct, VectorParams}, Distance, PointStruct, VectorParams,
Qdrant, Qdrant,
}; };
@ -94,7 +94,7 @@ impl UserEmailVectorDB {
if !exists { if !exists {
client client
.create_collection( .create_collection(
qdrant_client::qdrant::CreateCollectionBuilder::new(&self.collection_name) crate::vector_db::qdrant_native::CreateCollectionBuilder::new(&self.collection_name)
.vectors_config(VectorParams { .vectors_config(VectorParams {
size: 1536, size: 1536,
distance: Distance::Cosine.into(), distance: Distance::Cosine.into(),
@ -135,19 +135,19 @@ impl UserEmailVectorDB {
.as_ref() .as_ref()
.ok_or_else(|| anyhow::anyhow!("Vector DB not initialized"))?; .ok_or_else(|| anyhow::anyhow!("Vector DB not initialized"))?;
let payload: qdrant_client::Payload = serde_json::to_value(email)? let payload: crate::vector_db::qdrant_native::Payload = serde_json::to_value(email)?
.as_object() .as_object()
.cloned() .cloned()
.unwrap_or_default() .unwrap_or_default()
.into_iter() .into_iter()
.map(|(k, v)| (k, qdrant_client::qdrant::Value::from(v.to_string()))) .map(|(k, v)| (k, crate::vector_db::qdrant_native::Value::from(v.to_string())))
.collect::<std::collections::HashMap<_, _>>() .collect::<std::collections::HashMap<_, _>>()
.into(); .into();
let point = PointStruct::new(email.id.clone(), embedding, payload); let point = PointStruct::new(email.id.clone(), embedding, payload);
client client
.upsert_points(qdrant_client::qdrant::UpsertPointsBuilder::new( .upsert_points(crate::vector_db::qdrant_native::UpsertPointsBuilder::new(
&self.collection_name, &self.collection_name,
vec![point], vec![point],
)) ))
@ -187,25 +187,25 @@ impl UserEmailVectorDB {
let mut conditions = vec![]; let mut conditions = vec![];
if let Some(account_id) = &query.account_id { if let Some(account_id) = &query.account_id {
conditions.push(qdrant_client::qdrant::Condition::matches( conditions.push(crate::vector_db::qdrant_native::Condition::matches(
"account_id", "account_id",
account_id.clone(), account_id.clone(),
)); ));
} }
if let Some(folder) = &query.folder { if let Some(folder) = &query.folder {
conditions.push(qdrant_client::qdrant::Condition::matches( conditions.push(crate::vector_db::qdrant_native::Condition::matches(
"folder", "folder",
folder.clone(), folder.clone(),
)); ));
} }
Some(qdrant_client::qdrant::Filter::must(conditions)) Some(crate::vector_db::qdrant_native::Filter::must(conditions))
} else { } else {
None None
}; };
let mut search_builder = qdrant_client::qdrant::SearchPointsBuilder::new( let mut search_builder = crate::vector_db::qdrant_native::SearchPointsBuilder::new(
&self.collection_name, &self.collection_name,
query_embedding, query_embedding,
query.limit as u64, query.limit as u64,
@ -314,8 +314,8 @@ impl UserEmailVectorDB {
client client
.delete_points( .delete_points(
qdrant_client::qdrant::DeletePointsBuilder::new(&self.collection_name).points( crate::vector_db::qdrant_native::DeletePointsBuilder::new(&self.collection_name).points(
vec![qdrant_client::qdrant::PointId::from(email_id.to_string())], vec![crate::vector_db::qdrant_native::PointId::from(email_id.to_string())],
), ),
) )
.await?; .await?;
@ -373,7 +373,7 @@ impl UserEmailVectorDB {
client client
.create_collection( .create_collection(
qdrant_client::qdrant::CreateCollectionBuilder::new(&self.collection_name) crate::vector_db::qdrant_native::CreateCollectionBuilder::new(&self.collection_name)
.vectors_config(VectorParams { .vectors_config(VectorParams {
size: 1536, size: 1536,
distance: Distance::Cosine.into(), distance: Distance::Cosine.into(),

View file

@ -157,7 +157,7 @@ impl CachedLLMProvider {
} }
}; };
let config_manager = ConfigManager::new(db_pool.clone()); let config_manager = ConfigManager::new(db_pool.clone().into());
let cache_enabled = config_manager let cache_enabled = config_manager
.get_config(&bot_uuid, "llm-cache", Some("true")) .get_config(&bot_uuid, "llm-cache", Some("true"))
.unwrap_or_else(|_| "true".to_string()); .unwrap_or_else(|_| "true".to_string());
@ -193,7 +193,7 @@ impl CachedLLMProvider {
} }
}; };
let config_manager = ConfigManager::new(db_pool.clone()); let config_manager = ConfigManager::new(db_pool.clone().into());
let ttl = config_manager let ttl = config_manager
.get_config( .get_config(

View file

@ -33,7 +33,7 @@ async fn process_episodic_memory(
session_manager.get_user_sessions(Uuid::nil())? session_manager.get_user_sessions(Uuid::nil())?
}; };
for session in sessions { for session in sessions {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
// Default to 0 (disabled) to respect user's request for false by default // Default to 0 (disabled) to respect user's request for false by default
let threshold = config_manager let threshold = config_manager
@ -145,7 +145,7 @@ async fn process_episodic_memory(
let llm_provider = state.llm_provider.clone(); let llm_provider = state.llm_provider.clone();
let mut filtered = String::new(); let mut filtered = String::new();
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
// Use session.bot_id instead of Uuid::nil() to avoid using default bot settings // Use session.bot_id instead of Uuid::nil() to avoid using default bot settings
let model = config_manager let model = config_manager

View file

@ -36,7 +36,7 @@ pub async fn ensure_llama_servers_running(
Ok(crate::core::bot::get_default_bot(&mut conn)) Ok(crate::core::bot::get_default_bot(&mut conn))
}) })
.await??; .await??;
let config_manager = ConfigManager::new(app_state.conn.clone()); let config_manager = ConfigManager::new(app_state.conn.clone().into());
info!("Reading config for bot_id: {}", default_bot_id); info!("Reading config for bot_id: {}", default_bot_id);
let embedding_model_result = config_manager.get_config(&default_bot_id, "embedding-model", None); let embedding_model_result = config_manager.get_config(&default_bot_id, "embedding-model", None);
info!("embedding-model config result: {:?}", embedding_model_result); info!("embedding-model config result: {:?}", embedding_model_result);
@ -388,7 +388,7 @@ pub fn start_llm_server(
std::env::set_var("OMP_PLACES", "cores"); std::env::set_var("OMP_PLACES", "cores");
std::env::set_var("OMP_PROC_BIND", "close"); std::env::set_var("OMP_PROC_BIND", "close");
let conn = app_state.conn.clone(); let conn = app_state.conn.clone();
let config_manager = ConfigManager::new(conn.clone()); let config_manager = ConfigManager::new(conn.clone().into());
let mut conn = conn.get().map_err(|e| { let mut conn = conn.get().map_err(|e| {
Box::new(std::io::Error::other( Box::new(std::io::Error::other(
format!("failed to get db connection: {e}"), format!("failed to get db connection: {e}"),

View file

@ -161,7 +161,7 @@ pub async fn enhanced_llm_call(
.await?; .await?;
// Get actual LLM configuration from bot's config // Get actual LLM configuration from bot's config
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let actual_model = config_manager let actual_model = config_manager
.get_config(&uuid::Uuid::nil(), "llm-model", None) .get_config(&uuid::Uuid::nil(), "llm-model", None)
.unwrap_or_else(|_| model.clone()); .unwrap_or_else(|_| model.clone());

View file

@ -6,13 +6,13 @@ pub mod main_module;
// Re-export commonly used items from main_module // Re-export commonly used items from main_module
pub use main_module::{BootstrapProgress, health_check, health_check_simple, receive_client_errors}; pub use main_module::{BootstrapProgress, health_check, health_check_simple, receive_client_errors};
// Use jemalloc as the global allocator when the feature is enabled // Use mimalloc as the global allocator when the feature is enabled (replaced tikv-jemalloc due to RUSTSEC-2024-0436)
#[cfg(feature = "jemalloc")] #[cfg(feature = "jemalloc")]
use tikv_jemallocator::Jemalloc; use mimalloc::MiMalloc;
#[cfg(feature = "jemalloc")] #[cfg(feature = "jemalloc")]
#[global_allocator] #[global_allocator]
static GLOBAL: Jemalloc = Jemalloc; static GLOBAL: MiMalloc = MiMalloc;
// Module declarations for feature-gated modules // Module declarations for feature-gated modules
#[cfg(feature = "analytics")] #[cfg(feature = "analytics")]
@ -231,8 +231,7 @@ async fn main() -> std::io::Result<()> {
let noise_filters = "vaultrs=off,rustify=off,rustify_derive=off,\ let noise_filters = "vaultrs=off,rustify=off,rustify_derive=off,\
aws_sigv4=off,aws_smithy_checksums=off,aws_runtime=off,aws_smithy_http_client=off,\ aws_sigv4=off,aws_smithy_checksums=off,aws_runtime=off,aws_smithy_http_client=off,\
aws_smithy_runtime=off,aws_smithy_runtime_api=off,aws_sdk_s3=off,aws_config=off,\ aws_smithy_runtime=off,aws_smithy_runtime_api=off,aws_credential_types=off,aws_http=off,aws_sig_auth=off,aws_types=off,\
aws_credential_types=off,aws_http=off,aws_sig_auth=off,aws_types=off,\
mio=off,tokio=off,tokio_util=off,tower=off,tower_http=off,\ mio=off,tokio=off,tokio_util=off,tower=off,tower_http=off,\
tokio_tungstenite=off,tungstenite=off,\ tokio_tungstenite=off,tungstenite=off,\
reqwest=off,hyper=off,hyper_util=off,h2=off,\ reqwest=off,hyper=off,hyper_util=off,h2=off,\

View file

@ -435,7 +435,7 @@ pub async fn create_app_state(
#[cfg(feature = "directory")] #[cfg(feature = "directory")]
bootstrap_directory_admin(&zitadel_config).await; bootstrap_directory_admin(&zitadel_config).await;
let config_manager = ConfigManager::new(pool.clone()); let config_manager = ConfigManager::new(pool.clone().into());
let mut bot_conn = pool let mut bot_conn = pool
.get() .get()
@ -928,10 +928,10 @@ pub async fn start_background_services(
// Step 1: Discover bots from S3 buckets (*.gbai) and auto-create missing // Step 1: Discover bots from S3 buckets (*.gbai) and auto-create missing
if let Some(s3_client) = &state_for_scan.drive { if let Some(s3_client) = &state_for_scan.drive {
match s3_client.list_buckets().send().await { match s3_client.list_all_buckets().await {
Ok(result) => { Ok(buckets) => {
for bucket in result.buckets().iter().filter_map(|b| b.name()) { for bucket in buckets {
let name = bucket.to_string(); let name = bucket;
if !name.ends_with(".gbai") { if !name.ends_with(".gbai") {
continue; continue;
} }

View file

@ -1,8 +1,7 @@
//! Drive-related utilities //! Drive-related utilities
#[cfg(feature = "drive")] #[cfg(feature = "drive")]
pub async fn ensure_vendor_files_in_minio(drive: &aws_sdk_s3::Client) { pub async fn ensure_vendor_files_in_minio(drive: &crate::drive::s3_repository::S3Repository) {
use aws_sdk_s3::primitives::ByteStream;
use log::{info, warn}; use log::{info, warn};
let htmx_paths = [ let htmx_paths = [
@ -24,7 +23,7 @@ pub async fn ensure_vendor_files_in_minio(drive: &aws_sdk_s3::Client) {
.put_object() .put_object()
.bucket(bucket) .bucket(bucket)
.key(key) .key(key)
.body(ByteStream::from(content)) .body(content.clone())
.content_type("application/javascript") .content_type("application/javascript")
.send() .send()
.await .await

View file

@ -158,7 +158,7 @@ struct ContactInfo {
} }
async fn get_llm_config(state: &Arc<AppState>, bot_id: Uuid) -> Result<(String, String, String), String> { async fn get_llm_config(state: &Arc<AppState>, bot_id: Uuid) -> Result<(String, String, String), String> {
let config = ConfigManager::new(state.conn.clone()); let config = ConfigManager::new(state.conn.clone().into());
let llm_url = config let llm_url = config
.get_config(&bot_id, "llm-url", Some("")) .get_config(&bot_id, "llm-url", Some(""))

View file

@ -95,7 +95,7 @@ pub async fn send_campaign_email(
let open_token = Uuid::new_v4(); let open_token = Uuid::new_v4();
let tracking_id = Uuid::new_v4(); let tracking_id = Uuid::new_v4();
let config = ConfigManager::new(state.conn.clone()); let config = ConfigManager::new(state.conn.clone().into());
let base_url = config let base_url = config
.get_config(&bot_id, "server-url", Some("")) .get_config(&bot_id, "server-url", Some(""))
.unwrap_or_else(|_| "".to_string()); .unwrap_or_else(|_| "".to_string());

View file

@ -244,7 +244,7 @@ impl BotModelsClient {
} }
pub fn from_state(state: &AppState, bot_id: &Uuid) -> Self { pub fn from_state(state: &AppState, bot_id: &Uuid) -> Self {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let config = BotModelsConfig::from_database(&config_manager, bot_id); let config = BotModelsConfig::from_database(&config_manager, bot_id);
let image_config = ImageGeneratorConfig::from_database(&config_manager, bot_id); let image_config = ImageGeneratorConfig::from_database(&config_manager, bot_id);
let video_config = VideoGeneratorConfig::from_database(&config_manager, bot_id); let video_config = VideoGeneratorConfig::from_database(&config_manager, bot_id);
@ -630,7 +630,7 @@ pub async fn ensure_botmodels_running(
}) })
.await?; .await?;
let config_manager = ConfigManager::new(app_state.conn.clone()); let config_manager = ConfigManager::new(app_state.conn.clone().into());
BotModelsConfig::from_database(&config_manager, &default_bot_id) BotModelsConfig::from_database(&config_manager, &default_bot_id)
}; };

View file

@ -1,4 +1,4 @@
use aws_sdk_s3::primitives::ByteStream; use crate::drive::s3_repository::S3Repository;
use crate::core::shared::state::AppState; use crate::core::shared::state::AppState;
use crate::core::urls::ApiUrls; use crate::core::urls::ApiUrls;
use axum::{ use axum::{
@ -71,12 +71,12 @@ pub async fn handle_export_md(
if let Some(s3_client) = state.drive.as_ref() { if let Some(s3_client) = state.drive.as_ref() {
let _ = s3_client let _ = s3_client
.put_object() .put_object(
.bucket(&state.bucket_name) &state.bucket_name,
.key(&export_path) &export_path,
.body(ByteStream::from(doc.content.into_bytes())) doc.content.into_bytes(),
.content_type("text/markdown") Some("text/markdown"),
.send() )
.await; .await;
} }
@ -117,12 +117,12 @@ pub async fn handle_export_html(
if let Some(s3_client) = state.drive.as_ref() { if let Some(s3_client) = state.drive.as_ref() {
let _ = s3_client let _ = s3_client
.put_object() .put_object(
.bucket(&state.bucket_name) &state.bucket_name,
.key(&export_path) &export_path,
.body(ByteStream::from(html_content.into_bytes())) html_content.into_bytes(),
.content_type("text/html") Some("text/html"),
.send() )
.await; .await;
} }
@ -159,12 +159,12 @@ pub async fn handle_export_txt(
if let Some(s3_client) = state.drive.as_ref() { if let Some(s3_client) = state.drive.as_ref() {
let _ = s3_client let _ = s3_client
.put_object() .put_object(
.bucket(&state.bucket_name) &state.bucket_name,
.key(&export_path) &export_path,
.body(ByteStream::from(plain_text.into_bytes())) plain_text.into_bytes(),
.content_type("text/plain") Some("text/plain"),
.send() )
.await; .await;
} }

View file

@ -20,7 +20,7 @@ pub async fn call_llm(
&[("user".to_string(), user_content.to_string())], &[("user".to_string(), user_content.to_string())],
); );
let config_manager = crate::core::config::ConfigManager::new(state.conn.clone()); let config_manager = crate::core::config::ConfigManager::new(state.conn.clone().into());
let model = config_manager let model = config_manager
.get_config(&uuid::Uuid::nil(), "llm-model", None) .get_config(&uuid::Uuid::nil(), "llm-model", None)
.unwrap_or_else(|_| "gpt-3.5-turbo".to_string()); .unwrap_or_else(|_| "gpt-3.5-turbo".to_string());

View file

@ -1,4 +1,4 @@
use aws_sdk_s3::primitives::ByteStream; use crate::drive::s3_repository::S3Repository;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use std::sync::Arc; use std::sync::Arc;
@ -48,12 +48,12 @@ pub async fn save_document_to_drive(
}; };
s3_client s3_client
.put_object() .put_object(
.bucket(&state.bucket_name) &state.bucket_name,
.key(&doc_path) &doc_path,
.body(ByteStream::from(content.as_bytes().to_vec())) content.as_bytes().to_vec(),
.content_type("text/markdown") Some("text/markdown"),
.send() )
.await .await
.map_err(|e| format!("Failed to save document: {}", e))?; .map_err(|e| format!("Failed to save document: {}", e))?;
@ -67,12 +67,12 @@ pub async fn save_document_to_drive(
}); });
s3_client s3_client
.put_object() .put_object(
.bucket(&state.bucket_name) &state.bucket_name,
.key(&meta_path) &meta_path,
.body(ByteStream::from(metadata.to_string().into_bytes())) metadata.to_string().into_bytes(),
.content_type("application/json") Some("application/json"),
.send() )
.await .await
.map_err(|e| format!("Failed to save metadata: {}", e))?; .map_err(|e| format!("Failed to save metadata: {}", e))?;
} }
@ -91,20 +91,11 @@ pub async fn load_document_from_drive(
let current_path = format!("{}/current/{}.md", base_path, doc_id); let current_path = format!("{}/current/{}.md", base_path, doc_id);
if let Ok(result) = s3_client if let Ok(bytes) = s3_client
.get_object() .get_object(&state.bucket_name, &current_path)
.bucket(&state.bucket_name)
.key(&current_path)
.send()
.await .await
{ {
let bytes = result let content = String::from_utf8(bytes).map_err(|e| e.to_string())?;
.body
.collect()
.await
.map_err(|e| e.to_string())?
.into_bytes();
let content = String::from_utf8(bytes.to_vec()).map_err(|e| e.to_string())?;
let title = content let title = content
.lines() .lines()

View file

@ -154,7 +154,7 @@ impl TaskScheduler {
s3.put_object() s3.put_object()
.bucket("backups") .bucket("backups")
.key(format!("db/{}.sql", timestamp)) .key(format!("db/{}.sql", timestamp))
.body(aws_sdk_s3::primitives::ByteStream::from(body)) .body(Vec<u8>::from(body))
.send() .send()
.await?; .await?;
} }
@ -239,7 +239,7 @@ impl TaskScheduler {
} }
if let Some(s3) = &state.drive { if let Some(s3) = &state.drive {
let s3_clone: aws_sdk_s3::Client = (*s3).clone(); let s3_clone: S3Repository = (*s3).clone();
let s3_ok = s3_clone.list_buckets().send().await.is_ok(); let s3_ok = s3_clone.list_buckets().send().await.is_ok();
health["storage"] = serde_json::json!(s3_ok); health["storage"] = serde_json::json!(s3_ok);
} }

View file

@ -30,7 +30,7 @@
pub mod bm25_config; pub mod bm25_config;
pub mod hybrid_search; pub mod hybrid_search;
pub mod vectordb_indexer; pub mod vectordb_indexer;
pub mod qdrant_native;
pub use bm25_config::{is_stopword, Bm25Config, DEFAULT_STOPWORDS}; pub use bm25_config::{is_stopword, Bm25Config, DEFAULT_STOPWORDS};

View file

@ -0,0 +1,558 @@
/// Qdrant HTTP Client - Native implementation without qdrant-client crate
/// Uses reqwest for HTTP communication with Qdrant REST API
use anyhow::{Result, Context};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use reqwest::Client;
use log::{debug, error, info};
/// Qdrant client using native HTTP (old name for backward compatibility)
///
/// Alias kept so existing `use ...::Qdrant` imports from the replaced
/// `qdrant-client` crate keep compiling unchanged.
pub type Qdrant = QdrantClient;
/// Qdrant client using native HTTP
#[derive(Clone)]
pub struct QdrantClient {
    // Shared reqwest client; cloning `QdrantClient` reuses its connection pool.
    client: Client,
    // Base URL of the Qdrant server, stored without a trailing slash
    // (normalized in `QdrantClient::new`).
    url: String,
}
/// Builder trait for Qdrant client
///
/// NOTE(review): nothing visible in this module implements or consumes this
/// trait; it appears to exist only as an API-compatibility shim for the
/// replaced `qdrant-client` crate's builder — confirm before removing.
pub trait Build {
    // Consume the builder and produce a configured client.
    fn build(self) -> Result<QdrantClient>;
}
/// URL builder for Qdrant
///
/// Returned by `QdrantClient::from_url`; call `build()` to obtain the client.
pub struct QdrantFromUrl {
    // Base URL captured verbatim; trailing-slash normalization happens later
    // in `QdrantClient::new`.
    url: String,
}
impl QdrantFromUrl {
    /// Consume the builder and construct the client.
    ///
    /// Construction itself cannot fail here; the `Result` wrapper is kept
    /// for signature compatibility with the old `qdrant-client` builder API.
    pub fn build(self) -> Result<QdrantClient> {
        let client = QdrantClient::new(self.url.as_str());
        Ok(client)
    }
}
impl QdrantClient {
/// Create new Qdrant client from URL (returns a builder)
pub fn from_url(url: &str) -> QdrantFromUrl {
QdrantFromUrl { url: url.to_string() }
}
/// Create new Qdrant client directly from URL
pub fn new(url: &str) -> Self {
let client = Client::builder()
.build()
.expect("Failed to create HTTP client");
Self {
client,
url: url.trim_end_matches('/').to_string(),
}
}
/// Get full URL for collections endpoint
fn collections_url(&self) -> String {
format!("{}/collections", self.url)
}
/// Create collection
pub async fn create_collection(
&self,
name: &str,
vector_size: u64,
distance: &str,
) -> Result<()> {
let url = self.collections_url();
debug!("Creating collection: {} at {}", name, url);
let response = self
.client
.post(&url)
.json(&json!({
"name": name,
"vectors": {
"size": vector_size,
"distance": distance
}
}))
.send()
.await
.context("Failed to send create collection request")?;
let status = response.status();
let text = response.text().await.unwrap_or_default();
if status.is_success() {
info!("Collection '{}' created successfully", name);
Ok(())
} else {
error!("Failed to create collection '{}': {} - {}", name, status, text);
Err(anyhow::anyhow!("HTTP {}: {}", status, text))
}
}
/// List all collections
pub async fn list_collections(&self) -> Result<CollectionsResponse> {
let url = self.collections_url();
debug!("Listing collections at {}", url);
let response = self
.client
.get(&url)
.send()
.await
.context("Failed to send list collections request")?;
let status = response.status();
if status.is_success() {
let result: CollectionsResponse = response.json().await
.context("Failed to parse collections response")?;
debug!("Found {} collections", result.collections.len());
Ok(result)
} else {
let text = response.text().await.unwrap_or_default();
error!("Failed to list collections: {} - {}", status, text);
Err(anyhow::anyhow!("HTTP {}: {}", status, text))
}
}
/// Check if collection exists
pub async fn collection_exists(&self, name: &str) -> Result<bool> {
let url = format!("{}/{}", self.collections_url(), name);
debug!("Checking collection: {} at {}", name, url);
let response = self
.client
.get(&url)
.send()
.await
.context("Failed to send collection exists request")?;
Ok(response.status().is_success())
}
/// Get collection info
pub async fn collection_info(&self, name: &str) -> Result<CollectionInfoResponse> {
let url = format!("{}/{}", self.collections_url(), name);
debug!("Getting collection info: {} at {}", name, url);
let response = self
.client
.get(&url)
.send()
.await
.context("Failed to send collection info request")?;
let status = response.status();
if status.is_success() {
let result: CollectionInfoResponse = response.json().await
.context("Failed to parse collection info response")?;
Ok(result)
} else {
let text = response.text().await.unwrap_or_default();
error!("Failed to get collection info: {} - {}", status, text);
Err(anyhow::anyhow!("HTTP {}: {}", status, text))
}
}
/// Delete collection
pub async fn delete_collection(&self, name: &str) -> Result<()> {
let url = format!("{}/{}", self.collections_url(), name);
debug!("Deleting collection: {} at {}", name, url);
let response = self
.client
.delete(&url)
.send()
.await
.context("Failed to send delete collection request")?;
let status = response.status();
let text = response.text().await.unwrap_or_default();
if status.is_success() {
info!("Collection '{}' deleted successfully", name);
Ok(())
} else {
error!("Failed to delete collection '{}': {} - {}", name, status, text);
Err(anyhow::anyhow!("HTTP {}: {}", status, text))
}
}
/// Insert or update a batch of points in `collection_name`.
///
/// Issues `PUT {collections_url}/{collection_name}/upsert` with
/// `{"points": [...]}`; non-2xx responses become errors carrying the
/// response text.
pub async fn upsert_points(
    &self,
    collection_name: &str,
    points: Vec<Value>,
) -> Result<()> {
    let url = format!("{}/{}/upsert", self.collections_url(), collection_name);
    // Capture the count up front: `points` is consumed when the request
    // body is built, so it must not be touched afterwards.
    let point_count = points.len();
    let body = json!({
        "points": points
    });
    debug!("Upserting {} points to {}", point_count, collection_name);
    let response = self
        .client
        .put(&url)
        .json(&body)
        .send()
        .await
        .context("Failed to send upsert request")?;
    let status = response.status();
    let text = response.text().await.unwrap_or_default();
    if status.is_success() {
        debug!("Successfully upserted {} points", point_count);
        Ok(())
    } else {
        error!("Failed to upsert points: {} - {}", status, text);
        Err(anyhow::anyhow!("HTTP {}: {}", status, text))
    }
}
/// Vector-similarity search over `collection_name`.
///
/// Issues `POST {collections_url}/{collection_name}/search` with the query
/// `vector`, result `limit`, and optional JSON `filter`. Returns the raw
/// JSON hits found under the response's `result` key (empty when the key is
/// missing or not an array); non-2xx responses become errors.
pub async fn search_points(
    &self,
    collection_name: &str,
    vector: &[f32],
    limit: usize,
    filter: Option<Value>,
) -> Result<Vec<Value>> {
    let url = format!("{}/{}/search", self.collections_url(), collection_name);
    let mut body = json!({
        "vector": vector,
        "limit": limit
    });
    if let Some(f) = filter {
        body["filter"] = f;
    }
    debug!("Searching {} points in {}", limit, collection_name);
    let response = self
        .client
        .post(&url)
        .json(&body)
        .send()
        .await
        .context("Failed to send search request")?;
    let status = response.status();
    if status.is_success() {
        let result: Value = response.json().await.context("Failed to parse search response")?;
        // `.cloned()` replaces the manual `.map(|arr| arr.clone())` closure
        // (clippy::map_clone); a missing/`null` "result" yields an empty list.
        let points = result["result"].as_array().cloned().unwrap_or_default();
        debug!("Found {} search results", points.len());
        Ok(points)
    } else {
        let text = response.text().await.unwrap_or_default();
        error!("Failed to search points: {} - {}", status, text);
        Err(anyhow::anyhow!("HTTP {}: {}", status, text))
    }
}
/// Delete the points with the given ids from `collection_name`.
///
/// Issues `POST {collections_url}/{collection_name}/delete` with
/// `{"points": [...ids]}`; non-2xx responses become errors carrying the
/// response text.
pub async fn delete_points(
    &self,
    collection_name: &str,
    point_ids: Vec<String>,
) -> Result<()> {
    let url = format!("{}/{}/delete", self.collections_url(), collection_name);
    // Capture the count up front: `point_ids` is consumed when the request
    // body is built, so it must not be touched afterwards.
    let point_count = point_ids.len();
    let body = json!({
        "points": point_ids
    });
    debug!("Deleting {} points from {}", point_count, collection_name);
    let response = self
        .client
        .post(&url)
        .json(&body)
        .send()
        .await
        .context("Failed to send delete request")?;
    let status = response.status();
    let text = response.text().await.unwrap_or_default();
    if status.is_success() {
        info!("Successfully deleted {} points", point_count);
        Ok(())
    } else {
        error!("Failed to delete points: {} - {}", status, text);
        Err(anyhow::anyhow!("HTTP {}: {}", status, text))
    }
}
}
/// Response body returned by the "list collections" endpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CollectionsResponse {
    /// Every collection the server reported.
    pub collections: Vec<CollectionInfo>,
}
/// Minimal per-collection record inside a [`CollectionsResponse`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CollectionInfo {
    /// Collection name.
    pub name: String,
}
/// Response body for a single-collection info request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CollectionInfoResponse {
    /// Number of stored points when the server reports it; `None` otherwise
    /// (`serde(default)` tolerates the field being absent).
    #[serde(default)]
    pub points_count: Option<u64>,
}
/// Fluent builder for creating a collection: collects name, vector
/// dimensionality, and distance metric before issuing the request.
pub struct CreateCollectionBuilder {
    name: String,
    // Vector dimensionality; stays 0 until a vector config is supplied.
    vector_size: u64,
    // Distance metric name as the server expects it (e.g. "Cosine").
    distance: String,
}
impl CreateCollectionBuilder {
    /// Start a builder for collection `name` (defaults: size 0, "Cosine").
    pub fn new(name: &str) -> Self {
        Self {
            name: name.to_string(),
            vector_size: 0,
            distance: "Cosine".to_string(),
        }
    }
    /// Set vector size and distance from a [`VectorParamsBuilder`].
    pub fn vector(mut self, params: VectorParamsBuilder) -> Self {
        self.vector_size = params.size;
        self.distance = params.distance;
        self
    }
    /// Set vector size and distance from a [`VectorParams`] config.
    pub fn vectors_config(mut self, params: VectorParams) -> Self {
        self.vector_size = params.size;
        // Use the Display impl instead of Debug formatting; Distance's
        // Display renders the same variant names and states the intent.
        self.distance = params.distance.to_string();
        self
    }
    /// Issue the create-collection request against `client`.
    pub async fn build(self, client: &QdrantClient) -> Result<()> {
        client
            .create_collection(&self.name, self.vector_size, &self.distance)
            .await
    }
}
/// Builder for vector parameters: dimensionality plus distance metric,
/// with the metric pre-rendered as the string the REST API expects.
pub struct VectorParamsBuilder {
    size: u64,
    distance: String,
}
impl VectorParamsBuilder {
    /// Capture `size` and render `distance` to its canonical name.
    pub fn new(size: u64, distance: Distance) -> Self {
        Self {
            size,
            // Display (not Debug) formatting: same output for this enum,
            // but Display is the intended user-facing rendering.
            distance: distance.to_string(),
        }
    }
}
/// Distance metrics supported for vector similarity.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, Default)]
pub enum Distance {
    /// Cosine similarity (the default).
    #[default]
    Cosine,
    /// Euclidean (L2) distance.
    Euclid,
    /// Dot-product similarity.
    Dot,
    /// Manhattan (L1) distance.
    Manhattan,
}
impl std::fmt::Display for Distance {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Distance::Cosine => write!(f, "Cosine"),
Distance::Euclid => write!(f, "Euclid"),
Distance::Dot => write!(f, "Dot"),
Distance::Manhattan => write!(f, "Manhattan"),
}
}
}
/// A single point: id, embedding vector, and arbitrary JSON payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PointStruct {
    /// Point identifier.
    pub id: String,
    /// Embedding vector.
    pub vector: Vec<f32>,
    /// JSON metadata attached to the point.
    pub payload: serde_json::Map<String, Value>,
}
impl PointStruct {
    /// Bundle the three components into a point.
    pub fn new(id: String, vector: Vec<f32>, payload: serde_json::Map<String, Value>) -> Self {
        Self { id, vector, payload }
    }
}
/// Search filter: every contained condition must match (logical AND).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Filter {
    /// Conditions combined with AND semantics.
    pub must: Vec<Condition>,
}
impl Filter {
    /// Build a filter requiring all of `conditions` to hold.
    pub fn must(conditions: Vec<Condition>) -> Self {
        Self { must: conditions }
    }
}
/// A single search condition; the variant's fields are flattened directly
/// into the condition's JSON object.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Condition {
    #[serde(flatten)]
    pub condition_type: ConditionType,
}
impl Condition {
    /// Condition that `field` equals `value`.
    pub fn matches(field: &str, value: Value) -> Self {
        Self {
            condition_type: ConditionType::Field {
                key: field.to_string(),
                value,
            },
        }
    }
}
/// Kinds of search condition; untagged so only the variant's fields appear
/// in the serialized JSON (no enum wrapper).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ConditionType {
    /// Field `key` must equal `value`.
    Field { key: String, value: Value },
}
/// Builder that pre-serializes typed [`PointStruct`]s into the JSON shape
/// the upsert endpoint expects, then submits them.
pub struct UpsertPointsBuilder {
    collection_name: String,
    points: Vec<Value>,
}
impl UpsertPointsBuilder {
    /// Convert every point to `{"id", "vector", "payload"}` JSON up front.
    pub fn new(collection_name: &str, points: Vec<PointStruct>) -> Self {
        let mut serialized = Vec::with_capacity(points.len());
        for p in points {
            serialized.push(json!({
                "id": p.id,
                "vector": p.vector,
                "payload": p.payload
            }));
        }
        Self {
            collection_name: collection_name.to_string(),
            points: serialized,
        }
    }
    /// Execute the upsert against `client`.
    pub async fn build(self, client: &QdrantClient) -> Result<()> {
        client.upsert_points(&self.collection_name, self.points).await
    }
}
/// Builder for a vector search: query vector, result limit, and an
/// optional filter, submitted via [`build`](Self::build).
pub struct SearchPointsBuilder {
    collection_name: String,
    vector: Vec<f32>,
    limit: usize,
    filter: Option<Value>,
}
impl SearchPointsBuilder {
    /// Start a search over `collection_name` with no filter.
    pub fn new(collection_name: &str, vector: Vec<f32>, limit: usize) -> Self {
        Self {
            collection_name: collection_name.to_string(),
            vector,
            limit,
            filter: None,
        }
    }
    /// Attach (or clear) a filter; it is serialized to JSON immediately.
    pub fn filter(mut self, filter: Option<Filter>) -> Self {
        self.filter = match filter {
            Some(f) => Some(json!(f)),
            None => None,
        };
        self
    }
    /// No-op kept for API compatibility with the upstream client's builder.
    pub fn with_payload(self, _with: bool) -> Self {
        self
    }
    /// Run the search against `client` and wrap the hits.
    pub async fn build(self, client: &QdrantClient) -> Result<SearchResponse> {
        let result = client
            .search_points(&self.collection_name, &self.vector, self.limit, self.filter)
            .await?;
        Ok(SearchResponse { result })
    }
}
/// Search response: raw JSON hits returned by the search endpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SearchResponse {
    /// One JSON object per matched point.
    pub result: Vec<Value>,
}
/// Payload type alias: the JSON object attached to a point.
pub type Payload = serde_json::Map<String, serde_json::Value>;
/// Identifier wrapper for a single point.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PointId {
    /// The point's id string.
    pub id: String,
}
// Implement the standard conversion trait rather than an inherent `from`
// (clippy::should_implement_trait). `PointId::from(s)` call sites resolve
// to this impl unchanged, and `String::into::<PointId>()` now works too.
impl From<String> for PointId {
    fn from(id: String) -> Self {
        Self { id }
    }
}
/// Vector configuration: dimensionality plus distance metric.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct VectorParams {
    /// Vector dimensionality.
    pub size: u64,
    /// Distance metric used for similarity.
    pub distance: Distance,
}
/// Builder for deleting points from a collection by id.
pub struct DeletePointsBuilder {
    collection_name: String,
    points: Vec<PointId>,
}
impl DeletePointsBuilder {
    /// Start a delete request for `collection_name` with no points selected.
    pub fn new(collection_name: &str) -> Self {
        Self {
            collection_name: collection_name.to_string(),
            points: Vec::new(),
        }
    }
    /// Select the points to delete (replaces any prior selection).
    pub fn points(mut self, point_ids: Vec<PointId>) -> Self {
        self.points = point_ids;
        self
    }
    /// Unwrap the ids and execute the deletion against `client`.
    pub async fn build(self, client: &QdrantClient) -> Result<()> {
        let ids: Vec<String> = self.points.into_iter().map(|p| p.id).collect();
        client.delete_points(&self.collection_name, ids).await
    }
}

View file

@ -384,7 +384,7 @@ impl VideoRenderWorker {
output_url.trim_start_matches("/video/exports/") output_url.trim_start_matches("/video/exports/")
); );
if std::env::var("S3_ENDPOINT").is_ok() { if let Ok(endpoint) = std::env::var("S3_ENDPOINT") {
let bot = bot_name.unwrap_or("default"); let bot = bot_name.unwrap_or("default");
let bucket = format!("{bot}.gbai"); let bucket = format!("{bot}.gbai");
let key = format!("{bot}.gbdrive/{gbdrive_path}"); let key = format!("{bot}.gbdrive/{gbdrive_path}");
@ -393,16 +393,18 @@ impl VideoRenderWorker {
let file_data = std::fs::read(&source_path)?; let file_data = std::fs::read(&source_path)?;
let s3_config = aws_config::defaults(aws_config::BehaviorVersion::latest()).load().await; let access_key = std::env::var("S3_ACCESS_KEY").unwrap_or_else(|_| "minioadmin".to_string());
let s3_client = aws_sdk_s3::Client::new(&s3_config); let secret_key = std::env::var("S3_SECRET_KEY").unwrap_or_else(|_| "minioadmin".to_string());
let s3_client = crate::drive::s3_repository::S3Repository::new(
&endpoint,
&access_key,
&secret_key,
&bucket,
).map_err(|e| format!("Failed to create S3 client: {e}"))?;
s3_client s3_client
.put_object() .put_object(&bucket, &key, file_data, Some(&format!("video/{format}")))
.bucket(&bucket)
.key(&key)
.content_type(format!("video/{format}"))
.body(file_data.into())
.send()
.await .await
.map_err(|e| format!("S3 upload failed: {e}"))?; .map_err(|e| format!("S3 upload failed: {e}"))?;

View file

@ -1271,7 +1271,7 @@ async fn route_to_bot(
let state_for_voice = state_clone.clone(); let state_for_voice = state_clone.clone();
let phone_for_voice = phone.clone(); let phone_for_voice = phone.clone();
let config_manager = ConfigManager::new(state_for_voice.conn.clone()); let config_manager = ConfigManager::new(state_for_voice.conn.clone().into());
let voice_response = config_manager let voice_response = config_manager
.get_config(&bot_id_for_voice, "whatsapp-voice-response", Some("false")) .get_config(&bot_id_for_voice, "whatsapp-voice-response", Some("false"))
.unwrap_or_else(|_| "false".to_string()) .unwrap_or_else(|_| "false".to_string())
@ -1653,7 +1653,7 @@ pub async fn attendant_respond(
} }
async fn get_verify_token_for_bot(state: &Arc<AppState>, bot_id: &Uuid) -> String { async fn get_verify_token_for_bot(state: &Arc<AppState>, bot_id: &Uuid) -> String {
let config_manager = ConfigManager::new(state.conn.clone()); let config_manager = ConfigManager::new(state.conn.clone().into());
let bot_id_clone = *bot_id; let bot_id_clone = *bot_id;
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || {

View file

@ -0,0 +1,14 @@
// Test fixtures data module
// Placeholder for test data
/// Fixed bot id (…0001) for tests that need a deterministic bot.
pub fn sample_bot_id() -> uuid::Uuid {
    uuid::uuid!("00000000-0000-0000-0000-000000000001")
}
/// Fixed user id (…0002) for tests that need a deterministic user.
pub fn sample_user_id() -> uuid::Uuid {
    uuid::uuid!("00000000-0000-0000-0000-000000000002")
}
/// Fixed session id (…0003) for tests that need a deterministic session.
pub fn sample_session_id() -> uuid::Uuid {
    uuid::uuid!("00000000-0000-0000-0000-000000000003")
}