Update root: cleanup, botserver and botmodels updates

This commit is contained in:
Rodrigo Rodriguez (Pragmatismo) 2026-04-07 13:34:11 -03:00
parent a8ae578a80
commit 49bc6e52bc
12 changed files with 266 additions and 973 deletions

2
.gitignore vendored
View file

@ -54,6 +54,8 @@ node_modules/
/playwright/.cache/
/playwright/.auth/
.playwright*
.ruff_cache
.opencode
config/directory_config.json
# CI cache bust: Fri Feb 13 22:33:51 UTC 2026

View file

@ -1,429 +0,0 @@
# Folders.md: Sistema de Permissões de Pastas (Estilo Windows ACL)
## Visão Geral
Implementar controle de acesso a pastas baseado em grupos RBAC, permitindo que `USE KB` inclua seletivamente pastas conforme os grupos do usuário.
## Arquitetura Atual
### Já Existe ✅
| Componente | Arquivo | Estado |
|------------|---------|--------|
| `KbPermissions` | `core/kb/permissions.rs` | Completo - `AccessLevel::GroupBased`, `FolderPermission` |
| `UserContext` | `core/kb/permissions.rs` | Tem `groups: Vec<String>` |
| `build_qdrant_permission_filter()` | `core/kb/permissions.rs` | Gera filtros Qdrant por grupo |
| `rbac_groups` | Schema core.rs | Tabela existe |
| `rbac_user_groups` | Schema core.rs | Tabela existe (user → group) |
| `file_shares` | migrations drive | Tem `shared_with_group` |
### Falta Integrar ❌
| Componente | Descrição |
|------------|-----------|
| `folder_group_access` | Tabela para link pasta → grupo |
| `UserContext.groups` | Popular grupos do BD na sessão |
| `USE KB` permission check | Verificar grupos antes de adicionar |
| UI Admin | Atribuir grupos a pastas |
| `USE FOLDER` keyword | BASIC keyword para pastas |
---
## Estrutura de Permissões (Windows-style)
```
Organização
├── Gestores (grupo RBAC)
│ ├── Pasta: /relatorios/financeiros
│ │ └── Permissão: Gestores (ler/escrever)
│ ├── Pasta: /strategic
│ │ └── Permissão: Gestores (ler)
│ └── Pasta: /publico
│ └── Permissão: Todos (ler)
├── RH (grupo RBAC)
│ ├── Pasta: /rh/documentos
│ │ └── Permissão: RH (ler/escrever)
│ └── Pasta: /relatorios/financeiros
│ └── Permissão: RH (ler)
└── Todos (grupo implícito)
├── Pasta: /publico
│ └── Permissão: Todos (ler)
└── Pasta: /intranet
└── Permissão: Autenticados (ler)
```
---
## Plano de Implementação
### Fase 1: Database (Migration)
**Arquivo:** `botserver/migrations/6.2.0-02-folder-access/up.sql`
```sql
-- Tabela principal: pasta ↔ grupo
CREATE TABLE folder_group_access (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
folder_path TEXT NOT NULL, -- Ex: "work/bot1/pasta-protegida"
group_id UUID NOT NULL REFERENCES rbac_groups(id) ON DELETE CASCADE,
permission_level TEXT NOT NULL DEFAULT 'read', -- read|write|admin
created_by UUID REFERENCES users(id),
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(folder_path, group_id)
);
-- Índice para busca rápida
CREATE INDEX idx_folder_group_access_path ON folder_group_access(folder_path);
CREATE INDEX idx_folder_group_access_group ON folder_group_access(group_id);
-- Adicionar coluna de permissões em kb_collections
ALTER TABLE kb_collections
ADD COLUMN IF NOT EXISTS access_level TEXT DEFAULT 'authenticated';
COMMENT ON TABLE folder_group_access IS 'Windows-style ACL: pasta ↔ grupo RBAC';
```
---
### Fase 2: Schema Diesel
**Arquivo:** `botserver/src/core/shared/schema/research.rs`
```rust
diesel::table! {
folder_group_access (id) {
id -> Uuid,
folder_path -> Text,
group_id -> Uuid,
permission_level -> Varchar,
created_by -> Nullable<Uuid>,
created_at -> Timestamptz,
}
}
diesel::joinable!(folder_group_access -> rbac_groups (group_id));
// Adicionar em kb_collections:
access_level -> Varchar, // all|authenticated|role_based|group_based
```
---
### Fase 3: Modelos Rust
**Arquivo:** `botserver/src/core/kb/models.rs` (novo)
```rust
#[derive(Debug, Clone, Queryable, Selectable)]
#[diesel(table_name = folder_group_access)]
pub struct FolderGroupAccess {
pub id: Uuid,
pub folder_path: String,
pub group_id: Uuid,
pub permission_level: String,
pub created_by: Option<Uuid>,
pub created_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Insertable)]
#[diesel(table_name = folder_group_access)]
pub struct NewFolderGroupAccess {
pub folder_path: String,
pub group_id: Uuid,
pub permission_level: String,
pub created_by: Option<Uuid>,
}
```
---
### Fase 4: Carregar Grupos do Usuário
**Arquivo:** `botserver/src/core/shared/state.rs`
Modificar `AppState` ou `UserContext` para popular grupos:
```rust
// Nova função em core/kb/permissions.rs
pub async fn load_user_groups(
db_pool: &DbPool,
user_id: Uuid,
) -> Result<Vec<String>, String> {
    use crate::core::shared::schema::core::{rbac_groups, rbac_user_groups};
    let mut conn = db_pool.get().map_err(|e| e.to_string())?;
    // Qualificar as colunas: com dois `use ...::dsl::*`, `user_id.eq(user_id)`
    // compararia a coluna com ela mesma (sempre true) em vez do parâmetro.
    let group_names: Vec<String> = rbac_user_groups::table
        .inner_join(rbac_groups::table)
        .filter(rbac_user_groups::user_id.eq(user_id))
        .select(rbac_groups::name)
        .load(&mut conn)
        .map_err(|e| e.to_string())?;
Ok(group_names)
}
// Em UserContext, adicionar método:
impl UserContext {
pub async fn with_db_groups(mut self, db_pool: &DbPool) -> Result<Self, String> {
let groups = load_user_groups(db_pool, self.user_id).await?;
self.groups = groups;
Ok(self)
}
}
```
---
### Fase 5: Modificar USE KB
**Arquivo:** `botserver/src/basic/keywords/use_kb.rs`
```rust
use crate::core::kb::permissions::{KbPermissionParser, FolderPermission, AccessLevel};
async fn add_kb_to_session(
    conn_pool: DbPool,
    session_id: Uuid,
    bot_id: Uuid,
    user_id: Uuid, // Adicionar
    kb_name: &str,
) -> Result<(), String> {
    // ... código existente ...
    // NOVO: Verificar permissões de grupo
    // (load_user_groups é async — a função passa a ser async e usa .await)
    let user_groups = load_user_groups(&conn_pool, user_id).await?;
let has_access = check_folder_group_access(
&conn_pool,
&kb_folder_path,
&user_groups,
)?;
if !has_access {
return Err(format!(
"Acesso negado: KB '{}' requer grupo específico",
kb_name
));
}
// ... resto do código ...
}
fn check_folder_group_access(
conn_pool: &DbPool,
folder_path: &str,
user_groups: &[String],
) -> Result<bool, String> {
// Buscar grupos associados à pasta
// Se pasta é "pública" (sem grupos) → permitir
// Se usuário está em algum grupo da pasta → permitir
// Caso contrário → negar
}
```
---
### Fase 6: Modificar THINK KB (Filtro Qdrant)
**Arquivo:** `botserver/src/basic/keywords/think_kb.rs`
```rust
use crate::core::kb::permissions::build_qdrant_permission_filter;
async fn think_kb_search(
// ... parâmetros ...
user_id: Uuid,
) -> Result<Value, String> {
// Carregar contexto do usuário com grupos
let user_groups = load_user_groups(&db_pool, user_id).await?;
let user_context = UserContext::authenticated(
user_id,
Some(email),
org_id,
).with_groups(user_groups);
// Filtrar resultados do Qdrant com base nos grupos
let qdrant_filter = build_qdrant_permission_filter(&user_context);
// Buscar no Qdrant com filtro
// ...
}
```
---
### Fase 7: Novo Keyword USE FOLDER
**Arquivo:** `botserver/src/basic/keywords/use_folder.rs` (novo)
```rust
// USE FOLDER "caminho/da/pasta" [READ|WRITE|ADMIN]
engine.register_custom_syntax(
["USE", "FOLDER", "$expr$", "($expr$)", "($expr$)", "($expr$)"],
true,
move |context, inputs| {
let folder_path = context.eval_expression_tree(&inputs[0])?.to_string();
// Verificar acesso, adicionar à sessão
},
);
```
---
### Fase 8: API Endpoints
**Arquivo:** `botserver/src/api/routes/rbac.rs`
```rust
// GET /api/rbac/folders/{path}/groups
// Lista grupos com acesso a uma pasta
async fn get_folder_groups(
Path(folder_path): Path<String>,
State(state): State<ApiState>,
) -> Result<Json<Vec<GroupInfo>>, AppError> {
// Query folder_group_access
}
// POST /api/rbac/folders/{path}/groups/{group_id}
async fn add_folder_group(
Path((folder_path, group_id)): Path<(String, Uuid)>,
Json(payload): Json<FolderAccessPayload>,
) -> Result<Json<FolderGroupAccess>, AppError> {
// INSERT folder_group_access
}
// DELETE /api/rbac/folders/{path}/groups/{group_id}
async fn remove_folder_group(
Path((folder_path, group_id)): Path<(String, Uuid)>,
) -> Result<StatusCode, AppError> {
// DELETE folder_group_access
}
// GET /api/rbac/users/{user_id}/accessible-folders
async fn get_user_accessible_folders(
// Lista pastas que o usuário pode acessar
)
```
---
### Fase 9: UI Admin
**Arquivo:** `botui/ui/suite/admin/groups.html`
Adicionar aba "Pastas" na visualização do grupo:
```html
<!-- Tab de Pastas -->
<div hx-get="/api/admin/groups/{group_id}/folders"
hx-target="#group-folders">
<button class="tab-btn">Pastas</button>
</div>
<div id="group-folders" class="tab-content">
<!-- Lista de pastas com acesso -->
<!-- Botão: Adicionar pasta -->
</div>
```
**Arquivo:** `botui/ui/suite/drive/drive.html`
Mostrar cadeado nas pastas protegidas:
```html
<i class="fa-solid fa-lock" title="Acesso restrito a grupos"></i>
```
---
## Fluxo Completo
```
1. Usuário executa: USE KB "relatorios-financeiros"
2. Sistema carrega:
- user_id da sessão
- grupos do usuário (rbac_user_groups → rbac_groups)
- grupos da pasta (folder_group_access)
3. Verificação:
- Se pasta não tem restrições (pública) → OK
- Se usuário está em algum grupo da pasta → OK
- Caso contrário → ERRO "Acesso negado"
4. Se OK:
- Adiciona KB em session_kb_associations
- THINK KB agora busca no Qdrant com filtro de grupos
5. THINK KB retorna:
- Apenas documentos de pastas que o usuário tem acesso
```
---
## Testes
```rust
#[test]
fn test_group_access_allowed() {
let groups = vec!["gestores".to_string()];
let folder_path = "work/bot/financeiro";
// Gestor tem acesso
assert!(check_folder_group_access(folder_path, &groups).unwrap());
}
#[test]
fn test_group_access_denied() {
let groups = vec!["rh".to_string()];
let folder_path = "work/bot/financeiro";
// RH não tem acesso a financeiro
assert!(!check_folder_group_access(folder_path, &groups).unwrap());
}
#[test]
fn test_public_folder_access() {
let groups: Vec<String> = vec![];
let folder_path = "work/bot/publico";
// Pasta pública permite todos
assert!(check_folder_group_access(folder_path, &groups).unwrap());
}
```
---
## Prioridades de Implementação
| # | Tarefa | Prioridade | Complexidade |
|---|--------|------------|--------------|
| 1 | Migration folder_group_access | Alta | Baixa |
| 2 | Schema Diesel | Alta | Baixa |
| 3 | load_user_groups() | Alta | Média |
| 4 | check_folder_group_access() | Alta | Média |
| 5 | Modificar USE KB | Alta | Média |
| 6 | Modificar THINK KB (Qdrant filter) | Alta | Média |
| 7 | API endpoints | Média | Média |
| 8 | UI Admin | Média | Alta |
| 9 | USE FOLDER keyword | Baixa | Média |
---
## Arquivos a Modificar
| Arquivo | Ação |
|---------|------|
| `migrations/6.2.0-02-folder-access/up.sql` | Criar |
| `migrations/6.2.0-02-folder-access/down.sql` | Criar |
| `src/core/shared/schema/research.rs` | Modificar |
| `src/core/kb/permissions.rs` | Modificar (load_user_groups) |
| `src/core/kb/models.rs` | Criar |
| `src/basic/keywords/use_kb.rs` | Modificar |
| `src/basic/keywords/think_kb.rs` | Modificar |
| `src/api/routes/rbac.rs` | Modificar |
| `botui/ui/suite/admin/groups.html` | Modificar |
| `botui/ui/suite/drive/drive.html` | Modificar |

View file

@ -132,6 +132,38 @@ PRINT "-- RESULTADO --"
- ❌ **NEVER** create `.md` docs without checking `botbook/` first
- ❌ **NEVER** hardcode credentials — use `generate_random_string()` or env vars
### Build Pattern (MANDATORY) - Fix Fast Loop
When building botserver, use this pattern to fix errors ASAP:
```bash
# Run cargo in background, kill at 20 lines, fix errors, loop
# IMPORTANT: Never use --all-features (pulls docs/slides dependencies)
cd /home/rodriguez/src/gb
cargo check -p botserver > /tmp/check.log 2>&1 &
CARGO_PID=$!
while kill -0 $CARGO_PID 2>/dev/null; do
LINES=$(wc -l < /tmp/check.log 2>/dev/null || echo 0)
if [ "$LINES" -gt 20 ]; then
kill $CARGO_PID 2>/dev/null
echo "=== Got $LINES lines, killing cargo ==="
break
fi
sleep 1
done
# Check for errors - use strings to handle binary output
if strings /tmp/check.log | grep -q "^error"; then
echo "❌ Errors found:"
strings /tmp/check.log | grep "^error" | head -20
# Fix errors, then re-run this pattern
else
echo "✅ No errors - build clean!"
fi
```
**Key Rule:** Kill cargo at 20 lines, fix errors immediately, loop until clean.
**Why:** Compiling takes 2-3+ minutes. Getting errors in 20s saves 10+ minutes per error.
### Security
- ❌ **NEVER** include sensitive data (IPs, tokens, keys) in docs or code
- ❌ **NEVER** write internal IPs to logs — mask them (e.g., "10.x.x.x")
@ -621,3 +653,58 @@ curl -s -X POST \
- **Admin credentials**: `admin` / `Admin123!` (human user)
- **Database**: `PROD-DIRECTORY` on tables container
- **Zitadel v4.13.1** is the current version
---
## 📊 SEPLAGSE Bot Configuration
### Bot Location
- **Source**: `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/`
- **Work**: `./botserver-stack/data/system/work/seplagse.gbai/seplagse.gbdialog/`
### Key Files
| File | Purpose |
|------|---------|
| `start.bas` | Entry point with suggestion buttons |
| `detecta.bas` | Tool for detecting anomalies in folha_salarios |
| `init_folha.bas` | Tool to initialize test data (INSERT keyword has issues) |
| `tables.bas` | Table definitions - auto-processed on compile |
### Tool Button Configuration (start.bas)
```bas
ADD_SUGGESTION_TOOL "detecta" AS "🔍 Detectar Desvios na Folha"
ADD_SUGGESTION_TOOL "init_folha" AS "⚙️ Inicializar Dados de Teste"
```
### Detection Flow
1. User clicks "Detectar Desvios na Folha" button
2. Frontend sends `message_type: 6` (TOOL_EXEC) via WebSocket
3. Backend executes `detecta.bas` directly (skips KB/LLM)
4. `detecta.bas` calls `DETECT "folha_salarios"` keyword
5. Keyword queries bot-specific database for table data
6. Data sent to BotModels API at `/api/anomaly/detect`
7. Results displayed in chat
### Fixes Applied
1. **TOOL_EXEC message type**: Added `MessageType::TOOL_EXEC` (id=6)
2. **Frontend WebSocket**: Sends `message_type: 6` for tool buttons
3. **Backend handler**: `stream_response()` handles TOOL_EXEC directly
4. **DETECT keyword**: Fixed to use bot-specific database (`bot_database_manager`)
5. **Bot execution**: Tool buttons work - no "/tool" text shown
### Known Issues
- **INSERT keyword**: Has parsing issues in multi-line scripts
- **Test data**: `init_folha.bas` cannot insert data due to INSERT issues
- **Workaround**: Insert data manually or via external SQL tool
### Testing
```bash
# Restart services
./restart.sh
# Test in browser
http://localhost:3000/seplagse
# Check logs
tail -f botserver.log | grep -i "detecta\|error"
```

2
Cargo.lock generated
View file

@ -1275,7 +1275,7 @@ dependencies = [
[[package]]
name = "botapp"
version = "6.1.0"
version = "6.3.0"
dependencies = [
"anyhow",
"botlib",

@ -1 +1 @@
Subproject commit 980b1ad0d9bfc78481b1e563fc7d9384750a5089
Subproject commit e088a8e69eb8fe064bf1510a720d42abe159ab00

@ -1 +1 @@
Subproject commit 90c14bcd09ac09c797f397b357ee55f756b18758
Subproject commit 73002b36cc3def17546085574cbafe0f42c7b04f

View file

@ -1,29 +0,0 @@
Migrate 10 LXC containers from source (root@82.29.59.188 - LXD/ZFS) to destination (administrator@63.141.255.9 - Incus/Btrfs) using streaming method.
Instructions
- Use Btrfs storage on destination (loop1 mounted at /mnt/btrfs)
- Use streaming method: tar | zstd | ssh (no local storage)
- Migrate one by one: stop → copy → create → start → delete
- DO NOT delete containers before copying data
Discoveries
- Source: LXD with ZFS backend (default/containers/*), containers at /var/snap/lxd/common/lxd/storage-pools/default/containers/
- Destination: Incus 6.23, Btrfs pool PROD-GBO on loop1, project PROD-GBO1
- SSH: Works from source root → destination administrator
- MISTAKE MADE: Deleted all Incus containers with cleanup command before data was properly inside them
Accomplished
- ✅ Created Btrfs storage pool PROD-GBO on destination
- ✅ SSH access configured from source to destination
- ❌ Containers were deleted during cleanup - they need to be recreated
- ❌ Data is outside containers, needs to be copied INTO containers properly
Next steps
1. Create empty Incus containers: incus create --empty --project PROD-GBO1 -s PROD-GBO
2. Migrate from source: tar -C rootfs -cf - . | zstd | ssh dest 'sudo tar -I zstd -xf - -C /mnt/btrfs/containers/PROD-GBO1_<name>/rootfs'
3. Start containers: incus start <name>
4. Delete from source: lxc delete --force
Accomplished
- Recreated Btrfs storage pool PROD-GBO on /mnt/btrfs
- Created 10 empty Incus containers
- Migrated all containers using tar | zstd | ssh
- All 10 containers running on Btrfs
- Used ~24GB of Btrfs storage

View file

@ -1,461 +0,0 @@
# Fast Migration Plan - LXC/LXD to Incus (ONE BY ONE)
## Strategy: STREAMING DATA TRANSFER (NO DISK I/O ON SOURCE)
Transfer container data directly via compressed stream, NO intermediate tarballs on source.
Use `pv` (pipe viewer) for colorful progress bars and rate monitoring.
---
## Why This is FASTEST
**NO disk writes on source** (99% full) - streaming only
**zstd compression** - 3-5x faster than gzip
**pv monitoring** - colorful progress, ETA, rate
**Parallel transfers** - migrate multiple containers simultaneously
**Immediate deletion** - frees space for next transfer
---
## Prerequisites
### On Source (82.29.59.188)
```bash
# Install required tools
sudo apt update && sudo apt install -y zstd pv
# List all containers
lxc list
```
### On Destination (63.141.255.9)
```bash
# Incus is already installed and clean (LXC/LXD removed)
# Create PROD-GBO network bridge
sudo incus network create PROD-GBO --type bridge ipv4.address=10.107.115.1/24
# Create PROD-GBO1 project
sudo incus project create PROD-GBO1
sudo incus project switch PROD-GBO1
# Configure project
sudo incus project set PROD-GBO1 features.storage.volumes=true
sudo incus project set PROD-GBO1 features.profiles=true
```
---
## Migration Workflow (Per Container)
### Step 1: Stop Container on Source
```bash
# On source server
lxc stop pragmatismo-alm
```
### Step 2: Stream Data to Destination (FASTEST - NO DISK I/O)
```bash
# On source server - stream compressed data directly
lxc exec pragmatismo-alm -- tar -cf - /opt/gbo/ | \
pv -s $(du -sb /opt/gbo | awk '{print $1}') | \
zstd -3 -q | \
ssh administrator@63.141.255.9 "mkdir -p ~/gbo/pragmatismo-alm && zstd -d | tar -xf - -C ~/gbo/pragmatismo-alm/"
**What happens:**
1. `tar` reads /opt/gbo from container (streamed, no disk write)
2. `pv` shows colorful progress with ETA and rate — placed *before* compression so the `-s` size from `du -sb` matches the (uncompressed) bytes being counted
3. `zstd -3` compresses on-the-fly (fast compression)
4. `ssh` streams to destination
5. Destination creates the target directory, then decompresses and extracts directly to ~/gbo/pragmatismo-alm/
6. **NO tarball on source disk** - solves 99% full problem!
6. **NO tarball on source disk** - solves 99% full problem!
**pv Options:**
- `-s` : Total size (from `du -sb` for accurate progress)
- Colorful output with ETA, rate, elapsed time
### Step 3: Delete Container from Source
```bash
# Free space immediately for next transfer
lxc delete pragmatismo-alm
```
### Step 4: Create Fresh Incus Container
```bash
# On destination server
sudo incus launch images:debian/12/cloud pragmatismo-alm
# Add network
sudo incus config device add pragmatismo-alm eth0 nic name=eth0 network=PROD-GBO
# Set static IP
sudo incus config set pragmatismo-alm ipv4.address=10.107.115.4
# Create gbuser inside container
sudo incus exec pragmatismo-alm -- useradd -m -s /bin/bash gbuser
```
### Step 5: Push Data to Container
```bash
# Create directory structure
sudo incus exec pragmatismo-alm -- mkdir -p /opt/gbo
# Push data (recursive)
sudo incus file push -r ~/gbo/pragmatismo-alm/* pragmatismo-alm/opt/gbo/
# Fix ownership
sudo incus exec pragmatismo-alm -- chown -R gbuser:gbuser /opt/gbo
# Make binaries executable
sudo incus exec pragmatismo-alm -- chmod +x /opt/gbo/bin/*
```
### Step 6: Start Container and Verify Service
```bash
sudo incus start pragmatismo-alm
# Check service status
sudo incus exec pragmatismo-alm -- systemctl status alm
# Check logs
sudo incus exec pragmatismo-alm -- journalctl -u alm -f
```
### Step 7: Configure NAT Rules (ON HOST, NOT IN CONTAINER)
```bash
# On destination host (63.141.255.9)
# For ALM (example):
sudo iptables -t nat -A PREROUTING -p tcp --dport 4747 -j DNAT --to-destination 10.107.115.4:4747
sudo iptables -t nat -A OUTPUT -p tcp --dport 4747 -j DNAT --to-destination 10.107.115.4:4747
sudo iptables -A FORWARD -p tcp -d 10.107.115.4 --dport 4747 -j ACCEPT
sudo iptables -t nat -A POSTROUTING -p tcp -d 10.107.115.4 -j MASQUERADE
# Save rules
sudo sh -c 'iptables-save > /etc/iptables/rules.v4'
```
---
## Full Migration Script (ALL CONTAINERS)
### Run Multiple Containers in Parallel (for speed)
```bash
#!/bin/bash
# MIGRATE-ALL.sh - Fast parallel migration
set -e
# Container list with IPs
declare -A CONTAINERS=(
["dns"]="10.107.115.155"
["email"]="10.107.115.200"
["webmail"]="10.107.115.208"
["alm"]="10.107.115.4"
["drive"]="10.107.115.114"
["tables"]="10.107.115.33"
["system"]="10.107.115.229"
["alm-ci"]="10.107.115.190"
["table-editor"]="10.107.115.73"
)
for container in "${!CONTAINERS[@]}"; do
ip="${CONTAINERS[$container]}"
echo -e "\e[1;32m=== Migrating $container ($ip) ===\e[0m"
# Step 1: Stop
echo -e "\e[1;33mStopping $container...\e[0m"
ssh root@82.29.59.188 "lxc stop $container" || true
# Step 2: Get size for pv
echo -e "\e[1;33mGetting size of /opt/gbo...\e[0m"
size=$(ssh root@82.29.59.188 "lxc exec $container -- du -sb /opt/gbo | awk '{print \$1}'")
# Step 3: Stream data (FAST!)
echo -e "\e[1;33mStreaming data to destination...\e[0m"
ssh root@82.29.59.188 "lxc exec $container -- tar -cf - /opt/gbo/" | \
pv -s $size | \
zstd -3 -q | \
ssh administrator@63.141.255.9 "mkdir -p ~/gbo/$container && zstd -d | tar -xf - -C ~/gbo/$container/"
# Step 4: Delete from source
echo -e "\e[1;33mDeleting $container from source...\e[0m"
ssh root@82.29.59.188 "lxc delete $container"
# Step 5: Create fresh container
echo -e "\e[1;33mCreating Incus container...\e[0m"
ssh administrator@63.141.255.9 "sudo incus launch images:debian/12/cloud $container && \
sudo incus config device add $container eth0 nic name=eth0 network=PROD-GBO && \
sudo incus config set $container ipv4.address=$ip && \
sudo incus exec $container -- useradd -m -s /bin/bash gbuser"
# Step 6: Push data
echo -e "\e[1;33mPushing data to container...\e[0m"
ssh administrator@63.141.255.9 "sudo incus exec $container -- mkdir -p /opt/gbo && \
sudo incus file push -r ~/gbo/$container/* $container:/opt/gbo/ && \
sudo incus exec $container -- chown -R gbuser:gbuser /opt/gbo && \
sudo incus exec $container -- chmod +x /opt/gbo/bin/*"
# Step 7: Start
echo -e "\e[1;33mStarting $container...\e[0m"
ssh administrator@63.141.255.9 "sudo incus start $container"
echo -e "\e[1;32m✓ $container migrated successfully!\e[0m"
echo ""
done
echo -e "\e[1;32m=== ALL MIGRATIONS COMPLETE ===\e[0m"
```
---
## Single Container Migration (Quick Test)
```bash
#!/bin/bash
# migrate-one.sh - Migrate single container
CONTAINER=$1
if [ -z "$CONTAINER" ]; then
echo "Usage: $0 <container-name>"
exit 1
fi
echo -e "\e[1;32m=== Migrating $CONTAINER ===\e[0m"
# Stop on source
echo -e "\e[1;33m[1/7] Stopping container...\e[0m"
ssh root@82.29.59.188 "lxc stop $CONTAINER"
# Get size
echo -e "\e[1;33m[2/7] Getting data size...\e[0m"
SIZE=$(ssh root@82.29.59.188 "lxc exec $CONTAINER -- du -sb /opt/gbo | awk '{print \$1}'")
# Stream data (NO DISK WRITE!)
echo -e "\e[1;33m[3/7] Streaming data (pv will show progress)...\e[0m"
ssh root@82.29.59.188 "lxc exec $CONTAINER -- tar -cf - /opt/gbo/" | \
pv -s $SIZE | \
zstd -3 -q | \
ssh administrator@63.141.255.9 "mkdir -p ~/gbo/$CONTAINER && zstd -d | tar -xf - -C ~/gbo/$CONTAINER/"
# Delete from source
echo -e "\e[1;33m[4/7] Deleting from source...\e[0m"
ssh root@82.29.59.188 "lxc delete $CONTAINER"
# Create container
echo -e "\e[1;33m[5/7] Creating Incus container...\e[0m"
ssh administrator@63.141.255.9 "sudo incus launch images:debian/12/cloud $CONTAINER && \
sudo incus config device add $CONTAINER eth0 nic name=eth0 network=PROD-GBO && \
sudo incus exec $CONTAINER -- useradd -m -s /bin/bash gbuser"
# Push data
echo -e "\e[1;33m[6/7] Pushing data...\e[0m"
ssh administrator@63.141.255.9 "sudo incus exec $CONTAINER -- mkdir -p /opt/gbo && \
sudo incus file push -r ~/gbo/$CONTAINER/* $CONTAINER:/opt/gbo/ && \
sudo incus exec $CONTAINER -- chown -R gbuser:gbuser /opt/gbo && \
sudo incus exec $CONTAINER -- chmod +x /opt/gbo/bin/*"
# Start
echo -e "\e[1;33m[7/7] Starting container...\e[0m"
ssh administrator@63.141.255.9 "sudo incus start $CONTAINER"
echo -e "\e[1;32m✓ Migration complete! Check with: incus list\e[0m"
```
---
## Port Forwarding (iptables NAT) - Complete Rules
```bash
# Enable IP forwarding (persistent)
echo "net.ipv4.ip_forward = 1" | sudo tee /etc/sysctl.d/99-ipforward.conf
sudo sysctl -w net.ipv4.ip_forward=1
# Enable route_localnet
echo "net.ipv4.conf.all.route_localnet = 1" | sudo tee /etc/sysctl.d/99-localnet.conf
sudo sysctl -w net.ipv4.conf.all.route_localnet=1
# DNS (53)
sudo iptables -t nat -A PREROUTING -p udp --dport 53 -j DNAT --to-destination 10.107.115.155:53
sudo iptables -t nat -A PREROUTING -p tcp --dport 53 -j DNAT --to-destination 10.107.115.155:53
sudo iptables -t nat -A OUTPUT -p udp --dport 53 -j DNAT --to-destination 10.107.115.155:53
sudo iptables -t nat -A OUTPUT -p tcp --dport 53 -j DNAT --to-destination 10.107.115.155:53
# Email (25,465,587,993,995,143,110,4190)
sudo iptables -t nat -A PREROUTING -p tcp --dport 25 -j DNAT --to-destination 10.107.115.200:25
sudo iptables -t nat -A PREROUTING -p tcp --dport 465 -j DNAT --to-destination 10.107.115.200:465
sudo iptables -t nat -A PREROUTING -p tcp --dport 587 -j DNAT --to-destination 10.107.115.200:587
sudo iptables -t nat -A PREROUTING -p tcp --dport 993 -j DNAT --to-destination 10.107.115.200:993
sudo iptables -t nat -A PREROUTING -p tcp --dport 995 -j DNAT --to-destination 10.107.115.200:995
sudo iptables -t nat -A PREROUTING -p tcp --dport 143 -j DNAT --to-destination 10.107.115.200:143
sudo iptables -t nat -A PREROUTING -p tcp --dport 110 -j DNAT --to-destination 10.107.115.200:110
sudo iptables -t nat -A PREROUTING -p tcp --dport 4190 -j DNAT --to-destination 10.107.115.200:4190
sudo iptables -t nat -A OUTPUT -p tcp --dport 25 -j DNAT --to-destination 10.107.115.200:25
sudo iptables -t nat -A OUTPUT -p tcp --dport 465 -j DNAT --to-destination 10.107.115.200:465
sudo iptables -t nat -A OUTPUT -p tcp --dport 587 -j DNAT --to-destination 10.107.115.200:587
sudo iptables -t nat -A OUTPUT -p tcp --dport 993 -j DNAT --to-destination 10.107.115.200:993
sudo iptables -t nat -A OUTPUT -p tcp --dport 995 -j DNAT --to-destination 10.107.115.200:995
sudo iptables -t nat -A OUTPUT -p tcp --dport 143 -j DNAT --to-destination 10.107.115.200:143
sudo iptables -t nat -A OUTPUT -p tcp --dport 110 -j DNAT --to-destination 10.107.115.200:110
sudo iptables -t nat -A OUTPUT -p tcp --dport 4190 -j DNAT --to-destination 10.107.115.200:4190
# Webmail (5252)
sudo iptables -t nat -A PREROUTING -p tcp --dport 5252 -j DNAT --to-destination 10.107.115.208:5252
sudo iptables -t nat -A OUTPUT -p tcp --dport 5252 -j DNAT --to-destination 10.107.115.208:5252
# ALM (4747)
sudo iptables -t nat -A PREROUTING -p tcp --dport 4747 -j DNAT --to-destination 10.107.115.4:4747
sudo iptables -t nat -A OUTPUT -p tcp --dport 4747 -j DNAT --to-destination 10.107.115.4:4747
# Tables/PostgreSQL (5432)
sudo iptables -t nat -A PREROUTING -p tcp --dport 5432 -j DNAT --to-destination 10.107.115.33:5432
sudo iptables -t nat -A OUTPUT -p tcp --dport 5432 -j DNAT --to-destination 10.107.115.33:5432
# Proxy/Caddy (80, 443)
sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -j DNAT --to-destination 10.107.115.236:80
sudo iptables -t nat -A PREROUTING -p tcp --dport 443 -j DNAT --to-destination 10.107.115.236:443
sudo iptables -t nat -A OUTPUT -p tcp --dport 80 -j DNAT --to-destination 10.107.115.236:80
sudo iptables -t nat -A OUTPUT -p tcp --dport 443 -j DNAT --to-destination 10.107.115.236:443
# FORWARD rules (allow traffic to containers)
sudo iptables -A FORWARD -p tcp -d 10.107.115.155 -j ACCEPT
sudo iptables -A FORWARD -p udp -d 10.107.115.155 -j ACCEPT
sudo iptables -A FORWARD -p tcp -d 10.107.115.200 -j ACCEPT
sudo iptables -A FORWARD -p tcp -s 10.107.115.200 -j ACCEPT
sudo iptables -A FORWARD -p tcp -d 10.107.115.208 -j ACCEPT
sudo iptables -A FORWARD -p tcp -d 10.107.115.4 -j ACCEPT
sudo iptables -A FORWARD -p tcp -d 10.107.115.33 -j ACCEPT
sudo iptables -A FORWARD -p tcp -d 10.107.115.236 -j ACCEPT
sudo iptables -A FORWARD -p tcp -s 10.107.115.236 -j ACCEPT
# POSTROUTING MASQUERADE (return traffic)
sudo iptables -t nat -A POSTROUTING -p tcp -d 10.107.115.155 -j MASQUERADE
sudo iptables -t nat -A POSTROUTING -p udp -d 10.107.115.155 -j MASQUERADE
sudo iptables -t nat -A POSTROUTING -p tcp -d 10.107.115.200 -j MASQUERADE
sudo iptables -t nat -A POSTROUTING -p tcp -d 10.107.115.208 -j MASQUERADE
sudo iptables -t nat -A POSTROUTING -p tcp -d 10.107.115.4 -j MASQUERADE
sudo iptables -t nat -A POSTROUTING -p tcp -d 10.107.115.33 -j MASQUERADE
sudo iptables -t nat -A POSTROUTING -p tcp -d 10.107.115.236 -j MASQUERADE
# Save rules
sudo sh -c 'iptables-save > /etc/iptables/rules.v4'
```
---
## Benefits of This Approach
**FASTEST** - No intermediate tarballs, pure streaming
**COLORFUL** - pv shows beautiful progress bars
**EFFICIENT** - zstd compression (3-5x gzip)
**SAFE** - One container at a time, verify before next
**CLEAN** - Immediate deletion frees disk space
**MONITORED** - Real-time transfer rate and ETA
**PARALLEL** - Can migrate multiple containers at once
**Estimated time:**
- Small containers (<1GB): ~5-10 minutes each
- Medium containers (1-5GB): ~10-20 minutes each
- Large containers (>5GB): ~20-40 minutes each
- Total (parallel): ~1-2 hours for all containers
vs lxc export method: ~4-8 hours total
---
## Quick Start
```bash
# 1. Install pv on source
ssh root@82.29.59.188 "apt install -y pv zstd"
# 2. Save migration scripts
# Copy migrate-one.sh and MIGRATE-ALL.sh
# 3. Run single test
./migrate-one.sh pragmatismo-alm
# 4. If successful, run all
./MIGRATE-ALL.sh
```
---
## Troubleshooting
### pv not showing progress?
```bash
# Check pv is installed
pv --version
# Check size detection
ssh root@82.29.59.188 "lxc exec pragmatismo-alm -- du -sb /opt/gbo"
```
### Container not starting?
```bash
# Check logs
sudo incus list
sudo incus info pragmatismo-alm
sudo incus logs pragmatismo-alm --show-log
# Check network
sudo incus exec pragmatismo-alm -- ip addr
```
### Service not running?
```bash
# Inside container
sudo incus exec pragmatismo-alm -- bash
systemctl status alm
journalctl -u alm -n 50
```
### NAT not working?
```bash
# Check rules
sudo iptables -t nat -L -n -v
# Check forwarding
sudo iptables -L FORWARD -n -v
# Test from host
nc -zv 127.0.0.1 4747
```
---
## Summary
This plan achieves:
**FASTEST possible migration** - streaming, no disk I/O
**Colorful progress** - pv with ETA, rate, elapsed
**Immediate space cleanup** - delete as we go
**Parallel migration** - do multiple containers at once
**Clean Incus** - fresh start, no LXC/LXD trash
**Proper NAT** - iptables only, no socat/proxy devices
**No mbuffer needed** - pv does the job with colorful output!
---
## Verification Checklist
After each migration:
- [ ] Container running: `sudo incus list`
- [ ] Service active: `sudo incus exec $c -- systemctl status <service>`
- [ ] Data intact: `sudo incus exec $c -- ls -la /opt/gbo`
- [ ] Port accessible: `nc -zv 127.0.0.1 <port>`
- [ ] Source deleted: `ssh root@82.29.59.188 lxc list` (should not show container)
- [ ] NAT rules added: `sudo iptables -t nat -L -n | grep <ip>`
After all migrations:
- [ ] All containers running on Incus
- [ ] All services active
- [ ] All NAT rules configured
- [ ] External access works
- [ ] Source server: no containers left

View file

@ -4,18 +4,12 @@
- Bot seplagse deve usar start.bas para inserir dados via init_folha.bas
- detecta.bas deve detectar anomalias nos dados inseridos
## Problema Atual
Erro de compilação em init_folha.bas:
```
Syntax error: Expecting name of a property (line 8, position 261)
```
## ✅ Status Atual
Linha 8: `IF GET_BOT_MEMORY("folha_inicializada") == "true" THEN`
### Correção REM em mod.rs (FEITA)
**Arquivo:** `botserver/src/basic/mod.rs` linha ~588-594
## Correção Parcial Aplicada
Arquivo: `botserver/src/basic/mod.rs` linha ~588-594
Adicionado filtro para `REM ` e `REM\t` no `compile_tool_script`:
Filtro adicionado para `REM ` e `REM\t` no `compile_tool_script`:
```rust
!(trimmed.starts_with("PARAM ") ||
trimmed.starts_with("PARAM\t") ||
@ -28,18 +22,38 @@ Adicionado filtro para `REM ` e `REM\t` no `compile_tool_script`:
trimmed.is_empty())
```
## Arquivos Envolvidos
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/start.bas` - Original restaurado
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/init_folha.bas` - Inserir dados de exemplo
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/detecta.bas` - Detectar anomalias
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/tables.bas` - Schema da tabela
### Arquivos Envolvidos (VERIFICADOS)
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/start.bas` ✅ OK
- Contém botões de sugestão: detecta e init_folha
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/init_folha.bas` ✅ OK
- 4 INSERT statements para dados de exemplo
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/detecta.bas` ✅ OK
- Usa DETECT keyword
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/tables.bas` ✅ OK
- TABLE folha_salarios definida
## Próximos Passos
1. Verificar se a correção em mod.rs resolve o erro de REM
2. Se ainda falhar, verificar o pré-processamento do IF/THEN ou SAVE
3. Testar start.bas com CALL "init_folha"
4. Testar detecta.bas com DETECT "folha_salarios"
### Botserver (RODANDO)
- ✅ Botserver compilado com sucesso
- ✅ Botserver rodando em http://localhost:8080
- ✅ Health check OK
## Próximos Passos (Pendentes)
1. **Testar via navegador** - Necessário instalar Playwright browsers
- Navegar para http://localhost:3000/seplagse
- Clicar em "⚙️ Inicializar Dados de Teste"
- Verificar se INSERT funciona
- Clicar em "🔍 Detectar Desvios na Folha"
- Verificar se DETECT funciona
2. **Verificar se há warnings relevantes**
- Alguns warnings de código podem precisar ser corrigidos
## Cache
- Limpar AST: `rm ./botserver-stack/data/system/work/seplagse.gbai/seplagse.gbdialog/*.ast`
- Reiniciar: `./restart.sh`
- AST limpo: `rm ./botserver-stack/data/system/work/seplagse.gbai/seplagse.gbdialog/*.ast`
- Reiniciado: `./restart.sh`
- Botserver: ✅ Rodando
## Arquivos de Trabalho
- Work directory: `./botserver-stack/data/system/work/seplagse.gbai/seplagse.gbdialog/`
- Todos os arquivos BASIC estão presentes e parecem válidos

46
prompts/nodrive.md Normal file
View file

@ -0,0 +1,46 @@
# Progress: Removendo aws-sdk-s3 do default bundle
## Goal
Remover `aws-sdk-s3` (~120MB) do bundle default `["chat", "automation", "cache", "llm"]` e fazer compilar com:
```bash
cargo check -p botserver --no-default-features --features "chat,automation,cache,llm"
```
## ✅ COMPLETED
1. **Cargo.toml** - Features separadas: `drive` (S3) vs `local-files` (notify)
2. **main.rs** - `pub mod drive` com `#[cfg(any(feature = "drive", feature = "local-files"))]`
3. **state.rs** - `NoDrive` struct adicionada
4. **multimedia.rs** - `DefaultMultimediaHandler` com cfg gates (drive vs no-drive)
5. **drive/mod.rs** - Módulos condicionais:
- `#[cfg(feature = "drive")] pub mod document_processing;`
- `#[cfg(feature = "drive")] pub mod drive_monitor;`
- `#[cfg(feature = "drive")] pub mod vectordb;`
- `#[cfg(feature = "local-files")] pub mod local_file_monitor;`
- Todas ~21 funções com `#[cfg(feature = "drive")]`
6. **multimedia.rs - upload_media** - Duas implementações separadas com cfg gates:
- `#[cfg(feature = "drive")]` - Usa S3 client
- `#[cfg(not(feature = "drive"))]` - Usa armazenamento local
## ✅ VERIFIED
```bash
cargo check -p botserver --no-default-features --features "chat,automation,cache,llm"
```
**Resultado:** ✅ Build limpo (apenas warnings, 0 erros)
**Tempo de compilação:** 2m 29s
## Arquivo Não Fixado (opcional)
### auto_task/app_generator.rs
- `ensure_bucket_exists` method never used (warning, não impede compilação)
- Método já está com `#[cfg(feature = "drive")]` (correto)
## Resumo
O `aws-sdk-s3` foi removido com sucesso do bundle default. O sistema agora suporta dois modos:
- **Com feature "drive"**: Usa S3 (aws-sdk-s3 ~120MB)
- **Sem feature "drive"**: Usa armazenamento local (notify ~2MB)
O build padrão agora é leve (~120MB a menos) e funciona sem dependências de AWS.

49
restart-fast.sh Normal file
View file

@ -0,0 +1,49 @@
#!/bin/bash
# Fast restart: rebuild and relaunch botserver + botmodels while leaving the
# infrastructure services untouched. botui is only built/started when it is
# not already running.

# Build BEFORE stopping anything: a failed build must not take the running
# services down or relaunch a stale binary.
if ! cargo build -p botserver; then
    echo "❌ cargo build -p botserver failed - keeping current services running"
    exit 1
fi

echo "=== Fast Restart: botserver + botmodels only ==="

# Kill only the app services, keep infra running
pkill -f "botserver --noconsole" || true
pkill -f "botmodels" || true

# Clean logs
rm -f botserver.log botmodels.log

# Start botmodels from its virtualenv; fail loudly if the venv is missing
# instead of silently picking up a system-wide uvicorn.
cd botmodels
if [ ! -f venv/bin/activate ]; then
    echo "❌ botmodels/venv not found - create the virtualenv first"
    exit 1
fi
source venv/bin/activate
uvicorn src.main:app --host 0.0.0.0 --port 8085 > ../botmodels.log 2>&1 &
echo "  botmodels PID: $!"
cd ..

# Wait for botmodels health (up to 20s); -f treats HTTP errors as failures
for i in $(seq 1 20); do
    if curl -sf http://localhost:8085/api/health > /dev/null 2>&1; then
        echo "  botmodels ready"
        break
    fi
    sleep 1
done

# Start botui only when not already up
if ! pgrep -f "botui" > /dev/null; then
    echo "Starting botui..."
    if ! cargo build -p botui; then
        echo "❌ cargo build -p botui failed"
        exit 1
    fi
    # NOTE(review): `cargo build -p botui` places the binary in the workspace
    # root target dir, so run it from the repo root (as restart.sh does) rather
    # than from inside botui/ where ./target/debug/botui would not exist.
    BOTSERVER_URL="http://localhost:8080" ./target/debug/botui > botui.log 2>&1 &
    echo "  botui PID: $!"
fi

# Start botserver
BOTMODELS_HOST="http://localhost:8085" BOTMODELS_API_KEY="starter" RUST_LOG=info \
    ./target/debug/botserver --noconsole > botserver.log 2>&1 &
echo "  botserver PID: $!"

# Poll for botserver health instead of a single fixed-delay probe
for i in $(seq 1 10); do
    if curl -sf http://localhost:8080/health > /dev/null 2>&1; then
        break
    fi
    sleep 1
done
curl -sf http://localhost:8080/health > /dev/null 2>&1 && echo "✅ botserver ready" || echo "❌ botserver failed"
echo "Done. botserver $(pgrep -f 'botserver --noconsole') botui $(pgrep -f botui) botmodels $(pgrep -f botmodels)"

View file

@ -1,41 +1,55 @@
#!/bin/bash
set -e
echo "Stopping..."
pkill -f botserver || true
pkill -f botui || true
pkill -f botmodels || true
pkill -f rustc || true
echo "=== Fast Restart: botserver only (keeps infra running) ==="
echo "Cleaning..."
rm -f botserver.log botui.log botmodels.log
# Only kill the app services, keep infra (postgres, valkey, minio, vault, zitadel) running
pkill -f "botserver --noconsole" || true
pkill -f "botmodels" || true
echo "Building..."
# Clean app logs only
rm -f botserver.log botmodels.log
# Build botserver (incremental, should be fast)
cargo build -p botserver
cargo build -p botui
echo "Starting botmodels..."
cd botmodels
source venv/bin/activate
uvicorn src.main:app --host 0.0.0.0 --port 8085 > ../botmodels.log 2>&1 &
echo " PID: $!"
cd ..
# Start botmodels if not running
if ! pgrep -f "botmodels" > /dev/null; then
echo "Starting botmodels..."
cd botmodels
source venv/bin/activate
uvicorn src.main:app --host 0.0.0.0 --port 8085 > ../botmodels.log 2>&1 &
echo " botmodels PID: $!"
cd ..
# Wait for botmodels
for i in $(seq 1 15); do
if curl -s http://localhost:8085/api/health > /dev/null 2>&1; then
echo " botmodels ready"
break
fi
sleep 1
done
else
echo " botmodels already running"
fi
echo "Waiting for botmodels..."
for i in $(seq 1 30); do
if curl -s http://localhost:8085/api/health > /dev/null 2>&1; then
echo " botmodels ready"
break
# Start botserver
echo "Starting botserver..."
BOTMODELS_HOST="http://localhost:8085" BOTMODELS_API_KEY="starter" RUST_LOG=info \
./target/debug/botserver --noconsole > botserver.log 2>&1 &
echo " botserver PID: $!"
# Wait for botserver health with timeout
echo "Waiting for botserver..."
for i in $(seq 1 10); do
if curl -sf http://localhost:8080/health > /dev/null 2>&1; then
echo "✅ botserver ready"
exit 0
fi
sleep 1
done
echo "Starting botserver..."
BOTMODELS_HOST="http://localhost:8085" BOTMODELS_API_KEY="starter" RUST_LOG=debug ./target/debug/botserver --noconsole > botserver.log 2>&1 &
echo " PID: $!"
echo "Starting botui..."
BOTSERVER_URL="http://localhost:8080" ./target/debug/botui > botui.log 2>&1 &
echo " PID: $!"
echo "Done. Logs: tail -f botserver.log botui.log botmodels.log"
echo "❌ botserver failed to start - check botserver.log"
tail -20 botserver.log
exit 1