Compare commits

..

No commits in common. "main" and "pragmatismo" have entirely different histories.

47 changed files with 1334 additions and 14907 deletions

View file

@ -1,5 +1,5 @@
[build]
# rustc-wrapper = "sccache"
rustc-wrapper = "sccache"
[target.x86_64-unknown-linux-gnu]
linker = "clang"

View file

@ -1,8 +0,0 @@
# General Bots Environment Configuration
# Copy this file to .env and fill in values
# NEVER commit .env to version control
# Vault connection
VAULT_ADDR=https://127.0.0.1:8200
VAULT_TOKEN=<your-vault-token-here>
VAULT_CACERT=./botserver-stack/vault/certs/ca.crt

44
.gitignore vendored
View file

@ -2,17 +2,11 @@
target/
*.out
bin/
*.png
*.jpg
# Logs
*.log
*logfile*
*-log*
.vscode
.zed
.gemini
.claude
# Temporary files
.tmp*
@ -37,15 +31,12 @@ botserver-installers/*
botserver-stack
TODO*
work
.swp
# Lock file
# Cargo.lock (should be tracked)
# Lock file (regenerated from Cargo.toml)
Cargo.lock
.kiro
config
# Data directory (contains bot configs and API keys)
data/
# Playwright
node_modules/
/test-results/
@ -53,30 +44,3 @@ node_modules/
/blob-report/
/playwright/.cache/
/playwright/.auth/
.playwright*
.ruff_cache
.opencode
config/directory_config.json
# CI cache bust: Fri Feb 13 22:33:51 UTC 2026
# Secrets - NEVER commit these files
vault-unseal-keys
start-and-unseal.sh
vault-token-*
init.json
*.pem
*.key
*.crt
*.cert
$null
AppData/
build_errors*.txt
build_errors_utf8.txt
check.json
clippy*.txt
errors.txt
errors_utf8.txt
vault-unseal-keysdefault-vault.tar
prompts/sec-bots.md
AGENTS-PROD.md

23
.gitmodules vendored
View file

@ -1,43 +1,42 @@
[submodule "botapp"]
path = botapp
url = ../botapp.git
url = https://alm.pragmatismo.com.br/GeneralBots/botapp.git
[submodule "botserver"]
path = botserver
url = ../BotServer.git
url = https://alm.pragmatismo.com.br/GeneralBots/botserver.git
[submodule "botlib"]
path = botlib
url = ../botlib.git
url = https://alm.pragmatismo.com.br/GeneralBots/botlib.git
[submodule "botui"]
path = botui
url = ../botui.git
url = https://alm.pragmatismo.com.br/GeneralBots/botui.git
[submodule "botbook"]
path = botbook
url = ../botbook.git
url = https://alm.pragmatismo.com.br/GeneralBots/botbook.git
[submodule "bottest"]
path = bottest
url = ../bottest.git
url = https://alm.pragmatismo.com.br/GeneralBots/bottest.git
[submodule "botdevice"]
path = botdevice
url = ../botdevice.git
url = https://alm.pragmatismo.com.br/GeneralBots/botdevice.git
[submodule "botmodels"]
path = botmodels
url = ../botmodels.git
url = https://alm.pragmatismo.com.br/GeneralBots/botmodels.git
[submodule "botplugin"]
path = botplugin
url = ../botplugin.git
url = https://alm.pragmatismo.com.br/GeneralBots/botplugin.git
[submodule "bottemplates"]
path = bottemplates
url = ../bottemplates.git
url = https://alm.pragmatismo.com.br/GeneralBots/bottemplates.git
[submodule ".github"]
path = .github
url = ../.github.git
url = https://alm.pragmatismo.com.br/GeneralBots/.github.git

9
.idea/gb.iml generated Normal file
View file

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

10
.idea/libraries/botserver_installers.xml generated Normal file
View file

@ -0,0 +1,10 @@
<component name="libraryTable">
<library name="botserver-installers">
<CLASSES>
<root url="jar://$PROJECT_DIR$/botserver/botserver-installers/llama-b7345-bin-ubuntu-x64.zip!/" />
<root url="jar://$PROJECT_DIR$/botserver/botserver-installers/vault_1.15.4_linux_amd64.zip!/" />
</CLASSES>
<JAVADOC />
<SOURCES />
</library>
</component>

6
.idea/misc.xml generated Normal file
View file

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2">
<output url="file://$PROJECT_DIR$/out" />
</component>
</project>

8
.idea/modules.xml generated Normal file
View file

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/gb.iml" filepath="$PROJECT_DIR$/.idea/gb.iml" />
</modules>
</component>
</project>

6
.idea/vcs.xml generated Normal file
View file

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>
</project>

52
.idea/workspace.xml generated Normal file
View file

@ -0,0 +1,52 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ChangeListManager">
<list default="true" id="32fd08b0-7933-467d-9a46-1a53fd2da15c" name="Changes" comment="">
<change beforePath="$PROJECT_DIR$/botserver" beforeDir="false" afterPath="$PROJECT_DIR$/botserver" afterDir="false" />
</list>
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component>
<component name="ProjectColorInfo"><![CDATA[{
"associatedIndex": 1
}]]></component>
<component name="ProjectId" id="38qdWTFkX8Nem4LzgigXpAycSN7" />
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent"><![CDATA[{
"keyToString": {
"ModuleVcsDetector.initialDetectionPerformed": "true",
"RunOnceActivity.ShowReadmeOnStart": "true",
"RunOnceActivity.git.unshallow": "true",
"RunOnceActivity.typescript.service.memoryLimit.init": "true",
"git-widget-placeholder": "main",
"last_opened_file_path": "/home/rodriguez/src/gb",
"vue.rearranger.settings.migration": "true"
}
}]]></component>
<component name="SharedIndexes">
<attachedChunks>
<set>
<option value="bundled-jdk-30f59d01ecdd-2fc7cc6b9a17-intellij.indexing.shared.core-IU-253.30387.90" />
</set>
</attachedChunks>
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="32fd08b0-7933-467d-9a46-1a53fd2da15c" name="Changes" comment="" />
<created>1769531070022</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1769531070022</updated>
<workItem from="1769531115917" duration="176000" />
</task>
<servers />
</component>
</project>

198
.kiro/settings/lsp.json Normal file
View file

@ -0,0 +1,198 @@
{
"languages": {
"typescript": {
"name": "typescript-language-server",
"command": "typescript-language-server",
"args": [
"--stdio"
],
"file_extensions": [
"ts",
"js",
"tsx",
"jsx"
],
"project_patterns": [
"package.json",
"tsconfig.json"
],
"exclude_patterns": [
"**/node_modules/**",
"**/dist/**"
],
"multi_workspace": false,
"initialization_options": {
"preferences": {
"disableSuggestions": false
}
},
"request_timeout_secs": 60
},
"python": {
"name": "pyright",
"command": "pyright-langserver",
"args": [
"--stdio"
],
"file_extensions": [
"py"
],
"project_patterns": [
"pyproject.toml",
"setup.py",
"requirements.txt",
"pyrightconfig.json"
],
"exclude_patterns": [
"**/__pycache__/**",
"**/venv/**",
"**/.venv/**",
"**/.pytest_cache/**"
],
"multi_workspace": false,
"initialization_options": {},
"request_timeout_secs": 60
},
"rust": {
"name": "rust-analyzer",
"command": "rust-analyzer",
"args": [],
"file_extensions": [
"rs"
],
"project_patterns": [
"Cargo.toml"
],
"exclude_patterns": [
"**/target/**"
],
"multi_workspace": false,
"initialization_options": {
"cargo": {
"buildScripts": {
"enable": true
}
},
"diagnostics": {
"enable": true,
"enableExperimental": true
},
"workspace": {
"symbol": {
"search": {
"scope": "workspace"
}
}
}
},
"request_timeout_secs": 60
},
"java": {
"name": "jdtls",
"command": "jdtls",
"args": [],
"file_extensions": [
"java"
],
"project_patterns": [
"pom.xml",
"build.gradle",
"build.gradle.kts",
".project"
],
"exclude_patterns": [
"**/target/**",
"**/build/**",
"**/.gradle/**"
],
"multi_workspace": false,
"initialization_options": {
"settings": {
"java": {
"compile": {
"nullAnalysis": {
"mode": "automatic"
}
},
"configuration": {
"annotationProcessing": {
"enabled": true
}
}
}
}
},
"request_timeout_secs": 60
},
"ruby": {
"name": "solargraph",
"command": "solargraph",
"args": [
"stdio"
],
"file_extensions": [
"rb"
],
"project_patterns": [
"Gemfile",
"Rakefile"
],
"exclude_patterns": [
"**/vendor/**",
"**/tmp/**"
],
"multi_workspace": false,
"initialization_options": {},
"request_timeout_secs": 60
},
"go": {
"name": "gopls",
"command": "gopls",
"args": [],
"file_extensions": [
"go"
],
"project_patterns": [
"go.mod",
"go.sum"
],
"exclude_patterns": [
"**/vendor/**"
],
"multi_workspace": false,
"initialization_options": {
"usePlaceholders": true,
"completeUnimported": true
},
"request_timeout_secs": 60
},
"cpp": {
"name": "clangd",
"command": "clangd",
"args": [
"--background-index"
],
"file_extensions": [
"cpp",
"cc",
"cxx",
"c",
"h",
"hpp",
"hxx"
],
"project_patterns": [
"CMakeLists.txt",
"compile_commands.json",
"Makefile"
],
"exclude_patterns": [
"**/build/**",
"**/cmake-build-**/**"
],
"multi_workspace": false,
"initialization_options": {},
"request_timeout_secs": 60
}
}
}

24
.vscode/launch.json vendored Normal file
View file

@ -0,0 +1,24 @@
{
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Debug executable 'botserver'",
"cargo": {
"args": ["run", "--bin=botserver", "--package=botserver", "--manifest-path=${workspaceFolder}/botserver/Cargo.toml"],
"filter": {
"name": "botserver",
"kind": "bin"
}
},
"args": [],
"env": {
"RUST_LOG": "trace,aws_sigv4=off,aws_smithy_checksums=off,mio=off,reqwest=off,aws_runtime=off,aws_smithy_http_client=off,rustls=off,hyper_util=off,aws_smithy_runtime=off,aws_smithy_runtime_api=off,tracing=off,aws_sdk_s3=off"
},
"cwd": "${workspaceFolder}/botserver"
}
]
}

5
.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,5 @@
{
"git.ignoreLimitWarning": true,
"Codegeex.SidebarUI.LanguagePreference": "English",
"Codegeex.RepoIndex": true
}

7
.zed/settings.json Normal file
View file

@ -0,0 +1,7 @@
{
"languages": {
"Rust": {
"enable_language_server": false
}
}
}

710
AGENTS.md
View file

@ -1,710 +0,0 @@
# General Bots AI Agent Guidelines
- Use apenas a língua culta ao falar.
- Never save files on root! Use `/tmp` for temp files.
- Never push to ALM without asking first — it is production!
- If in trouble with a tool, go to the official website for install instructions.
- See `botserver/src/drive/local_file_monitor.rs` to load bots from `/opt/gbo/data`.
---
## 📁 Workspace Structure
| Crate | Purpose | Port | Tech Stack |
|-------|---------|------|------------|
| **botserver** | Main API server, business logic | 8080 | Axum, Diesel, Rhai BASIC |
| **botui** | Web UI server (dev) + proxy | 3000 | Axum, HTML/HTMX/CSS |
| **botapp** | Desktop app wrapper | - | Tauri 2 |
| **botlib** | Shared library | - | Core types, errors |
| **botbook** | Documentation | - | mdBook |
| **bottest** | Integration tests | - | tokio-test |
| **botdevice** | IoT/Device support | - | Rust |
| **botplugin** | Browser extension | - | JS |
### Key Paths
- **Binary:** `target/debug/botserver`
- **Run from:** `botserver/` directory
- **Env file:** `botserver/.env`
- **UI Files:** `botui/ui/suite/`
- **Bot data:** `/opt/gbo/data` (primary)
- **Test web:** `http://localhost:3000` — Login: `http://localhost:3000/suite/auth/login.html`
### 📦 Data Directory Structure
```
# DEV LOCAL (quando botserver-stack existe)
├── ./botserver-stack/data/system/work/{bot}.gbai/{bot}.gbdialog/
│ ├── *.bas # Scripts compilados (gerado automático)
│ └── *.ast # Cache compilado (deletar para forçar recompilação)
# PRODUCTION (com container Incus)
├── /opt/gbo/data/ # FONTE dos bots
└── (compilação fica em memória ou /opt/gbo/work/ se existir)
```
**IMPORTANTE:**
- **FONTE**: `/opt/gbo/data/{bot}.gbai/{bot}.gbdialog/{tool}.bas`
- **DEV LOCAL**: `./botserver-stack/data/system/work/{bot}.gbai/{bot}.gbdialog/`
- O botserver compila `.bas` → `.ast` automaticamente
- Se cache, deletar `.ast` para forçar recompilação
---
## 🧪 Debugging & Testing Tools
### 🔍 Ver Erros de Execução
```bash
tail -f botserver.log | grep -i "error\|tool"
```
### 🧪 Testar Ferramenta Específica
1. **Identificar o erro no log:**
```bash
grep -A5 "Tool error" botserver.log
```
2. **Corrigir o arquivo `.bas` na fonte:**
- **Dev local:** `./botserver-stack/data/system/work/{bot}.gbai/{bot}.gbdialog/{tool}.bas`
- **Production:** `/opt/gbo/data/{bot}.gbai/{bot}.gbdialog/{tool}.bas`
3. **Forçar recompilação (se necessário):**
```bash
rm ./botserver-stack/data/system/work/{bot}.gbai/{bot}.gbdialog/{tool}.ast
```
- Em dev local o AST fica em `./botserver-stack/...`
- Em production pode ficar em `/opt/gbo/work/...` se existir
4. **Testar novamente no browser:**
```
http://localhost:3000/{botname}
```
### ⚠️ Erros Comuns em Scripts BASIC
| Erro | Causa | Solução |
|------|-------|---------|
| `=== is not a valid operator` | BASIC usa `==`, não `===` | Substituir `===` por `--` em strings |
| `Syntax error` | Erro de sintaxe BASIC | Verificar parênteses, vírgulas |
| `Tool execution failed` | Erro no script | Ver logs para stack trace |
### 📝 Exemplo: Corrigir Operador Inválido
```bas
# ERRADO (JavaScript syntax):
PRINT "=== RESULTADO ==="
# CORRETO (BASIC syntax):
PRINT "-- RESULTADO --"
```
---
## 🧭 LLM Navigation Guide
1. Start with **[Component Dependency Graph](../README.md#-component-dependency-graph)**
2. Review **[Module Responsibility Matrix](../README.md#-module-responsibility-matrix)**
3. Study **[Data Flow Patterns](../README.md#-data-flow-patterns)**
4. Reference **[Common Architectural Patterns](../README.md#-common-architectural-patterns)**
5. Check [Security Rules](#-security-directives---mandatory) — violations are blocking
6. Follow [Code Patterns](#-mandatory-code-patterns) — consistency is mandatory
---
## ❌ Absolute Prohibitions
### Build & Deploy
- ❌ **NEVER** search `/target` folder
- ❌ **NEVER** build in release mode or use `--release`
- ❌ **NEVER** run `cargo build` — use `cargo check` for verification
- ❌ **NEVER** run `cargo clean` — causes 30min rebuilds; use `./reset.sh` for DB issues
- ❌ **NEVER** deploy manually — ALWAYS use CI/CD pipeline (push → ALM → alm-ci builds → deploys)
- ❌ **NEVER** use `scp`, direct SSH binary copy, or manual deployment
- ❌ **NEVER** run the binary directly — use `systemctl` or `./restart.sh`
### Code Quality
- ❌ **NEVER** use `panic!()`, `todo!()`, `unimplemented!()`, `unwrap()`, `expect()`
- ❌ **NEVER** use `Command::new()` directly — use `SafeCommand`
- ❌ **NEVER** return raw error strings to HTTP clients — use `ErrorSanitizer`
- ❌ **NEVER** use `#[allow()]` or lint exceptions in `Cargo.toml` — FIX the code
- ❌ **NEVER** use `_` prefix for unused vars — DELETE or USE them
- ❌ **NEVER** leave unused imports, dead code, or commented-out code
- ❌ **NEVER** use CDN links — all assets must be local
- ❌ **NEVER** create `.md` docs without checking `botbook/` first
- ❌ **NEVER** hardcode credentials — use `generate_random_string()` or env vars
### Build Pattern (MANDATORY) - Fix Fast Loop
When building botserver, use this pattern to fix errors ASAP:
```bash
# Run cargo in background, kill at 20 lines, fix errors, loop
# IMPORTANT: Never use --all-features (pulls docs/slides dependencies)
cd /home/rodriguez/src/gb
cargo check -p botserver > /tmp/check.log 2>&1 &
CARGO_PID=$!
while kill -0 $CARGO_PID 2>/dev/null; do
LINES=$(wc -l < /tmp/check.log 2>/dev/null || echo 0)
if [ "$LINES" -gt 20 ]; then
kill $CARGO_PID 2>/dev/null
echo "=== Got $LINES lines, killing cargo ==="
break
fi
sleep 1
done
# Check for errors - use strings to handle binary output
if strings /tmp/check.log | grep -q "^error"; then
echo "❌ Errors found:"
strings /tmp/check.log | grep "^error" | head -20
# Fix errors, then re-run this pattern
else
echo "✅ No errors - build clean!"
fi
```
**Key Rule:** Kill cargo at 20 lines, fix errors immediately, loop until clean.
**Why:** Compiling takes 2-3+ minutes. Getting errors in 20s saves 10+ minutes per error.
### Security
- ❌ **NEVER** include sensitive data (IPs, tokens, keys) in docs or code
- ❌ **NEVER** write internal IPs to logs — mask them (e.g., "10.x.x.x")
- ❌ **NEVER** create files with secrets in repo root
> **Secret files MUST be placed in `/tmp/` only** (ephemeral, not tracked by git).
---
## 🔐 Security Directives — MANDATORY
### 1. Error Handling — No Panics
```rust
// ❌ FORBIDDEN: unwrap(), expect(), panic!(), todo!()
// ✅ REQUIRED:
value?
value.ok_or_else(|| Error::NotFound)?
value.unwrap_or_default()
if let Some(v) = value { ... }
```
### 2. Command Execution — SafeCommand
```rust
// ❌ FORBIDDEN: Command::new("cmd").arg(user_input).output()
// ✅ REQUIRED:
use crate::security::command_guard::SafeCommand;
SafeCommand::new("allowed_command")?.arg("safe_arg")?.execute()
```
### 3. Error Responses — ErrorSanitizer
```rust
// ❌ FORBIDDEN: Json(json!({ "error": e.to_string() }))
// ✅ REQUIRED:
use crate::security::error_sanitizer::log_and_sanitize;
let sanitized = log_and_sanitize(&e, "context", None);
(StatusCode::INTERNAL_SERVER_ERROR, sanitized)
```
### 4. SQL — sql_guard
```rust
// ❌ FORBIDDEN: format!("SELECT * FROM {}", user_table)
// ✅ REQUIRED:
use crate::security::sql_guard::{sanitize_identifier, validate_table_name};
let safe_table = sanitize_identifier(&user_table);
validate_table_name(&safe_table)?;
```
### 5. Rate Limiting
- General: 100 req/s, Auth: 10 req/s, API: 50 req/s per token, WebSocket: 10 msgs/s
- Use `governor` crate with per-IP and per-User tracking
### 6. CSRF Protection
- ALL state-changing endpoints (POST/PUT/DELETE/PATCH) MUST require CSRF token
- Use `tower_csrf`, bound to user session. Exempt: Bearer Token endpoints
### 7. Security Headers (ALL responses)
`Content-Security-Policy`, `Strict-Transport-Security`, `X-Frame-Options: DENY`, `X-Content-Type-Options: nosniff`, `Referrer-Policy: strict-origin-when-cross-origin`, `Permissions-Policy: geolocation=(), microphone=(), camera=()`
### 8. Dependency Management
- App crates track `Cargo.lock`; lib crates don't
- Critical deps: exact versions (`=1.0.1`); regular: caret (`1.0`)
- Run `cargo audit` weekly; update only via PR with testing
---
## ✅ Mandatory Code Patterns
```rust
impl MyStruct { fn new() -> Self { Self { } } } // Use Self, not type name
#[derive(PartialEq, Eq)] // Always derive both
format!("Hello {name}") // Inline format args
match x { A | B => do_thing(), C => other() } // Combine identical arms
```
---
## 📏 File Size Limits
- **Max 450 lines per file** — split proactively at 350 lines
- Split by: `types.rs`, `handlers.rs`, `operations.rs`, `utils.rs`, `mod.rs`
- Re-export all public items in `mod.rs`
---
## 🔥 Error Fixing Workflow
### Preferred: Offline Batch Fix
1. Read ENTIRE error list first
2. Group errors by file
3. For each file: view → fix ALL errors → write once
4. Verify with build/diagnostics only AFTER all fixes
### ⚡ Streaming Build Rule
Don't wait for `cargo` to finish — cancel at first errors, fix, re-run.
### 🧠 Memory Issues (process "Killed")
```bash
pkill -9 cargo; pkill -9 rustc; pkill -9 botserver
CARGO_BUILD_JOBS=1 cargo check -p botserver 2>&1 | tail -200
```
---
## 🔄 Modos de Execução
O botserver suporta **dois modos** de execução:
### Modo 1: Local Standalone (sem Docker/Incus)
O botserver sobe **tudo localmente** (PostgreSQL, Valkey, MinIO, Vault, LLM).
```bash
cd /home/rodriguez/src/gb/botserver
cargo run -- --install # Instala dependências (PostgreSQL, Valkey, MinIO, etc.)
cargo run # Sobe tudo e inicia o servidor
```
**O que acontece:**
- `PackageManager` baixa e extrai binários para `botserver-stack/bin/`
- Cria `botserver-stack/data/pgdata/` com PostgreSQL
- Inicia PostgreSQL na porta 5432
- Inicia Valkey na porta 6379
- Inicia MinIO na porta 9100
- Configura Vault para secrets
- Baixa modelo LLM (llama.cpp) para detecção de anomalias
- Ao final: `http://localhost:8080`
**Verificar se está rodando:**
```bash
curl http://localhost:8080/health
curl http://localhost:5432 # PostgreSQL
curl http://localhost:6379 # Valkey
```
**Testar com Playwright:**
```bash
# Navegar para bot de teste
npx playwright open http://localhost:3000/salesianos
# Ou diretamente
npx playwright open http://localhost:3000/detecta
```
### Modo 2: Container (Incus) — Produção
Os serviços rodam em containers Incus separados.
```bash
# Subir todos os containers
sudo incus start system tables vault directory drive cache llm vector_db
# Verificar status
sudo incus list
# Acessar container system (onde roda botserver)
sudo incus exec system -- bash
# Ver logs do botserver
sudo incus exec system -- journalctl -u botserver -f
```
**Arquitetura de Containers:**
| Container | Services | Portas |
|-----------|----------|--------|
| system | BotServer, Valkey | 8080, 6379 |
| tables | PostgreSQL | 5432 |
| vault | Vault | 8200 |
| directory | Zitadel | 9000 |
| drive | MinIO | 9100 |
| cache | Valkey (backup) | 6379 |
| llm | llama.cpp | 8081 |
| vector_db | Qdrant | 6333 |
### reset.sh (Ambiente Local)
```bash
./reset.sh # Limpa e reinicia tudo localmente
```
### Service Commands
```bash
ps aux | grep -E "(botserver|botui)" | grep -v grep
curl http://localhost:8080/health
./restart.sh # Restart services
```
---
## 🎭 Playwright Browser Testing
### Browser Setup
If browser fails: `pkill -9 -f brave; pkill -9 -f chrome; pkill -9 -f chromium` → wait 3s → navigate again.
### Bot Testing Flow
1. Navigate to `http://localhost:3000/<botname>`
2. Snapshot → verify welcome message + suggestion buttons + Portuguese accents
3. Click suggestion → wait 3-5s → snapshot → fill data → submit
4. Verify DB records and backend logs
### Desktop UI Note
Chat window may cover other apps — click **middle button** (restore) to minimize, or navigate directly via URL.
### WhatsApp Testing
- Webhook is **global** — bot routing by typing bot name as first message
- Single WhatsApp number serves ALL bots; routing via `whatsapp-id` in `config.csv`
---
## Adding New Features
### Checklist
- [ ] Which module owns this? (Check Module Responsibility Matrix)
- [ ] Database migrations needed?
- [ ] New API endpoints?
- [ ] Security: input validation, auth, rate limiting, error sanitization?
- [ ] Screens in botui?
- [ ] No `unwrap()`/`expect()`?
### Pattern: types → schema → Diesel model → business logic → API endpoint → BASIC keyword (if applicable) → tests → docs in `botbook/`
### Commit & Deploy
```bash
cd botserver && git push alm main && git push origin main
cd .. && git add botserver && git commit -m "Update botserver: <desc>" && git push alm main && git push origin main
```
---
## 🎨 Frontend Standards
- **HTMX-first** — server returns HTML fragments, not JSON
- **Local assets only** — NO CDN links
- Use `hx-get`, `hx-post`, `hx-target`, `hx-swap`; WebSocket via htmx-ws
---
## 🚀 Performance & Quality
- `cargo clippy --workspace` must pass with **0 warnings**
- `cargo tree --duplicates` / `cargo machete` / `cargo audit` weekly
- Release profile: `opt-level = "z"`, `lto = true`, `codegen-units = 1`, `strip = true`, `panic = "abort"`
- Use `default-features = false` and opt-in to needed features
---
## 🧪 Testing
- **Unit:** per-crate `tests/` or `#[cfg(test)]` modules — `cargo test -p <crate>`
- **Integration:** `bottest/` crate — `cargo test -p bottest`
- **Coverage:** 80%+ on critical paths; ALL error paths and security guards tested
---
## 🚢 Deploy Workflow (CI/CD Only)
1. Push to ALM (triggers CI automatically)
2. CI builds on alm-ci → deploys to system container via SSH
3. Service auto-restarts on binary update
4. Verify: check service status + logs after ~10 min
### Container Architecture
| Container | Service | Port |
|-----------|---------|------|
| system | BotServer + Valkey | 8080/6379 |
| tables | PostgreSQL | 5432 |
| vault | Vault | 8200 |
---
## 🔑 Core Directives Summary
- **OFFLINE FIRST** — fix all errors from list before compiling
- **BATCH BY FILE** — fix ALL errors in a file at once, write once
- **VERIFY LAST** — only compile after ALL fixes applied
- **DELETE DEAD CODE** — never keep unused code
- **GIT WORKFLOW** — always push to ALL repositories
- **0 warnings, 0 errors** — loop until clean
---
## 🔧 Bot Scripts Architecture
### File Types
| File | Purpose |
|------|---------|
| `start.bas` | Entry point, executed on session start |
| `{tool}.bas` | Tool implementation (e.g., `detecta.bas`) |
| `tables.bas` | **SPECIAL** - Defines database tables, auto-creates on compile |
| `init_folha.bas` | Initialization script for specific features |
### tables.bas — SPECIAL FILE
- **DO NOT call via CALL keyword** - it's processed automatically
- Parsed at compile time by `process_table_definitions()`
- Tables are created/updated in database via `sync_bot_tables()`
- Location: `/opt/gbo/data/{bot}.gbai/{bot}.gbdialog/tables.bas`
### Tool Button Execution (TOOL_EXEC)
- Frontend sends `message_type: 6` via WebSocket
- Backend handles in `stream_response()` when `message_type == MessageType::TOOL_EXEC`
- Tool executes directly, skips KB injection and LLM
- Result appears in chat (tool output), no "/tool" text shown
### CALL Keyword
- Can call in-memory procedures OR .bas scripts
- Syntax: `CALL "script_name"` or `CALL "procedure_name"`
- If not in memory, looks for `{name}.bas` in bot's gbdialog folder
### DETECT Keyword
- Analyzes database table for anomalies
- Requires table to exist (defined in tables.bas)
- Example: `result = DETECT "folha_salarios"`
- Calls BotModels API at `/api/anomaly/detect`
### start.bas Execution
- Executed on WebSocket connect (for web clients)
- Also on first user message (blocking, once per session)
- Loads suggestions via `ADD_SUGGESTION_TOOL`
- Marks session with Redis key to prevent re-run
### MessageType Enum (botlib/src/message_types.rs)
| ID | Name | Purpose |
|----|------|---------|
| 0 | EXTERNAL | External message |
| 1 | USER | User message |
| 2 | BOT_RESPONSE | Bot response |
| 3 | CONTINUE | Continue processing |
| 4 | SUGGESTION | Suggestion button |
| 5 | CONTEXT_CHANGE | Context change |
| 6 | TOOL_EXEC | Direct tool execution (skips KB/LLM) |
**Usage:** When frontend sends `message_type: 6`, backend executes tool directly without going through LLM.
### 🚨 FUNDAMENTAL: Submodule Push Rule (MANDATORY)
**Every time you push the main repo, you MUST also push ALL submodules!**
```bash
# After ANY main repo push, ALWAYS run:
cd botserver && git push origin main && git push alm main
cd ../botui && git push origin main && git push alm main
cd ../botlib && git push origin main && git push alm main
# ... repeat for ALL submodules
```
**Why:** CI builds based on submodule commits. If submodule isn't pushed, CI deploys old code.
**Checklist before pushing:**
- [ ] botserver pushed?
- [ ] botui pushed?
- [ ] botlib pushed?
- [ ] All other submodules pushed?
- [ ] Main repo points to new submodule commits?
---
## 🔐 Zitadel Setup (Directory Service)
### Container Architecture
- **directory container**: Zitadel running on port **8080** internally
- **tables container**: PostgreSQL database on port 5432
- Use database **PROD-DIRECTORY** for Zitadel data
### Network Access (Container Mode)
- **Internal API**: `http://<directory-ip>:8080`
- **External port 9000** redirected via iptables NAT to directory:8080
- **Health check**: `curl -sf http://localhost:8080/debug/healthz`
### Zitadel Installation Steps
1. **Reset database** (on tables container):
```bash
psql -h localhost -U postgres -d postgres -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = 'PROD-DIRECTORY' AND pid <> pg_backend_pid();"
psql -h localhost -U postgres -d postgres -c "DROP DATABASE IF EXISTS \"PROD-DIRECTORY\";"
psql -h localhost -U postgres -d postgres -c "CREATE DATABASE \"PROD-DIRECTORY\";"
```
2. **Create init config** (on directory container):
```bash
cat > /opt/gbo/conf/directory/zitadel-init-steps.yaml << "EOF"
FirstInstance:
InstanceName: "BotServer"
DefaultLanguage: "en"
PatPath: "/opt/gbo/conf/directory/admin-pat.txt"
Org:
Name: "BotServer"
Machine:
Machine:
Username: "admin-sa"
Name: "Admin Service Account"
Pat:
ExpirationDate: "2099-01-01T00:00:00Z"
Human:
UserName: "admin"
FirstName: "Admin"
LastName: "User"
Email:
Address: "admin@localhost"
Verified: true
Password: "Admin123!"
PasswordChangeRequired: false
EOF
```
3. **Start Zitadel** (on directory container):
```bash
pkill -9 zitadel || true
nohup env \
ZITADEL_DATABASE_POSTGRES_HOST=<tables-ip> \
ZITADEL_DATABASE_POSTGRES_PORT=5432 \
ZITADEL_DATABASE_POSTGRES_DATABASE=PROD-DIRECTORY \
ZITADEL_DATABASE_POSTGRES_USER_USERNAME=postgres \
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=postgres \
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable \
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres \
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres \
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable \
ZITADEL_EXTERNALSECURE=false \
ZITADEL_EXTERNALDOMAIN=<directory-ip> \
ZITADEL_EXTERNALPORT=9000 \
ZITADEL_TLS_ENABLED=false \
/opt/gbo/bin/zitadel start-from-init \
--masterkey MasterkeyNeedsToHave32Characters \
--tlsMode disabled \
--externalDomain <directory-ip> \
--externalPort 9000 \
--steps /opt/gbo/conf/directory/zitadel-init-steps.yaml \
> /opt/gbo/logs/zitadel.log 2>&1 &
```
4. **Wait for bootstrap** (~90 seconds), then verify:
```bash
curl -sf http://localhost:8080/debug/healthz
cat /opt/gbo/conf/directory/admin-pat.txt
```
5. **Configure iptables** (on system container):
```bash
iptables -t nat -A PREROUTING -p tcp --dport 9000 -j DNAT --to-destination <directory-ip>:8080
iptables -t nat -A OUTPUT -p tcp -d <external-ip> --dport 9000 -j DNAT --to-destination <directory-ip>:8080
```
### Zitadel API Usage
**PAT file location**: `/opt/gbo/conf/directory/admin-pat.txt` (on directory container)
#### Get IAM Info (internal)
```bash
curl -s -H "Authorization: Bearer $PAT" http://<directory-ip>:8080/management/v1/iam
```
#### Get IAM Info (external via port 9000)
```bash
curl -s -H "Authorization: Bearer $PAT" -H "Host: <directory-ip>" http://<external-ip>:9000/management/v1/iam
```
#### Create Human User
```bash
curl -s -X POST \
-H "Authorization: Bearer $PAT" \
-H "Host: <directory-ip>" \
-H "Content-Type: application/json" \
http://<external-ip>:9000/management/v1/users/human \
-d '{
"userName": "janedoe",
"name": "Jane Doe",
"profile": {"firstName": "Jane", "lastName": "Doe"},
"email": {"email": "jane@example.com"}
}'
```
### Zitadel API Endpoints Reference
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/management/v1/iam` | GET | Get IAM info |
| `/management/v1/orgs/me` | GET | Get current org |
| `/management/v1/users/human` | POST | Create human user |
| `/management/v1/users/machine` | POST | Create machine user |
| `/oauth/v2/token` | POST | Get access token |
| `/debug/healthz` | GET | Health check |
### Important Notes
- **Zitadel listens on port 8080 internally**
- **External port 9000** is forwarded via iptables NAT
- **Use Host header** with directory IP for external API calls
- **PAT file**: `/opt/gbo/conf/directory/admin-pat.txt`
- **Admin credentials**: `admin` / `Admin123!` (human user)
- **Database**: `PROD-DIRECTORY` on tables container
- **Zitadel v4.13.1** is the current version
---
## 📊 SEPLAGSE Bot Configuration
### Bot Location
- **Source**: `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/`
- **Work**: `./botserver-stack/data/system/work/seplagse.gbai/seplagse.gbdialog/`
### Key Files
| File | Purpose |
|------|---------|
| `start.bas` | Entry point with suggestion buttons |
| `detecta.bas` | Tool for detecting anomalies in folha_salarios |
| `init_folha.bas` | Tool to initialize test data (INSERT keyword has issues) |
| `tables.bas` | Table definitions - auto-processed on compile |
### Tool Button Configuration (start.bas)
```bas
ADD_SUGGESTION_TOOL "detecta" AS "🔍 Detectar Desvios na Folha"
ADD_SUGGESTION_TOOL "init_folha" AS "⚙️ Inicializar Dados de Teste"
```
### Detection Flow
1. User clicks "Detectar Desvios na Folha" button
2. Frontend sends `message_type: 6` (TOOL_EXEC) via WebSocket
3. Backend executes `detecta.bas` directly (skips KB/LLM)
4. `detecta.bas` calls `DETECT "folha_salarios"` keyword
5. Keyword queries bot-specific database for table data
6. Data sent to BotModels API at `/api/anomaly/detect`
7. Results displayed in chat
### Fixes Applied
1. **TOOL_EXEC message type**: Added `MessageType::TOOL_EXEC` (id=6)
2. **Frontend WebSocket**: Sends `message_type: 6` for tool buttons
3. **Backend handler**: `stream_response()` handles TOOL_EXEC directly
4. **DETECT keyword**: Fixed to use bot-specific database (`bot_database_manager`)
5. **Bot execution**: Tool buttons work - no "/tool" text shown
### Known Issues
- **INSERT keyword**: Has parsing issues in multi-line scripts
- **Test data**: `init_folha.bas` cannot insert data due to INSERT issues
- **Workaround**: Insert data manually or via external SQL tool
### Testing
```bash
# Restart services
./restart.sh
# Test in browser
http://localhost:3000/seplagse
# Check logs
tail -f botserver.log | grep -i "detecta\|error"
```

11677
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -9,7 +9,6 @@ members = [
"bottest",
"botui",
]
exclude = ["backup-to-s3"]
[workspace.lints.rust]
@ -110,7 +109,6 @@ url = "2.5"
dirs = "5.0"
tempfile = "3"
walkdir = "2.5.0"
notify = "8.0"
# ─── COMPRESSION / ARCHIVES ───
flate2 = "1.0"
@ -176,7 +174,7 @@ indicatif = "0.18.0"
# ─── MEMORY ALLOCATOR ───
tikv-jemallocator = "0.6"
tikv-jemalloc-ctl = { version = "0.6", default-features = false, features = ["stats"] }
tikv-jemalloc-ctl = { version = "0.6", default-features = false }
# ─── SECRETS / VAULT ───
vaultrs = "0.7"
@ -198,7 +196,7 @@ csv = "1.3"
tonic = { version = "0.14.2", default-features = false }
# ─── STATIC FILES ───
rust-embed = { version = "8.5", features = ["interpolate-folder-path"] }
rust-embed = "8.5"
mime_guess = "2.0"
# ─── TAURI (Desktop/Mobile) ───

204
DEPENDENCIES-DEV.sh Normal file
View file

@ -0,0 +1,204 @@
#!/bin/bash
#
# DEPENDENCIES-DEV.sh - Development Dependencies for General Bots
#
# Installs the extra packages required to BUILD botserver from source.
# Skip this script unless you intend to compile the code yourself.
#
# Usage: sudo ./DEPENDENCIES-DEV.sh
#
set -e

# ANSI color codes used by all status output below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Banner.
echo -e "${GREEN}========================================${NC}"
echo -e "${GREEN} General Bots Development Dependencies${NC}"
echo -e "${GREEN}========================================${NC}"

# Package installation requires root; bail out early otherwise.
[ "$EUID" -eq 0 ] || {
    echo -e "${RED}Error: Run as root (use sudo)${NC}"
    exit 1
}

# Identify the distribution via /etc/os-release (sourcing it sets $ID).
if [ ! -f /etc/os-release ]; then
    echo -e "${RED}Error: Cannot detect OS${NC}"
    exit 1
fi
. /etc/os-release
OS=$ID

echo -e "${YELLOW}OS: $OS${NC}"
# Install the full build toolchain on Debian/Ubuntu-family distributions.
# Assumes root; relies on apt-get being present.
install_debian_ubuntu() {
    # Refresh the package indexes before installing anything.
    apt-get update

    local pkgs=(
        build-essential
        gcc
        g++
        clang
        llvm-dev
        libclang-dev
        cmake
        make
        git
        pkg-config
        libssl-dev
        libpq-dev
        liblzma-dev
        zlib1g-dev
        libabseil-dev
        protobuf-compiler
        libprotobuf-dev
        automake
        bison
        flex
        gperf
        libtool
        m4
        nasm
        python3
        python3-pip
        nodejs
        npm
    )
    apt-get install -y "${pkgs[@]}"

    # Cross-compilation toolchains: best-effort, since these packages are
    # not available on every host architecture (hence the trailing || true).
    apt-get install -y \
        gcc-aarch64-linux-gnu \
        gcc-arm-linux-gnueabihf \
        gcc-x86-64-linux-gnu || true
}
# Install the full build toolchain on Fedora/RHEL-family distributions.
# Assumes root; relies on dnf being present.
install_fedora_rhel() {
    # The "Development Tools" group supplies the base compiler toolchain.
    dnf groupinstall -y "Development Tools"

    local pkgs=(
        gcc
        gcc-c++
        clang
        llvm-devel
        clang-devel
        cmake
        make
        git
        pkgconf-devel
        openssl-devel
        libpq-devel
        xz-devel
        zlib-devel
        abseil-cpp-devel
        protobuf-compiler
        protobuf-devel
        automake
        bison
        flex
        gperf
        libtool
        m4
        nasm
        python3
        python3-pip
        nodejs
        npm
    )
    dnf install -y "${pkgs[@]}"
}
# Install the full build toolchain on Arch-family distributions.
# Assumes root; relies on pacman being present.
install_arch() {
    # NOTE: the previous version used `pacman -Sy <pkgs>`, which syncs the
    # package databases WITHOUT upgrading installed packages and then installs
    # new ones on top. That is a "partial upgrade", which Arch explicitly does
    # not support and which routinely breaks shared-library dependencies.
    # `-Syu` performs a full sync + upgrade first; `--needed` skips packages
    # that are already installed and current.
    pacman -Syu --needed --noconfirm \
        base-devel \
        gcc \
        clang \
        llvm \
        cmake \
        make \
        git \
        pkgconf \
        openssl \
        postgresql-libs \
        xz \
        zlib \
        abseil-cpp \
        protobuf \
        automake \
        bison \
        flex \
        gperf \
        libtool \
        m4 \
        nasm \
        python \
        python-pip \
        nodejs \
        npm
}
# Install the full build toolchain on Alpine Linux.
# Assumes root; relies on apk being present.
install_alpine() {
    local pkgs=(
        build-base gcc g++ clang llvm-dev clang-dev
        cmake make git
        pkgconf-dev openssl-dev postgresql-dev xz-dev zlib-dev
        abseil-cpp-dev protobuf-dev protoc
        automake bison flex gperf libtool m4 nasm
        python3 py3-pip nodejs npm
    )
    # --no-cache avoids leaving apk index files behind in the image/host.
    apk add --no-cache "${pkgs[@]}"
}
# Route to the installer for the detected distribution; derivatives are
# mapped to their upstream family (Mint/Pop -> Ubuntu/Debian, Manjaro -> Arch).
case $OS in
ubuntu|debian|linuxmint|pop)
install_debian_ubuntu
;;
fedora|rhel|centos|rocky|almalinux)
install_fedora_rhel
;;
arch|manjaro)
install_arch
;;
alpine)
install_alpine
;;
*)
# Unknown distribution: print the manual package checklist, then fail.
echo -e "${RED}Unsupported OS: $OS${NC}"
echo "Required development packages:"
echo " - build-essential/base-devel"
echo " - gcc, g++, clang"
echo " - cmake, make, git"
echo " - Development headers for:"
echo " - OpenSSL, PostgreSQL, XZ, zlib"
echo " - Abseil, Protobuf, LLVM"
exit 1
;;
esac
# Post-install guidance: Rust itself is intentionally NOT installed here.
echo -e "${GREEN}Development dependencies installed!${NC}"
echo ""
echo "Install Rust if not already installed:"
echo " curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh"
echo ""
echo "Then build with:"
echo " cargo build --release"

View file

@ -1,77 +0,0 @@
<#
.SYNOPSIS
    Installs runtime dependencies for General Bots on Windows.
.DESCRIPTION
    This script downloads and configures the system libraries required to build
    and run BotServer on Windows. It downloads PostgreSQL binaries (for libpq)
    and sets the PQ_LIB_DIR environment variable permanently.
.EXAMPLE
    PS> .\DEPENDENCIES.ps1
#>
$ErrorActionPreference = 'Stop'

# Invoke-WebRequest in Windows PowerShell 5.1 renders a progress bar that
# slows large downloads dramatically; silencing it restores full speed for
# the ~300MB PostgreSQL archive below.
$ProgressPreference = 'SilentlyContinue'

# Opt in to TLS 1.2 (in addition to whatever is already enabled) for legacy
# Windows PowerShell builds whose default protocol set is rejected by
# modern download servers.
[Net.ServicePointManager]::SecurityProtocol = `
    [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12

# ─── COLORS ───
function Write-Step { param($msg) Write-Host " * $msg" -ForegroundColor Green }
function Write-Warn { param($msg) Write-Host " ! $msg" -ForegroundColor Yellow }
function Write-Err  { param($msg) Write-Host " x $msg" -ForegroundColor Red }

Write-Host "========================================" -ForegroundColor Green
Write-Host " General Bots Runtime Dependencies" -ForegroundColor Green
Write-Host " (Windows)" -ForegroundColor Green
Write-Host "========================================" -ForegroundColor Green
Write-Host ""

# ─── PostgreSQL binaries (libpq.lib for Diesel ORM) ───
$PgsqlDir = "C:\pgsql\pgsql"
$PgsqlLib = "$PgsqlDir\lib\libpq.lib"
$PgsqlZipUrl = "https://get.enterprisedb.com/postgresql/postgresql-17.4-1-windows-x64-binaries.zip"
$PgsqlZip = "$env:TEMP\pgsql.zip"

if (Test-Path $PgsqlLib) {
    # Already extracted from a previous run - nothing to download.
    Write-Step "PostgreSQL binaries already present at $PgsqlDir"
} else {
    Write-Host "`nDownloading PostgreSQL binaries..." -ForegroundColor Cyan
    Write-Host " URL: $PgsqlZipUrl"
    Write-Host " This may take a few minutes (~300MB)...`n"
    Invoke-WebRequest -Uri $PgsqlZipUrl -OutFile $PgsqlZip -UseBasicParsing

    Write-Host "Extracting to C:\pgsql ..."
    # Remove any stale/partial extraction before unpacking the fresh archive.
    if (Test-Path "C:\pgsql") { Remove-Item "C:\pgsql" -Recurse -Force }
    Expand-Archive -Path $PgsqlZip -DestinationPath "C:\pgsql" -Force
    Remove-Item $PgsqlZip -Force -ErrorAction SilentlyContinue

    if (Test-Path $PgsqlLib) {
        Write-Step "PostgreSQL binaries installed successfully."
    } else {
        Write-Err "Failed to find libpq.lib after extraction!"
        exit 1
    }
}

# Set PQ_LIB_DIR permanently for the current user (consumed by the Rust `pq-sys`
# build script when compiling Diesel's PostgreSQL backend).
$CurrentPqDir = [System.Environment]::GetEnvironmentVariable("PQ_LIB_DIR", "User")
if ($CurrentPqDir -ne "$PgsqlDir\lib") {
    [System.Environment]::SetEnvironmentVariable("PQ_LIB_DIR", "$PgsqlDir\lib", "User")
    # Also set it for the current process so a rebuild works without restarting.
    $env:PQ_LIB_DIR = "$PgsqlDir\lib"
    Write-Step "PQ_LIB_DIR set to '$PgsqlDir\lib' (User environment variable)"
} else {
    Write-Step "PQ_LIB_DIR already configured."
}

# ─── Summary ───
Write-Host ""
Write-Host "========================================" -ForegroundColor Green
Write-Host " Dependencies installed!" -ForegroundColor Green
Write-Host "========================================" -ForegroundColor Green
Write-Host ""
Write-Host "You can now build and run:" -ForegroundColor Cyan
Write-Host " cargo build -p botserver"
Write-Host " cargo build -p botui"
Write-Host " .\restart.ps1"
Write-Host ""
Write-Host "NOTE: If this is the first time, restart your terminal" -ForegroundColor Yellow
Write-Host " so PQ_LIB_DIR takes effect." -ForegroundColor Yellow

743
README.md
View file

@ -1,27 +1,8 @@
RULE 0: Never call tool_call while thinking. E.g., NEVER do this: Let me check if the API call succeeded:<tool_call>terminal<arg_key>command</arg_key><arg_value>tail -50 botserver.log | grep -E "LLM streaming error|error|Error|SUCCESS|200"</arg_value><arg_key>cd</arg_key><arg_value>gb</arg_value></tool_call>. First finish thinking, then emit an explanation and the tool call!
# General Bots Workspace
## ⚠️ CRITICAL SECURITY WARNING
**NEVER CREATE FILES WITH SECRETS IN THE REPOSITORY ROOT**
Secret files MUST be placed in `/tmp/` only:
- ✅ `/tmp/vault-token-gb` - Vault root token
- ✅ `/tmp/vault-unseal-key-gb` - Vault unseal key
- ❌ `vault-unseal-keys` - FORBIDDEN (tracked by git)
- ❌ `start-and-unseal.sh` - FORBIDDEN (contains secrets)
**Files added to .gitignore:** `vault-unseal-keys`, `start-and-unseal.sh`, `vault-token-*`
**Why `/tmp/`?**
- Cleared on reboot (ephemeral)
- Not tracked by git
- Standard Unix security practice
- Prevents accidental commits
---
**Version:** 6.3.0
**Version:** 6.2.0
**Type:** Rust Workspace (Monorepo with Independent Subproject Repos)
---
@ -38,7 +19,7 @@ For comprehensive documentation, see **[docs.pragmatismo.com.br](https://docs.pr
| Crate | Purpose | Port | Tech Stack |
|-------|---------|------|------------|
| **botserver** | Main API server, business logic | 9000 | Axum, Diesel, Rhai BASIC |
| **botserver** | Main API server, business logic | 8088 | Axum, Diesel, Rhai BASIC |
| **botui** | Web UI server (dev) + proxy | 3000 | Axum, HTML/HTMX/CSS |
| **botapp** | Desktop app wrapper | - | Tauri 2 |
| **botlib** | Shared library | - | Core types, errors |
@ -54,35 +35,6 @@ For comprehensive documentation, see **[docs.pragmatismo.com.br](https://docs.pr
- **Env file:** `botserver/.env`
- **Stack:** `botserver-stack/`
- **UI Files:** `botui/ui/suite/`
- **Local Bot Data:** `/opt/gbo/data/` (place `.gbai` packages here)
### Local Bot Data Directory
Place local bot packages in `/opt/gbo/data/` for automatic loading and monitoring:
**Directory Structure:**
```
/opt/gbo/data/
└── mybot.gbai/
├── mybot.gbdialog/
│ ├── start.bas
│ └── main.bas
└── mybot.gbot/
└── config.csv
```
**Features:**
- **Auto-loading:** Bots automatically mounted on server startup
- **Auto-compilation:** `.bas` files compiled to `.ast` on change
- **Auto-creation:** New bots automatically added to database
- **Hot-reload:** Changes trigger immediate recompilation
- **Monitored by:** LocalFileMonitor and ConfigWatcher services
**Usage:**
1. Create bot directory structure in `/opt/gbo/data/`
2. Add `.bas` files to `<bot_name>.gbai/<bot_name>.gbdialog/`
3. Server automatically detects and loads the bot
4. Optional: Add `config.csv` for bot configuration
---
@ -92,36 +44,15 @@ Place local bot packages in `/opt/gbo/data/` for automatic loading and monitorin
BotServer automatically installs, configures, and manages all infrastructure components on first run. **DO NOT manually start these services** - BotServer handles everything.
**Automatic Service Lifecycle:**
1. **Start**: When botserver starts, it automatically launches all infrastructure components (PostgreSQL, Vault, MinIO, Valkey, Qdrant, etc.)
2. **Credentials**: BotServer retrieves all service credentials (passwords, tokens, API keys) from Vault
3. **Connection**: BotServer uses these credentials to establish secure connections to each service
4. **Query**: All database queries, cache operations, and storage requests are authenticated using Vault-managed credentials
**Credential Flow:**
```
botserver starts
Launch PostgreSQL, MinIO, Valkey, Qdrant
Connect to Vault
Retrieve service credentials (from database)
Authenticate with each service using retrieved credentials
Ready to handle requests
```
| Component | Purpose | Port | Binary Location | Credentials From |
|-----------|---------|------|-----------------|------------------|
| **Vault** | Secrets management | 8200 | `botserver-stack/bin/vault/vault` | Auto-unsealed |
| **PostgreSQL** | Primary database | 5432 | `botserver-stack/bin/tables/bin/postgres` | Vault → database |
| **MinIO** | Object storage (S3-compatible) | 9000/9001 | `botserver-stack/bin/drive/minio` | Vault → database |
| **Zitadel** | Identity/Authentication | 8300 | `botserver-stack/bin/directory/zitadel` | Vault → database |
| **Qdrant** | Vector database (embeddings) | 6333 | `botserver-stack/bin/vector_db/qdrant` | Vault → database |
| **Valkey** | Cache/Queue (Redis-compatible) | 6379 | `botserver-stack/bin/cache/valkey-server` | Vault → database |
| **Llama.cpp** | Local LLM server | 8081 | `botserver-stack/bin/llm/build/bin/llama-server` | Vault → database |
| Component | Purpose | Port | Binary Location | Managed By |
|-----------|---------|------|-----------------|------------|
| **Vault** | Secrets management | 8200 | `botserver-stack/bin/vault/vault` | botserver |
| **PostgreSQL** | Primary database | 5432 | `botserver-stack/bin/tables/bin/postgres` | botserver |
| **MinIO** | Object storage (S3-compatible) | 9000/9001 | `botserver-stack/bin/drive/minio` | botserver |
| **Zitadel** | Identity/Authentication | 8300 | `botserver-stack/bin/directory/zitadel` | botserver |
| **Qdrant** | Vector database (embeddings) | 6333 | `botserver-stack/bin/vector_db/qdrant` | botserver |
| **Valkey** | Cache/Queue (Redis-compatible) | 6379 | `botserver-stack/bin/cache/valkey-server` | botserver |
| **Llama.cpp** | Local LLM server | 8081 | `botserver-stack/bin/llm/build/bin/llama-server` | botserver |
### 📦 Component Installation System
@ -340,12 +271,9 @@ cd botserver-stack/bin/cache && ./valkey-cli ping
The script handles BOTH servers properly:
1. Stop existing processes cleanly
2. Build botserver and botui sequentially (no race conditions)
3. Start botserver in background → **automatically starts all infrastructure services (PostgreSQL, Vault, MinIO, Valkey, Qdrant)**
4. BotServer retrieves credentials from Vault and authenticates with all services
5. Start botui in background → proxy to botserver
6. Show process IDs and monitoring commands
**Infrastructure services are fully automated - no manual configuration required!**
3. Start botserver in background → auto-bootstrap infrastructure
4. Start botui in background → proxy to botserver
5. Show process IDs and monitoring commands
**Monitor startup:**
```bash
@ -354,7 +282,7 @@ tail -f botserver.log botui.log
**Access:**
- Web UI: http://localhost:3000
- API: http://localhost:9000
- API: http://localhost:8088
### 📊 Monitor & Debug
@ -378,7 +306,7 @@ grep -E " E |W |CLIENT:" botserver.log | tail -20
```bash
cd botserver && cargo run -- --noconsole > ../botserver.log 2>&1 &
cd botui && BOTSERVER_URL="http://localhost:9000" cargo run > ../botui.log 2>&1 &
cd botui && BOTSERVER_URL="http://localhost:8088" cargo run > ../botui.log 2>&1 &
```
### 🛑 Stop Servers
@ -396,7 +324,7 @@ rm -rf botserver-stack/data/vault botserver-stack/conf/vault/init.json && ./rest
**Port in use?** Find and kill:
```bash
lsof -ti:9000 | xargs kill -9
lsof -ti:8088 | xargs kill -9
lsof -ti:3000 | xargs kill -9
```
@ -407,59 +335,8 @@ All infrastructure services (PostgreSQL, Vault, Redis, Qdrant, MinIO, etc.) are
- **Configurations:** `botserver-stack/conf/`
- **Data storage:** `botserver-stack/data/`
- **Service logs:** `botserver-stack/logs/` (check here for troubleshooting)
- **Credentials:** Stored in Vault, retrieved by botserver at startup
**Do NOT install or reference global PostgreSQL, Redis, or other services.** When botserver starts, it automatically:
1. Launches all required stack services
2. Connects to Vault
3. Retrieves credentials from the `bot_configuration` database table
4. Authenticates with each service using retrieved credentials
5. Begins handling requests with authenticated connections
If you encounter service errors, check the individual service logs in `./botserver-stack/logs/[service]/` directories.
### UI File Deployment - Production Options
**Option 1: Embedded UI (Recommended for Production)**
The `embed-ui` feature compiles UI files directly into the botui binary, eliminating the need for separate file deployment:
```bash
# Build with embedded UI files
cargo build --release -p botui --features embed-ui
# The binary now contains all UI files - no additional deployment needed!
# The botui binary is self-contained and production-ready
```
**Benefits of embed-ui:**
- ✅ Single binary deployment (no separate UI files)
- ✅ Faster startup (no filesystem access)
- ✅ Smaller attack surface
- ✅ Simpler deployment process
**Option 2: Filesystem Deployment (Development Only)**
For development, UI files are served from the filesystem:
```bash
# UI files must exist at botui/ui/suite/
# This is automatically available in development builds
```
**Option 3: Manual File Deployment (Legacy)**
If you need to deploy UI files separately (not recommended):
```bash
# Deploy UI files to production location
./botserver/deploy/deploy-ui.sh /opt/gbo
# Verify deployment
ls -la /opt/gbo/bin/ui/suite/index.html
```
See `botserver/deploy/README.md` for deployment scripts.
**Do NOT install or reference global PostgreSQL, Redis, or other services.** When botserver starts, it automatically launches all required stack services. If you encounter service errors, check the individual service logs in `./botserver-stack/logs/[service]/` directories.
### Start Both Servers (Automated)
```bash
@ -473,7 +350,7 @@ See `botserver/deploy/README.md` for deployment scripts.
cd botserver && cargo run -- --noconsole
# Terminal 2: botui
cd botui && BOTSERVER_URL="http://localhost:9000" cargo run
cd botui && BOTSERVER_URL="http://localhost:8088" cargo run
```
### Build Commands
@ -490,11 +367,234 @@ cargo test -p bottest
---
## 🤖 AI Agent Guidelines
## 🧭 LLM Navigation Guide
> **For LLM instructions, coding rules, security directives, testing workflows, and error handling patterns, see [AGENTS.md](./AGENTS.md).**
### Quick Context Jump
- [Primary Purpose](#overview) - Unified workspace for AI automation platform
- [Crate Structure](#-workspace-structure) - 9 independent crates with shared libraries
- [Dependencies](#-component-dependency-graph) - How crates depend on each other
- [Quick Start](#quick-start) - Get running in 2 commands
- [Error Patterns](#common-error-patterns) - Fix compilation errors efficiently
- [Security Rules](#-security-directives---mandatory) - MUST-FOLLOW security patterns
- [Code Patterns](#-mandatory-code-patterns) - Required coding conventions
- [Testing](#testing-strategy) - How to test changes
- [Debugging](#debugging-guide) - Troubleshoot common issues
---
### Reading This Workspace
**For LLMs analyzing this codebase:**
1. Start with [Component Dependency Graph](#-component-dependency-graph) to understand relationships
2. Review [Module Responsibility Matrix](#-module-responsibility-matrix) for what each module does
3. Study [Data Flow Patterns](#-data-flow-patterns) to understand execution flow
4. Reference [Common Architectural Patterns](#-common-architectural-patterns) before making changes
5. Check [Security Rules](#-security-directives---mandatory) - violations are blocking issues
6. Follow [Code Patterns](#-mandatory-code-patterns) - consistency is mandatory
**For Humans working on this codebase:**
1. Follow [Error Fixing Workflow](#-error-fixing-workflow) for compilation errors
2. Observe [File Size Limits](#-file-size-limits---mandatory) - max 450 lines per file
3. Run [Weekly Maintenance Tasks](#-weekly-maintenance-tasks) to keep codebase healthy
4. Read project-specific READMEs in [Project-Specific Guidelines](#-project-specific-guidelines)
## 🧪 Testing Strategy
### Unit Tests
- **Location**: Each crate has `tests/` directory or inline `#[cfg(test)]` modules
- **Naming**: Test functions use `test_` prefix or describe what they test
- **Running**: `cargo test -p <crate_name>` or `cargo test` for all
### Integration Tests
- **Location**: `bottest/` crate contains integration tests
- **Scope**: Tests full workflows across multiple crates
- **Running**: `cargo test -p bottest`
- **Database**: Uses test database, automatically migrates on first run
### Test Utilities Available
- **TestAppStateBuilder** (`bottest/src/harness.rs`) - Build test state with mocked components
- **TestBot** (`bottest/src/bot/mod.rs`) - Mock bot for testing
- **Test Database**: Auto-created, migrations run automatically
### Coverage Goals
- **Critical paths**: 80%+ coverage required
- **Error handling**: ALL error paths must have tests
- **Security**: All security guards must have tests
## 🚨 CRITICAL ERROR HANDLING RULE
**STOP EVERYTHING WHEN ERRORS APPEAR**
When ANY error appears in logs during startup or operation:
1. **IMMEDIATELY STOP** - Do not continue with other tasks
2. **IDENTIFY THE ERROR** - Read the full error message and context
3. **FIX THE ERROR** - Address the root cause, not symptoms
4. **VERIFY THE FIX** - Ensure error is completely resolved
5. **ONLY THEN CONTINUE** - Never ignore or work around errors
**NEVER restart servers to "fix" errors - FIX THE ACTUAL PROBLEM**
Examples of errors that MUST be fixed immediately:
- Database connection errors
- Component initialization failures
- Service startup errors
- Configuration errors
- Any error containing "Error:", "Failed:", "Cannot", "Unable"
## 🐛 Debugging Guide
### Log Locations
| Component | Log File | What's Logged |
|-----------|----------|---------------|
| **botserver** | `botserver.log` | API requests, errors, script execution, **client navigation events** |
| **botui** | `botui.log` | UI rendering, WebSocket connections |
| **drive_monitor** | In botserver logs with `[drive_monitor]` prefix | File sync, compilation |
| **script execution** | In botserver logs with `[ScriptService]` prefix | BASIC compilation, runtime errors |
| **client errors** | In botserver logs with `CLIENT:` prefix | JavaScript errors, navigation events |
### Client-Side Logging
**Navigation Tracking:** All client-side navigation is logged to botserver.log with `CLIENT:` prefix:
```
CLIENT:NAVIGATION: click: home -> drive
CLIENT:NAVIGATION: hashchange: drive -> chat
```
**Error Reporting:** JavaScript errors automatically appear in server logs:
```
CLIENT:ERROR: Uncaught TypeError: Cannot read property 'x' of undefined at /suite/js/app.js:123
```
**For LLM Troubleshooting:** ALWAYS check both:
1. `botserver.log` - Server errors + client navigation/errors (prefixed with `CLIENT:`)
2. `botui.log` - UI server logs
### USE WEBSITE Feature - Vector DB Context Injection
**FIXED (v6.2.0+):** The `USE WEBSITE` BASIC command now properly injects vector database embeddings into chat context.
**How it works:**
1. **Preprocessing:** When a `.bas` file containing `USE WEBSITE "https://..."` is compiled, the website is registered for crawling
2. **Crawling:** Content is extracted, chunked, and embedded into Qdrant vector DB (collection name: `website_<url_hash>`)
3. **Runtime Association:** The compiled `.ast` file contains `USE_WEBSITE()` function call that creates session-website association
4. **Context Injection:** During chat, `inject_kb_context()` searches active websites' embeddings and includes relevant chunks in LLM prompt
**Example BASIC script:**
```basic
USE WEBSITE "https://docs.pragmatismo.com.br" REFRESH "1h"
TALK "Hello! I can now answer questions about the documentation."
```
**Database tables involved:**
- `session_website_associations` - Links sessions to websites
- `website_embeddings` - Stores crawled content vectors in Qdrant
**Verification:**
```sql
-- Check if website is associated with session
SELECT * FROM session_website_associations WHERE session_id = '<uuid>';
-- Check if embeddings exist in Qdrant (via HTTP API)
curl http://localhost:6333/collections/website_<hash>/points/scroll
```
**Previous Issue:** In earlier versions, `USE WEBSITE` was removed during preprocessing and never executed at runtime, preventing context injection. Now the function call is preserved in the compiled AST.
### Common Error Messages
| Error | Meaning | Fix |
|-------|---------|-----|
| `Session not found` | Invalid session_id in request | Check auth flow, verify session exists in DB |
| `Bot not found` | Invalid bot_name or bot_id | Verify bot exists in `bots` table |
| `Script compilation error` | BASIC syntax error in .bas file | Check .bas file syntax, look for typos |
| `Failed to send TALK message` | WebSocket disconnected | Check client connection, verify web_adapter running |
| `Drive sync failed` | S3 connection or permission issue | Verify S3 credentials, check bucket exists |
| `unwrap() called on None value` | Panic in production code | MUST FIX - replace with proper error handling |
| `Component not responding: <component_name>` | Infrastructure component not accessible | Check component status: `ps aux | grep <component>`. View logs: `tail -f botserver-stack/logs/<component>/`. Restart via ./restart.sh |
| `Config key not found: <key>` | Missing configuration in database | Check `bot_configuration` table. Set correct value via API or direct SQL update. |
| `403 Forbidden on POST /api/client-errors` | RBAC blocking client error reporting | FIXED in v6.2.0+ - endpoint now allows anonymous access |
### Useful Debugging Commands
```bash
# Check if botserver is running
ps aux | grep botserver
# View botserver logs in real-time
tail -f botserver/logs/botserver.log
# Check work directory structure
ls -la ./work/*.gbai/*/
# Test database connection
cd botserver && cargo run --bin botserver -- --test-db
# Run specific test with output
cargo test -p botserver test_name -- --nocapture
# Check for memory leaks during compilation
CARGO_BUILD_JOBS=1 cargo check -p botserver 2>&1 | grep -i error
```
### Troubleshooting Workflows
**Problem: Script not executing**
1. Check if .bas file exists in `./work/{bot_name}.gbai/{bot_name}.gbdialog/`
2. Verify file has correct syntax (compile with ScriptService)
3. Check logs for compilation errors
4. Verify drive_monitor is running and syncing files
**Problem: WebSocket messages not received**
1. Check browser console for WebSocket errors
2. Verify session_id is valid in database
3. Check web_adapter is registered for session
4. Look for TALK execution in botserver logs
**Problem: Component not starting or crashing**
1. Identify the component from error message (e.g., Vault, PostgreSQL, MinIO, Qdrant, Valkey)
2. Check if process is running: `ps aux | grep <component_name>`
3. Check component logs: `tail -f botserver-stack/logs/<component_name>/`
4. Common fixes:
- Config error: Check `botserver-stack/conf/<component_name>/` for valid configuration
- Port conflict: Ensure no other process using the component's port
- Permission error: Check file permissions in `botserver-stack/data/<component_name>/`
- Missing binary: Re-run `./reset.sh && ./restart.sh` to reinstall components
5. Restart: `./restart.sh`
**Problem: Component configuration errors**
1. All component configs stored in database `bot_configuration` table
2. Check current value: `SELECT * FROM bot_configuration WHERE config_key = '<key_name>';`
3. Update incorrect config: `UPDATE bot_configuration SET config_value = '<correct_value>' WHERE config_key = '<key_name>';`
4. For path configs: Ensure paths are relative to component binary or absolute
5. Restart botserver after config changes
**Problem: File not found errors**
1. Check if file exists in expected location
2. Verify config paths use correct format (relative/absolute)
3. Check file permissions: `ls -la <file_path>`
4. For model/data files: Ensure downloaded to `botserver-stack/data/<component>/`
**Problem: LLM not responding**
1. Check LLM API credentials in config
2. Verify API key has available quota
3. Check network connectivity to LLM provider
4. Review request/response logs for API errors
### Performance Profiling
```bash
# Profile compilation time
cargo build --release --timings
# Profile runtime performance
cargo flamegraph --bin botserver
# Check binary size
ls -lh target/release/botserver
# Memory usage
valgrind --leak-check=full target/release/botserver
```
## 📖 Glossary
@ -514,7 +614,284 @@ cargo test -p bottest
---
## 🔥 Error Fixing Workflow
### Mode 1: OFFLINE Batch Fix (PREFERRED)
When given error output:
```
1. Read ENTIRE error list first
2. Group errors by file
3. For EACH file with errors:
a. View file → understand context
b. Fix ALL errors in that file
c. Write once with all fixes
4. Move to next file
5. REPEAT until ALL errors addressed
6. ONLY THEN → verify with build/diagnostics
```
**NEVER run cargo build/check/clippy DURING fixing**
**Fix ALL errors OFFLINE first, verify ONCE at the end**
### Mode 2: Interactive Loop
```
LOOP UNTIL (0 warnings AND 0 errors):
1. Run diagnostics → pick file with issues
2. Read entire file
3. Fix ALL issues in that file
4. Write file once with all fixes
5. Verify with diagnostics
6. CONTINUE LOOP
END LOOP
```
### Common Error Patterns
| Error | Fix |
|-------|-----|
| `expected i64, found u64` | `value as i64` |
| `expected Option<T>, found T` | `Some(value)` |
| `expected T, found Option<T>` | `value.unwrap_or(default)` |
| `cannot multiply f32 by f64` | `f64::from(f32_val) * f64_val` |
| `no field X on type Y` | Check struct definition |
| `no variant X found` | Check enum definition |
| `function takes N arguments` | Match function signature |
| `cannot find function` | Add missing function or fix import |
| `unused variable` | Delete or use with `..` in patterns |
| `unused import` | Delete the import line |
| `cannot move out of X because borrowed` | Use scoping `{ }` to limit borrow |
---
## 🧠 Memory Management
When compilation fails due to memory issues (process "Killed"):
```bash
pkill -9 cargo; pkill -9 rustc; pkill -9 botserver
CARGO_BUILD_JOBS=1 cargo check -p botserver 2>&1 | tail -200
```
---
## 📏 File Size Limits - MANDATORY
### Maximum 450 Lines Per File
When a file grows beyond this limit:
1. **Identify logical groups** - Find related functions
2. **Create subdirectory module** - e.g., `handlers/`
3. **Split by responsibility:**
- `types.rs` - Structs, enums, type definitions
- `handlers.rs` - HTTP handlers and routes
- `operations.rs` - Core business logic
- `utils.rs` - Helper functions
- `mod.rs` - Re-exports and configuration
4. **Keep files focused** - Single responsibility
5. **Update mod.rs** - Re-export all public items
**NEVER let a single file exceed 450 lines - split proactively at 350 lines**
### Current Files Requiring Immediate Refactoring
| File | Lines | Target Split |
|------|-------|--------------|
| `botserver/src/drive/mod.rs` | 1522 | → 4 files |
| `botserver/src/auto_task/app_generator.rs` | 2981 | → 7 files |
| `botui/ui/suite/sheet/sheet.js` | 3220 | → 8 files |
| `botserver/src/tasks/mod.rs` | 2651 | → 6 files |
| `botserver/src/learn/mod.rs` | 2306 | → 5 files |
See `TODO-refactor1.md` for detailed refactoring plans.
---
## 🔍 Continuous Monitoring
**YOLO Forever Monitoring Pattern:**
The system includes automated log monitoring to catch errors in real-time:
```bash
# Continuous monitoring (check every 5 seconds)
while true; do
sleep 5
echo "=== Check at $(date +%H:%M:%S) ==="
tail -50 botserver.log | grep -E "ERROR|WARN|CLIENT:" | tail -5 || echo "✓ Clean"
done
```
**Quick Status Check:**
```bash
# Check last 200 lines for any issues
tail -200 botserver.log | grep -E "ERROR|WARN|CLIENT:" | tail -10
# Show recent server activity
tail -30 botserver.log
# Check if server is running
ps aux | grep botserver | grep -v grep
```
**Monitoring Dashboard:**
- **Server Status**: https://localhost:8088 (health endpoint)
- **Logs**: `tail -f botserver.log`
- **Client Errors**: Look for `CLIENT:` prefix
- **Server Errors**: Look for `ERROR` or `WARN` prefixes
**Status Indicators:**
- ✅ **Clean**: No ERROR/WARN/CLIENT: entries in logs
- ⚠️ **Warnings**: Non-critical issues that should be reviewed
- ❌ **Errors**: Critical issues requiring immediate attention
**When Errors Appear:**
1. Capture the full error context (50 lines before/after)
2. Identify the component (server, client, database, etc.)
3. Check troubleshooting section for specific fixes
4. Update this README with discovered issues and resolutions
---
## 🚀 Performance & Size Standards
### Binary Size Optimization
- **Release Profile**: Always maintain `opt-level = "z"`, `lto = true`, `codegen-units = 1`, `strip = true`, `panic = "abort"`.
- **Dependencies**:
- Run `cargo tree --duplicates` weekly to find and resolve duplicate versions.
- Run `cargo machete` to remove unused dependencies.
- Use `default-features = false` and explicitly opt-in to needed features.
### Memory Optimization
- **Strings**: Prefer `&str` over `String` where possible. Use `Cow<str>` for conditional ownership.
- **Collections**: Use `Vec::with_capacity` when size is known. Consider `SmallVec` for hot paths.
- **Allocations**: Minimize heap allocations in hot paths.
- **Cloning**: Avoid unnecessary `.clone()` calls. Use references or `Cow` types.
### Code Quality Issues Found
- **955 instances** of `unwrap()`/`expect()` in codebase - ALL must be replaced with proper error handling
- **12,973 instances** of excessive `clone()`/`to_string()` calls - optimize for performance
- **Test code exceptions**: `unwrap()` allowed in test files only
### Linting & Code Quality
- **Clippy**: Code MUST pass `cargo clippy --all-targets --all-features` with **0 warnings**.
- **No Allow**: Do not use `#[allow(clippy::...)]` unless absolutely necessary and documented. Fix the underlying issue.
---
## 🔐 Security Directives - MANDATORY
### Error Handling - NO PANICS IN PRODUCTION
```rust
// ❌ FORBIDDEN
value.unwrap()
value.expect("message")
panic!("error")
todo!()
unimplemented!()
// ✅ REQUIRED
value?
value.ok_or_else(|| Error::NotFound)?
value.unwrap_or_default()
value.unwrap_or_else(|e| { log::error!("{}", e); default })
if let Some(v) = value { ... }
match value { Ok(v) => v, Err(e) => return Err(e.into()) }
```
### Command Execution - USE SafeCommand
```rust
// ❌ FORBIDDEN
Command::new("some_command").arg(user_input).output()
// ✅ REQUIRED
use crate::security::command_guard::SafeCommand;
SafeCommand::new("allowed_command")?
.arg("safe_arg")?
.execute()
```
### Error Responses - USE ErrorSanitizer
```rust
// ❌ FORBIDDEN
Json(json!({ "error": e.to_string() }))
format!("Database error: {}", e)
// ✅ REQUIRED
use crate::security::error_sanitizer::log_and_sanitize;
let sanitized = log_and_sanitize(&e, "context", None);
(StatusCode::INTERNAL_SERVER_ERROR, sanitized)
```
### SQL - USE sql_guard
```rust
// ❌ FORBIDDEN
format!("SELECT * FROM {}", user_table)
// ✅ REQUIRED
use crate::security::sql_guard::{sanitize_identifier, validate_table_name};
let safe_table = sanitize_identifier(&user_table);
validate_table_name(&safe_table)?;
```
---
## ❌ Absolute Prohibitions
```
❌ NEVER use .unwrap() or .expect() in production code (tests OK)
❌ NEVER use panic!(), todo!(), unimplemented!()
❌ NEVER use Command::new() directly - use SafeCommand
❌ NEVER return raw error strings to HTTP clients
❌ NEVER use #[allow()] in source code - FIX the code instead
❌ NEVER add lint exceptions to Cargo.toml - FIX the code instead
❌ NEVER use _ prefix for unused variables - DELETE or USE them
❌ NEVER leave unused imports or dead code
❌ NEVER add comments - code must be self-documenting
❌ NEVER modify Cargo.toml lints section!
❌ NEVER use CDN links - all assets must be local
❌ NEVER use cargo clean - causes 30min rebuilds, use ./reset.sh for database issues
❌ NEVER create .md documentation files without checking botbook/ first - documentation belongs there
```
---
## ✅ Mandatory Code Patterns
### Use Self in Impl Blocks
```rust
impl MyStruct {
fn new() -> Self { Self { } } // ✅ Not MyStruct
}
```
### Derive Eq with PartialEq
```rust
#[derive(PartialEq, Eq)] // ✅ Always both
struct MyStruct { }
```
### Inline Format Args
```rust
format!("Hello {name}") // ✅ Not format!("{}", name)
```
### Combine Match Arms
```rust
match x {
A | B => do_thing(), // ✅ Combine identical arms
C => other(),
}
```
---
## 🖥️ UI Architecture (botui + botserver)
@ -523,13 +900,13 @@ cargo test -p bottest
| Server | Port | Purpose |
|--------|------|---------|
| **botui** | 3000 | Serves UI files + proxies API to botserver |
| **botserver** | 9000 | Backend API + embedded UI fallback |
| **botserver** | 8088 | Backend API + embedded UI fallback |
### How It Works
```
Browser → localhost:3000 → botui (serves HTML/CSS/JS)
→ /api/* proxied to botserver:9000
→ /api/* proxied to botserver:8088
→ /suite/* served from botui/ui/suite/
```
@ -652,50 +1029,50 @@ cargo audit
- `ADDITIONAL-SUGGESTIONS.md` - Enhancement ideas
- `TODO-*.md` - Task tracking files
Subprojects (botapp, botserver, botui, etc.) are **independent repositories referenced as git submodules**.
### ⚠️ CRITICAL: Submodule Push Workflow
When making changes to any submodule (botserver, botui, botlib, etc.):
1. **Commit and push changes within the submodule directory:**
```bash
cd botserver
git add .
git commit -m "Your changes"
git push pragmatismo main
git push github main
```
2. **Update the global gb repository submodule reference:**
```bash
cd .. # Back to gb root
git add botserver
git commit -m "Update botserver submodule to latest commit"
git push pragmatismo main
git push github main
```
**Failure to push the global gb repository will cause submodule changes to not trigger CI/CD pipelines.**
Both repositories must be pushed for changes to take effect in production.
Subprojects (botapp, botserver, etc.) are **not** git submodules - they are independent repositories.
---
## Development Workflow
1. Read this README.md (workspace structure)
2. Read **[AGENTS.md](./AGENTS.md)** (coding rules & workflows)
3. **BEFORE creating any .md file, search botbook/ for existing documentation**
4. Read `<project>/README.md` (project-specific rules)
5. Use diagnostics tool to check warnings
6. Fix all warnings with full file rewrites
7. Verify with diagnostics after each file
8. Never suppress warnings with `#[allow()]`
1. Read this README.md (workspace-level rules)
2. **BEFORE creating any .md file, search botbook/ for existing documentation**
3. Read `<project>/README.md` (project-specific rules)
4. Use diagnostics tool to check warnings
5. Fix all warnings with full file rewrites
6. Verify with diagnostics after each file
7. Never suppress warnings with `#[allow()]`
---
## Main Directive
**LOOP AND COMPACT UNTIL 0 WARNINGS - MAXIMUM PRECISION**
- 0 warnings
- 0 errors
- Trust project diagnostics
- Respect all rules
- No `#[allow()]` in source code
- Real code fixes only
---
## 🔑 Remember
- **OFFLINE FIRST** - Fix all errors from list before compiling
- **ZERO WARNINGS, ZERO ERRORS** - The only acceptable state
- **FIX, DON'T SUPPRESS** - No #[allow()], no Cargo.toml lint exceptions
- **SECURITY FIRST** - No unwrap, no raw errors, no direct commands
- **READ BEFORE FIX** - Always understand context first
- **BATCH BY FILE** - Fix ALL errors in a file at once
- **WRITE ONCE** - Single edit per file with all fixes
- **VERIFY LAST** - Only compile/diagnostics after ALL fixes
- **DELETE DEAD CODE** - Don't keep unused code around
- **Version 6.2.0** - Do not change without approval
- **GIT WORKFLOW** - ALWAYS push to ALL repositories (github, pragmatismo)
---
## License

2
botapp

@ -1 +1 @@
Subproject commit 0b556948f970832e8606f886853793e2bc8dc35c
Subproject commit b5ee6e061acf1388aef777ddcd9a2bf84bd6ed57

@ -1 +1 @@
Subproject commit 7b2b7ab3c53c65a68930a8cb2e7ca359d8e22bcf
Subproject commit 85696bb9070738f6bb865202f8c7de733f7c731a

@ -1 +1 @@
Subproject commit 35411f4f9e64e54b1039360ab654d537cd2958c9
Subproject commit 97778e06dd804be55ff761c7fe2788af0ef50626

2
botlib

@ -1 +1 @@
Subproject commit e926818f35879c1db4086efaa9634caac45f5743
Subproject commit 2765fa2ebadc91435e8d90f068b4c96dbb77329b

@ -1 +1 @@
Subproject commit e088a8e69eb8fe064bf1510a720d42abe159ab00
Subproject commit 22a1954fac2f87a0a13b5e599771273172afc73a

@ -1 +1 @@
Subproject commit 1727e48307fdb7b54c726af8cd6b12669764e908
Subproject commit 17a3caebabddbe843c2b7fd93f624b0ccd9c44fb

@ -1 +1 @@
Subproject commit 73002b36cc3def17546085574cbafe0f42c7b04f
Subproject commit 30345c66e2738ebe73d896841e54f655999e3630

@ -1 +1 @@
Subproject commit 3110dd587290047f283300d674ad325f4f9b3046
Subproject commit dd3d8c74dd58a1cc6d6b18d22108819519aaf9c3

@ -1 +1 @@
Subproject commit 346120cb0b916f72abd2fdad577ae1c606aba1a2
Subproject commit 74e761de0dd5105885acf00183223a702a8436df

2
botui

@ -1 +1 @@
Subproject commit ff6dfcf126dc20ff8a46d3ac44ef04045cfaf268
Subproject commit 414d277ae1757834d2ddbd6225063b451e919788

View file

@ -0,0 +1,20 @@
{
"base_url": "http://localhost:8300",
"default_org": {
"id": "358572039839154190",
"name": "default",
"domain": "default.localhost"
},
"default_user": {
"id": "admin",
"username": "admin",
"email": "admin@localhost",
"password": "",
"first_name": "Admin",
"last_name": "User"
},
"admin_token": "eW0mGnOlKjpYHsrZZNAh1o3_8qeyF1iKKgEj-Y63GBdjQbQmxKxEjsNmVLZ_DWRDK6I3_yI",
"project_id": "",
"client_id": "358572040510308366",
"client_secret": "WyZRbj5iMkOkbvvtJWivXVaaydhWX1TodavhnAhsivl8IDZ44v2QoqT5upfgmOfz"
}

12
package.json Normal file
View file

@ -0,0 +1,12 @@
{
"name": "gb",
"version": "1.0.0",
"main": "index.js",
"author": "Rodrigo Rodriguez (Pragmatismo) <me@rodrigorodriguez.com>",
"license": "MIT",
"devDependencies": {
"@playwright/test": "^1.58.1",
"@types/node": "^25.2.0"
},
"scripts": {}
}

79
playwright.config.ts Normal file
View file

@ -0,0 +1,79 @@
import { defineConfig, devices } from '@playwright/test';
/**
* Read environment variables from file.
* https://github.com/motdotla/dotenv
*/
// import dotenv from 'dotenv';
// import path from 'path';
// dotenv.config({ path: path.resolve(__dirname, '.env') });
/**
* See https://playwright.dev/docs/test-configuration.
*/
export default defineConfig({
testDir: './tests',
/* Run tests in files in parallel */
fullyParallel: true,
/* Fail the build on CI if you accidentally left test.only in the source code. */
forbidOnly: !!process.env.CI,
/* Retry on CI only */
retries: process.env.CI ? 2 : 0,
/* Opt out of parallel tests on CI. */
workers: process.env.CI ? 1 : undefined,
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
reporter: 'html',
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
use: {
/* Base URL to use in actions like `await page.goto('')`. */
// baseURL: 'http://localhost:3000',
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
trace: 'on-first-retry',
},
/* Configure projects for major browsers */
projects: [
{
name: 'chromium',
use: { ...devices['Desktop Chrome'] },
},
{
name: 'firefox',
use: { ...devices['Desktop Firefox'] },
},
{
name: 'webkit',
use: { ...devices['Desktop Safari'] },
},
/* Test against mobile viewports. */
// {
// name: 'Mobile Chrome',
// use: { ...devices['Pixel 5'] },
// },
// {
// name: 'Mobile Safari',
// use: { ...devices['iPhone 12'] },
// },
/* Test against branded browsers. */
// {
// name: 'Microsoft Edge',
// use: { ...devices['Desktop Edge'], channel: 'msedge' },
// },
// {
// name: 'Google Chrome',
// use: { ...devices['Desktop Chrome'], channel: 'chrome' },
// },
],
/* Run your local dev server before starting the tests */
// webServer: {
// command: 'npm run start',
// url: 'http://localhost:3000',
// reuseExistingServer: !process.env.CI,
// },
});

File diff suppressed because it is too large Load diff

View file

@ -1,146 +0,0 @@
# Email Campaigns — Feature Plan
## Existing Foundation (botserver/src/marketing/)
- `campaigns.rs` — CrmCampaign model, CRUD handlers
- `metrics.rs` — CampaignMetrics, ChannelBreakdown, open/click/conversion rates
- `lists.rs` — recipient lists
- `templates.rs` — content templates
- `triggers.rs` — event-based sending
- `email/tracking.rs` — open/click tracking pixels
---
## Features to Build
### 1. Insights Dashboard
**What:** Time series views of delivery + engagement metrics per campaign.
**Data points per time bucket (hourly/daily):**
- Sent, delivered, bounced, failed
- Opens (unique + total), clicks, replies, unsubscribes
- Delivery rate, open rate, click-to-open rate (CTOR)
**Filters/pivots:**
- By mailbox provider (Gmail, Outlook, Yahoo, etc. — parsed from MX/SMTP response)
- By sender identity (from address / domain)
- By campaign or list
- Message search → show exact SMTP response from provider
**Implementation:**
- Add `email_delivery_events` table: `(id, campaign_id, recipient_id, event_type, provider, smtp_response, ts)`
- API: `GET /api/campaigns/:id/insights?from=&to=&group_by=provider|identity|day`
- UI: HTMX + chart.js time series (local vendor)
---
### 2. Advisor Recommendations
**What:** Analyze sending config + results and surface actionable fixes.
**Checks to run:**
| Check | Signal | Recommendation |
|---|---|---|
| SPF/DKIM/DMARC | DNS lookup | "Add missing record" |
| Bounce rate > 5% | delivery_events | "Clean list — remove hard bounces" |
| Open rate < 15% | metrics | "Improve subject line / send time" |
| Spam complaints > 0.1% | FBL data | "Remove complainers immediately" |
| Sending from new IP | warmup_schedule | "Follow warmup plan" |
| List age > 6 months | list.last_sent | "Re-engagement campaign before bulk send" |
**Implementation:**
- `marketing/advisor.rs``AdvisorEngine::analyze(campaign_id) -> Vec<Recommendation>`
- API: `GET /api/campaigns/:id/advisor`
- Runs automatically after each campaign completes
---
### 3. IP Warmup (like OneSignal / Mailchimp)
**What:** Gradually increase daily send volume over 4–6 weeks to build sender reputation.
**Warmup schedule (standard):**
| Day | Max emails/day |
|---|---|
| 1–2 | 50 |
| 3–4 | 100 |
| 5–7 | 500 |
| 8–10 | 1,000 |
| 11–14 | 5,000 |
| 15–21 | 10,000 |
| 22–28 | 50,000 |
| 29+ | unlimited |
**Rules:**
- Only send to most engaged subscribers first (opened in last 90 days)
- Stop warmup if bounce rate > 3% or complaint rate > 0.1%
- Resume next day at same volume if paused
**Implementation:**
- `marketing/warmup.rs``WarmupSchedule`, `WarmupEngine::get_daily_limit(ip, day) -> u32`
- `warmup_schedules` table: `(id, ip, started_at, current_day, status, paused_reason)`
- Scheduler checks warmup limit before each send batch
- API: `GET /api/warmup/status`, `POST /api/warmup/start`
---
### 4. Optimized Shared Delivery
**What:** Auto-select best sending IP based on real-time reputation signals.
**Logic:**
- Track per-IP: bounce rate, complaint rate, delivery rate (last 24h)
- Score each IP: `score = delivery_rate - (bounce_rate * 10) - (complaint_rate * 100)`
- Route each send to highest-scored IP for that destination provider
- Rotate IPs to spread load and preserve reputation
**Implementation:**
- `marketing/ip_router.rs``IpRouter::select(destination_domain) -> IpAddr`
- `ip_reputation` table: `(ip, provider, bounces, complaints, delivered, window_start)`
- Plugs into Stalwart send path via botserver API
---
### 5. Modern Email Marketing Features
| Feature | Description |
|---|---|
| **Send time optimization** | ML-based per-contact best send time (based on past open history) |
| **A/B testing** | Split subject/content, auto-pick winner after N hours |
| **Suppression list** | Global unsubscribe/bounce/complaint list, auto-applied to all sends |
| **Re-engagement flows** | Auto-trigger "we miss you" to contacts inactive > 90 days |
| **Transactional + marketing separation** | Separate IPs/domains for transactional vs bulk |
| **One-click unsubscribe** | RFC 8058 `List-Unsubscribe-Post` header on all bulk sends |
| **Preview & spam score** | Pre-send SpamAssassin score check |
| **Link tracking** | Redirect all links through tracker, record clicks per contact |
| **Webhook events** | Push delivery events to external URLs (Stalwart webhook → botserver) |
---
## DB Tables to Add
```sql
email_delivery_events (id, campaign_id, recipient_id, event_type, provider, smtp_code, smtp_response, ts)
warmup_schedules (id, ip, started_at, current_day, daily_limit, status, paused_reason)
ip_reputation (id, ip, provider, delivered, bounced, complained, window_start)
advisor_recommendations (id, campaign_id, check_name, severity, message, created_at, dismissed)
ab_tests (id, campaign_id, variant_a, variant_b, split_pct, winner, decided_at)
suppression_list (id, org_id, email, reason, added_at)
```
---
## Files to Create
```
botserver/src/marketing/
├── warmup.rs — IP warmup engine + schedule
├── advisor.rs — recommendation engine
├── ip_router.rs — optimized IP selection
├── ab_test.rs — A/B test logic
├── suppression.rs — global suppression list
└── send_time.rs — send time optimization
```
---
## Existing Code to Extend
- `marketing/metrics.rs` → add time-series queries + provider breakdown
- `marketing/campaigns.rs` → add warmup_enabled, ab_test_id fields
- `email/tracking.rs` → already has open/click tracking, extend with provider parsing
- `core/shared/schema/` → add new tables above

View file

@ -1,59 +0,0 @@
# SEPLAGSE - Detecção de Desvios na Folha
## Objetivo
- Bot seplagse deve usar start.bas para inserir dados via init_folha.bas
- detecta.bas deve detectar anomalias nos dados inseridos
## ✅ Status Atual
### Correção REM em mod.rs (FEITA)
**Arquivo:** `botserver/src/basic/mod.rs` linha ~588-594
Filtro adicionado para `REM ` e `REM\t` no `compile_tool_script`:
```rust
!(trimmed.starts_with("PARAM ") ||
trimmed.starts_with("PARAM\t") ||
trimmed.starts_with("DESCRIPTION ") ||
trimmed.starts_with("DESCRIPTION\t") ||
trimmed.starts_with("REM ") || // <-- ADICIONADO
trimmed.starts_with("REM\t") || // <-- ADICIONADO
trimmed.starts_with('\'') ||
trimmed.starts_with('#') ||
trimmed.is_empty())
```
### Arquivos Envolvidos (VERIFICADOS)
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/start.bas` ✅ OK
- Contém botões de sugestão: detecta e init_folha
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/init_folha.bas` ✅ OK
- 4 INSERT statements para dados de exemplo
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/detecta.bas` ✅ OK
- Usa DETECT keyword
- `/opt/gbo/data/seplagse.gbai/seplagse.gbdialog/tables.bas` ✅ OK
- TABLE folha_salarios definida
### Botserver (RODANDO)
- ✅ Botserver compilado com sucesso
- ✅ Botserver rodando em http://localhost:8080
- ✅ Health check OK
## Próximos Passos (Pendentes)
1. **Testar via navegador** - Necessário instalar Playwright browsers
- Navegar para http://localhost:3000/seplagse
- Clicar em "⚙️ Inicializar Dados de Teste"
- Verificar se INSERT funciona
- Clicar em "🔍 Detectar Desvios na Folha"
- Verificar se DETECT funciona
2. **Verificar se há warnings relevantes**
- Alguns warnings de código podem precisar ser corrigidos
## Cache
- AST limpo: `rm ./botserver-stack/data/system/work/seplagse.gbai/seplagse.gbdialog/*.ast`
- Reiniciado: `./restart.sh`
- Botserver: ✅ Rodando
## Arquivos de Trabalho
- Work directory: `./botserver-stack/data/system/work/seplagse.gbai/seplagse.gbdialog/`
- Todos os arquivos BASIC estão presentes e parecem válidos

View file

@ -1,272 +0,0 @@
# Integrated Suite — Conversational Interface Plan
> **Pattern:** Every suite app exposes its own `PROMPT.md` + internal tools.
> The shared chat bar activates app-specific context when the user is inside that app.
> WhatsApp campaigns is the first full example.
---
## Architecture
```
User (WhatsApp / Suite chat bar)
BotOrchestrator (core/bot/mod.rs)
detect active app context
load app PROMPT.md + app InternalTools
LLM with tools → tool_executor.rs
app data / actions
```
### Key existing pieces
| File | Role |
|---|---|
| `core/bot/mod.rs` | `get_session_tools()` + `ToolExecutor::execute_tool_call()` |
| `tasks/PROMPT.md` | Pattern for app-level LLM prompt |
| `marketing/whatsapp.rs` | WhatsApp campaign send/metrics |
| `marketing/campaigns.rs` | Campaign CRUD |
| `marketing/lists.rs` | Recipient lists |
| `botui/ui/suite/campaigns/` | Campaigns UI |
---
## Standard: Every Suite App
### 1. `PROMPT.md` per app folder
Location: `botserver/src/<app>/PROMPT.md`
```markdown
# <App> — Internal Tools Guide
You are the <App> assistant. When the user is in <App>, you have access to:
- tool: list_<entities>
- tool: create_<entity>
- tool: search_<entity>
- tool: <app_specific_action>
Rules:
- Always confirm destructive actions before executing
- Show results as structured summaries, not raw JSON
- If user uploads a file, parse it and confirm before acting
```
### 2. `tools.rs` per app
Location: `botserver/src/<app>/tools.rs`
Registers `Vec<Tool>` (LLM function-calling schema) + handler mapping.
Loaded by `get_session_tools()` when session's active app = this app.
### 3. App context detection
`core/bot/mod.rs` reads `session.active_app` (set by UI via `POST /api/session/context`).
Loads `<app>/PROMPT.md` as system prompt prefix + `<app>/tools.rs` tools.
---
## WhatsApp Campaigns — Full Conversational Flow
### Meta Rules (enforced in tools)
- Only approved Message Templates for marketing (non-session-initiated)
- 24h session window for free-form after user replies
- Media: image/video/document via Media Upload API before send
- Opt-out: always honor STOP, add to suppression list immediately
- Rate: respect per-phone-number rate limits (1000 msg/s business tier)
- Template category: MARKETING requires explicit opt-in from recipient
### Conversation Flow (WhatsApp → campaign creation)
```
User sends to bot number:
"I want to send a campaign"
Bot: "Great! Send me:
1. Your contact list (.xlsx or .csv)
2. The message text
3. An image (optional)
4. When to send (or 'now')"
User uploads contacts.xlsx
[tool: parse_contact_file]
→ extract phone numbers, names
→ validate E.164 format
→ show preview: "Found 342 contacts. First 3: +55..."
User sends message text
[tool: check_template_compliance]
→ check if free-form or needs approved template
→ if template needed: list available approved templates
→ suggest closest match
User sends image (optional)
[tool: upload_media]
→ upload to Meta Media API
→ return media_id
Bot: "Ready to send to 342 contacts at 14:00 today.
Preview: [image] Hello {name}, ...
Estimated cost: $X
Confirm? (yes/no)"
User: "yes"
[tool: create_and_schedule_campaign]
→ create campaign record
→ apply warmup limit if IP warming
→ schedule via TaskScheduler
```
### WhatsApp Campaign Tools (`marketing/whatsapp_tools.rs`)
```rust
// Tool definitions for LLM function calling
pub fn whatsapp_campaign_tools() -> Vec<Tool> {
vec![
Tool::new("parse_contact_file", "Parse uploaded xlsx/csv into contact list"),
Tool::new("list_templates", "List approved WhatsApp message templates"),
Tool::new("check_template_compliance", "Check if message needs approved template"),
Tool::new("upload_media", "Upload image/video to Meta Media API"),
Tool::new("preview_campaign", "Show campaign preview with cost estimate"),
Tool::new("create_and_schedule_campaign", "Create campaign and schedule send"),
Tool::new("get_campaign_status", "Get delivery/read metrics for a campaign"),
Tool::new("pause_campaign", "Pause an in-progress campaign"),
Tool::new("list_campaigns", "List recent campaigns with metrics"),
Tool::new("add_to_suppression", "Add number to opt-out list"),
]
}
```
### WhatsApp PROMPT.md (`marketing/WHATSAPP_PROMPT.md`)
```markdown
# WhatsApp Campaign Assistant
You help users create and manage WhatsApp marketing campaigns.
## Meta Platform Rules (MANDATORY)
- Marketing messages MUST use pre-approved templates outside 24h session window
- Always check opt-in status before adding to campaign
- Honor STOP/unsubscribe immediately via add_to_suppression tool
- Never send more than warmup daily limit if IP is warming up
- Image must be uploaded via upload_media before referencing in campaign
## Conversation Style
- Guide step by step: contacts → message → media → schedule → confirm
- Show cost estimate before confirming
- After send: proactively share open/read rates when available
## File Handling
- .xlsx/.csv → use parse_contact_file tool
- Images → use upload_media tool
- Always confirm parsed data before proceeding
```
---
## Integrated Suite Chat Bar — Standard
### How it works
1. User opens any suite app (CRM, Campaigns, Drive, etc.)
2. Chat bar at bottom activates with app context
3. `POST /api/session/context { app: "campaigns" }` sets `session.active_app`
4. BotOrchestrator loads `campaigns/PROMPT.md` + `campaigns/tools.rs`
5. User can ask natural language questions or trigger actions
### Examples per app
| App | Example query | Tool activated |
|---|---|---|
| **Campaigns** | "How did last week's campaign perform?" | `get_campaign_metrics` |
| **CRM** | "Show deals closing this month" | `list_deals` with filter |
| **Drive** | "Find the Q1 report" | `search_files` |
| **Tasks** | "Create a task to follow up with Acme" | `create_task` |
| **People** | "Who hasn't been contacted in 30 days?" | `list_contacts` with filter |
| **Mail** | "Summarize unread emails from clients" | `list_emails` + LLM summary |
| **Sheet** | "What's the total revenue in column D?" | `query_sheet` |
| **Learn** | "What does our refund policy say?" | `search_kb` |
---
## Implementation Plan
### Phase 1 — Infrastructure (1 sprint)
- [ ] `core/bot/mod.rs` — read `session.active_app`, load app PROMPT + tools
- [ ] `core/tool_context.rs` — app tool registry: `register_app_tools(app_name) -> Vec<Tool>`
- [ ] `POST /api/session/context` — set active app from UI
- [ ] Suite chat bar UI component (`botui/ui/suite/partials/chatbar.html`)
### Phase 2 — WhatsApp Campaigns (1 sprint)
- [ ] `marketing/whatsapp_tools.rs` — 10 tools above
- [ ] `marketing/WHATSAPP_PROMPT.md`
- [ ] `marketing/file_parser.rs` — xlsx/csv → contact list
- [ ] Meta warmup enforcement in send path
- [ ] Conversational campaign creation flow (state machine in session)
### Phase 3 — App-by-app rollout (1 app/sprint)
Priority order based on value:
1. CRM (deals, contacts, pipeline queries)
2. Campaigns (email + WhatsApp)
3. Tasks (create, assign, status)
4. Drive (search, summarize docs)
5. Mail (summarize, draft reply)
6. People (segment, find contacts)
7. Sheet (query, calculate)
8. Learn (KB search)
### Phase 4 — Cross-app intelligence
- [ ] Global search across all apps via single query
- [ ] "What happened today?" — aggregates activity across CRM + Mail + Tasks
- [ ] Proactive suggestions: "You have 3 deals closing this week and no follow-up tasks"
---
## File Structure to Create
```
botserver/src/
├── marketing/
│ ├── whatsapp_tools.rs ← NEW: LLM tool definitions + handlers
│ ├── WHATSAPP_PROMPT.md ← NEW: WhatsApp assistant system prompt
│ ├── file_parser.rs ← NEW: xlsx/csv → contacts
│ └── warmup.rs ← NEW: (from campaigns.md plan)
├── core/
│ ├── tool_registry.rs ← NEW: app → tools mapping
│ └── bot/
│ └── app_context.rs ← NEW: load app prompt + tools per session
├── crm/
│ ├── tools.rs ← NEW
│ └── PROMPT.md ← NEW
├── tasks/
│ └── tools.rs ← NEW (PROMPT.md exists)
└── <each app>/
├── tools.rs ← NEW per app
└── PROMPT.md ← NEW per app
botui/ui/suite/
└── partials/
└── chatbar.html ← NEW: shared chat bar component
```
---
## Chat Bar UI (`partials/chatbar.html`)
```html
<div id="suite-chatbar" class="chatbar">
<div id="chatbar-messages" hx-ext="ws" ws-connect="/ws/suite-chat"></div>
<form ws-send>
<input type="hidden" name="app_context" value="{{ active_app }}">
<input type="file" id="chatbar-file" name="file" accept=".xlsx,.csv,.png,.jpg,.pdf" style="display:none">
<button type="button" onclick="document.getElementById('chatbar-file').click()">📎</button>
<input type="text" name="message" placeholder="Ask anything about {{ active_app }}...">
<button type="submit"></button>
</form>
</div>
```
File uploads go to `POST /api/suite/upload` → stored in Drive → media_id passed to tool.

View file

@ -1,46 +0,0 @@
# Progress: Removendo aws-sdk-s3 do default bundle
## Goal
Remover `aws-sdk-s3` (~120MB) do bundle default `["chat", "automation", "cache", "llm"]` e fazer compilar com:
```bash
cargo check -p botserver --no-default-features --features "chat,automation,cache,llm"
```
## ✅ COMPLETED
1. **Cargo.toml** - Features separadas: `drive` (S3) vs `local-files` (notify)
2. **main.rs** - `pub mod drive` com `#[cfg(any(feature = "drive", feature = "local-files"))]`
3. **state.rs** - `NoDrive` struct adicionada
4. **multimedia.rs** - `DefaultMultimediaHandler` com cfg gates (drive vs no-drive)
5. **drive/mod.rs** - Módulos condicionais:
- `#[cfg(feature = "drive")] pub mod document_processing;`
- `#[cfg(feature = "drive")] pub mod drive_monitor;`
- `#[cfg(feature = "drive")] pub mod vectordb;`
- `#[cfg(feature = "local-files")] pub mod local_file_monitor;`
- Todas ~21 funções com `#[cfg(feature = "drive")]`
6. **multimedia.rs - upload_media** - Duas implementações separadas com cfg gates:
- `#[cfg(feature = "drive")]` - Usa S3 client
- `#[cfg(not(feature = "drive"))]` - Usa armazenamento local
## ✅ VERIFIED
```bash
cargo check -p botserver --no-default-features --features "chat,automation,cache,llm"
```
**Resultado:** ✅ Build limpo (apenas warnings, 0 erros)
**Tempo de compilação:** 2m 29s
## Arquivo Não Fixado (opcional)
### auto_task/app_generator.rs
- `ensure_bucket_exists` method never used (warning, não impede compilação)
- Método já está com `#[cfg(feature = "drive")]` (correto)
## Resumo
O `aws-sdk-s3` foi removido com sucesso do bundle default. O sistema agora suporta dois modos:
- **Com feature "drive"**: Usa S3 (aws-sdk-s3 ~120MB)
- **Sem feature "drive"**: Usa armazenamento local (notify ~2MB)
O build padrão agora é leve (~120MB a menos) e funciona sem dependências de AWS.

View file

@ -1,259 +0,0 @@
# Production Environment Guide
## Infrastructure
### Servers
| Host | IP | Purpose |
|------|-----|---------|
| `system` | `10.157.134.196` | Main botserver + botui container |
| `alm-ci` | `10.157.134.200` | CI/CD runner (Forgejo Actions) |
| `alm` | `10.157.134.34` | Forgejo git server |
| `dns` | `10.157.134.214` | DNS container |
| `drive` | `10.157.134.206` | Drive storage |
| `email` | `10.157.134.40` | Email service |
| `proxy` | `10.157.134.241` | Reverse proxy |
| `tables` | `10.157.134.174` | PostgreSQL |
| `table-editor` | `10.157.134.184` | Table editor |
| `webmail` | `10.157.134.86` | Webmail |
### Port Mapping (system container)
| Service | Internal Port | External URL |
|---------|--------------|--------------|
| botserver | `5858` | `https://system.pragmatismo.com.br` |
| botui | `5859` | `https://chat.pragmatismo.com.br` |
### Access
```bash
# SSH to host
ssh administrator@63.141.255.9
# Execute inside system container
sudo incus exec system -- bash -c 'command'
# SSH from host to container (used by CI)
ssh -o StrictHostKeyChecking=no system "command"
```
## Services
### botserver.service
- **Binary**: `/opt/gbo/bin/botserver`
- **Port**: `5858`
- **User**: `gbuser`
- **Logs**: `/opt/gbo/logs/out.log`, `/opt/gbo/logs/err.log`
- **Config**: `/etc/systemd/system/botserver.service`
- **Env**: `PORT=5858`
### ui.service
- **Binary**: `/opt/gbo/bin/botui`
- **Port**: `5859`
- **Config**: `/etc/systemd/system/ui.service`
- **Env**: `BOTSERVER_URL=http://localhost:5858`
- ⚠️ MUST be `http://localhost:5858` — NOT `https://system.pragmatismo.com.br`
- Rust proxy runs server-side, needs direct localhost access
- JS client uses relative URLs through `chat.pragmatismo.com.br`
### Data Directory
- **Path**: `/opt/gbo/data/`
- **Structure**: `<botname>.gbai/<botname>.gbdialog/*.bas`
- **Bots**: cristo, fema, jucees, oerlabs, poupatempo, pragmatismogb, salesianos, sentient, seplagse
- **Work dir**: `/opt/gbo/work/` (compiled .ast cache)
### Stack Services (managed by botserver bootstrap)
- **Vault**: Secrets management
- **PostgreSQL**: Database (port 5432)
- **Valkey**: Cache (port 6379, password auth)
- **MinIO**: Object storage
- **Zitadel**: Identity provider
- **LLM**: llama.cpp
## CI/CD Pipeline
### Repositories
| Repo | ALM URL | GitHub URL |
|------|---------|------------|
| gb | `https://alm.pragmatismo.com.br/GeneralBots/gb.git` | `git@github.com:GeneralBots/gb.git` |
| botserver | `https://alm.pragmatismo.com.br/GeneralBots/BotServer.git` | `git@github.com:GeneralBots/botserver.git` |
| botui | `https://alm.pragmatismo.com.br/GeneralBots/BotUI.git` | `git@github.com:GeneralBots/botui.git` |
| botlib | `https://alm.pragmatismo.com.br/GeneralBots/botlib.git` | `git@github.com:GeneralBots/botlib.git` |
### Push Order
```bash
# 1. Push submodules first
cd botserver && git push alm main && git push origin main && cd ..
cd botui && git push alm main && git push origin main && cd ..
# 2. Update root workspace references
git add botserver botui botlib
git commit -m "Update submodules: <description>"
git push alm main && git push origin main
```
### Build Environment
- **CI runner**: `alm-ci` container (Debian Trixie, glibc 2.41)
- **Target**: `system` container (Debian 12 Bookworm, glibc 2.36)
- **⚠️ GLIBC MISMATCH**: Building on CI runner produces binaries incompatible with system container
- **Solution**: CI workflow transfers source to system container and builds there via SSH
### Workflow File
- **Location**: `botserver/.forgejo/workflows/botserver.yaml`
- **Triggers**: Push to `main` branch
- **Steps**:
1. Setup workspace on CI runner (clone repos)
2. Transfer source to system container via `tar | ssh`
3. Build inside system container (matches glibc 2.36)
4. Deploy binary inside container
5. Verify botserver is running
## Common Operations
### Check Service Status
```bash
# From host
sudo incus exec system -- systemctl status botserver --no-pager
sudo incus exec system -- systemctl status ui --no-pager
# Check if running
sudo incus exec system -- pgrep -f botserver
sudo incus exec system -- pgrep -f botui
```
### View Logs
```bash
# Systemd journal
sudo incus exec system -- journalctl -u botserver --no-pager -n 50
sudo incus exec system -- journalctl -u ui --no-pager -n 50
# Application logs
sudo incus exec system -- tail -50 /opt/gbo/logs/out.log
sudo incus exec system -- tail -50 /opt/gbo/logs/err.log
# Live tail
sudo incus exec system -- tail -f /opt/gbo/logs/out.log
```
### Restart Services
```bash
sudo incus exec system -- systemctl restart botserver
sudo incus exec system -- systemctl restart ui
```
### Manual Deploy (emergency)
```bash
# Kill old process
sudo incus exec system -- killall botserver
# Copy binary (from host CI workspace or local)
sudo incus exec system -- cp /opt/gbo/ci/botserver/target/debug/botserver /opt/gbo/bin/botserver
sudo incus exec system -- chmod +x /opt/gbo/bin/botserver
sudo incus exec system -- chown gbuser:gbuser /opt/gbo/bin/botserver
# Start service
sudo incus exec system -- systemctl start botserver
```
### Transfer Bot Files to Production
```bash
# From local to prod host
tar czf /tmp/bots.tar.gz -C /opt/gbo/data <botname>.gbai
scp /tmp/bots.tar.gz administrator@63.141.255.9:/tmp/
# From host to container
sudo incus exec system -- bash -c 'tar xzf /tmp/bots.tar.gz -C /opt/gbo/data/'
# Clear compiled cache
sudo incus exec system -- find /opt/gbo/data -name "*.ast" -delete
sudo incus exec system -- find /opt/gbo/work -name "*.ast" -delete
```
### Snapshots
```bash
# List snapshots
sudo incus snapshot list system
# Restore snapshot
sudo incus snapshot restore system <snapshot-name>
```
## Troubleshooting
### GLIBC Version Mismatch
**Symptom**: `GLIBC_2.39 not found` or `GLIBC_2.38 not found`
**Cause**: Binary compiled on CI runner (glibc 2.41) but runs in system container (glibc 2.36)
**Fix**: CI workflow must build inside the system container. Check `botserver.yaml` uses SSH to build in container.
### botserver Not Starting
```bash
# Check binary
sudo incus exec system -- ldd /opt/gbo/bin/botserver | grep "not found"
# Check direct execution
sudo incus exec system -- timeout 10 /opt/gbo/bin/botserver 2>&1
# Check data directory
sudo incus exec system -- ls -la /opt/gbo/data/
```
### botui Can't Reach botserver
```bash
# Check BOTSERVER_URL
sudo incus exec system -- grep BOTSERVER_URL /etc/systemd/system/ui.service
# Must be http://localhost:5858, NOT https://system.pragmatismo.com.br
# Fix:
sudo incus exec system -- sed -i 's|BOTSERVER_URL=.*|BOTSERVER_URL=http://localhost:5858|' /etc/systemd/system/ui.service
sudo incus exec system -- systemctl daemon-reload
sudo incus exec system -- systemctl restart ui
```
### Suggestions Not Showing
```bash
# Check bot files exist
sudo incus exec system -- ls -la /opt/gbo/data/<bot>.gbai/<bot>.gbdialog/
# Check for compilation errors
sudo incus exec system -- tail -50 /opt/gbo/logs/out.log | grep -i "error\|fail\|compile"
# Clear cache and restart
sudo incus exec system -- find /opt/gbo/work -name "*.ast" -delete
sudo incus exec system -- systemctl restart botserver
```
### IPv6 DNS Issues
**Symptom**: External API calls (Groq, Cloudflare) timeout
**Cause**: Container DNS returns AAAA records but no IPv6 connectivity
**Fix**: Container has `IPV6=no` in network config and `gai.conf` labels. If issues persist, check `RES_OPTIONS=inet4` in botserver.service.
## Security
- **NEVER** push secrets to git
- **NEVER** commit files to root with credentials
- **Vault** is single source of truth for secrets
- **CI/CD** is the only deployment method — never manually scp binaries
- **ALM** is production — ask before pushing

View file

@ -1,154 +0,0 @@
# USE KB 2.0: Group-Based Knowledge Base Access
## Overview
Modify the USE KB keyword to respect user group permissions, ensuring that THINK KB queries only return answers from knowledge base folders that belong to groups the logged-in user is a member of.
## Current Architecture
### USE KB Flow
1. User executes `USE KB "kb_name"` in BASIC script
2. `use_kb.rs:add_kb_to_session()` checks if KB exists in `kb_collections`
3. Creates default KB entry if not found
4. Adds association to `session_kb_associations` table
5. KB becomes active for the session
### THINK KB Flow
1. User executes `THINK KB "query"`
2. `think_kb.rs:think_kb_search()` gets all active KBs from `session_kb_associations`
3. For each active KB, calls `KnowledgeBaseManager.search()` on its Qdrant collection
4. Returns combined results from all active KBs
### Group System
- Groups stored in `rbac_groups` table
- User membership in `rbac_user_groups` table
- Group permissions via `rbac_group_roles` table
## Proposed Changes
### 1. Database Schema Changes
Add new table `kb_group_associations`:
```sql
CREATE TABLE kb_group_associations (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
kb_id UUID NOT NULL REFERENCES kb_collections(id) ON DELETE CASCADE,
group_id UUID NOT NULL REFERENCES rbac_groups(id) ON DELETE CASCADE,
granted_by UUID REFERENCES users(id) ON DELETE SET NULL,
granted_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(kb_id, group_id)
);
```
Migration file: `botserver/migrations/6.2.0-01-kb-groups/up.sql`
### 2. Backend Logic Changes
#### Modify `think_kb_search()` in `think_kb.rs`
- Add user group lookup before searching
- Filter active KBs to only those accessible by user's groups
- Allow access if KB has no group associations (public KBs) OR user is in associated groups
```rust
async fn think_kb_search(
kb_manager: Arc<KnowledgeBaseManager>,
db_pool: DbPool,
session_id: Uuid,
bot_id: Uuid,
user_id: Uuid, // Add user_id parameter
query: &str,
) -> Result<serde_json::Value, String> {
// Get user's groups
let user_groups = get_user_groups(&db_pool, user_id)?;
// Get active KBs filtered by groups
let accessible_kbs = get_accessible_kbs_for_session(&db_pool, session_id, &user_groups)?;
// Search only accessible KBs
// ... rest of search logic
}
```
#### Add `get_accessible_kbs_for_session()` function
```rust
fn get_accessible_kbs_for_session(
conn_pool: &DbPool,
session_id: Uuid,
user_groups: &[String],
) -> Result<Vec<(String, String, String)>, String> {
// Query that joins session_kb_associations with kb_group_associations
// Returns KBs where group_id IS NULL (public) OR group_id IN user_groups
}
```
#### Modify `add_kb_to_session()` in `use_kb.rs`
- Add optional group access check
- Allow USE KB if user has access to the KB's groups
### 3. API Changes
Add new endpoints in `rbac.rs` for KB-group management:
```rust
// Assign KB to group
POST /api/rbac/kbs/{kb_id}/groups/{group_id}
// Remove KB from group
DELETE /api/rbac/kbs/{kb_id}/groups/{group_id}
// Get groups for KB
GET /api/rbac/kbs/{kb_id}/groups
// Get KBs accessible by user
GET /api/rbac/users/{user_id}/accessible-kbs
```
### 4. Frontend Changes
#### Update `botui/ui/suite/admin/groups.html`
- Add "Knowledge Bases" tab to group detail panel
- Show list of KBs assigned to the group
- Allow adding/removing KB assignments
#### Update `botui/ui/suite/drive/drive.html`
- Add group visibility indicators for KB folders
- Show which groups have access to each KB
### 5. Migration Strategy
1. Create new migration for `kb_group_associations` table
2. Run migration to create table
3. Assign existing KBs to default groups (e.g., "all_users" group)
4. Update application code
5. Deploy and test
### 6. Backward Compatibility
- Existing KBs without group associations remain public
- Existing USE KB calls continue to work
- THINK KB will filter results based on new permissions
## Implementation Steps
1. ✅ Database migration for kb_group_associations
2. ✅ Modify think_kb_search to accept user_id and filter by groups
3. ✅ Update THINK KB keyword registration to pass user_id
4. ✅ Add group access check to USE KB
5. ✅ Add API endpoints for KB-group management
6. ✅ Update admin UI for group-KB assignment
7. ✅ Update drive UI to show group access
8. ✅ Add tests for group-based access control
## Security Considerations
- All KB access checks must happen at the database level
- No client-side filtering of search results
- Group membership verified on each request
- Audit logging for KB access attempts
## Testing
- Unit tests for group access functions
- Integration tests for THINK KB with group filtering
- UI tests for admin group-KB management
- End-to-end tests with different user group scenarios

View file

@ -1,10 +1 @@
#!/bin/bash
set -e
echo "Cleaning up..."
rm -rf botserver-stack/ ./work/ .env
echo "Starting services..."
./restart.sh
echo "Reset complete!"
rm -rf botserver-stack/ ./work/ .env

View file

@ -1,49 +0,0 @@
#!/bin/bash
# Fast restart: rebuilds and restarts botserver + botmodels while leaving
# infrastructure services (postgres, valkey, minio, etc.) untouched.
# Fails fast on build errors instead of silently launching stale binaries.
set -e

echo "=== Fast Restart: botserver + botmodels only ==="

# Kill only the app services, keep infra running.
# pkill exits non-zero when nothing matched, so tolerate that under set -e.
pkill -f "botserver --noconsole" || true
pkill -f "botmodels" || true

# Clean app logs from the previous run.
rm -f botserver.log botmodels.log

# Build only botserver (botui is likely already built).
# With set -e a compile failure now aborts the restart.
cargo build -p botserver

# Start botmodels (Python API) from its virtualenv.
cd botmodels
source venv/bin/activate
uvicorn src.main:app --host 0.0.0.0 --port 8085 > ../botmodels.log 2>&1 &
echo "  botmodels PID: $!"
cd ..

# Wait up to 20s for botmodels to answer its health endpoint; warn loudly
# if it never comes up instead of continuing in silence.
botmodels_ready=false
for i in $(seq 1 20); do
    if curl -s http://localhost:8085/api/health > /dev/null 2>&1; then
        echo "  botmodels ready"
        botmodels_ready=true
        break
    fi
    sleep 1
done
if [ "$botmodels_ready" != true ]; then
    echo "⚠️  botmodels did not become healthy within 20s - check botmodels.log"
fi

# Start botui only if it is not already running.
if ! pgrep -f "botui" > /dev/null; then
    echo "Starting botui..."
    cargo build -p botui
    cd botui
    BOTSERVER_URL="http://localhost:8080" ./target/debug/botui > ../botui.log 2>&1 &
    echo "  botui PID: $!"
    cd ..
fi

# Start botserver, pointing it at the local botmodels instance.
BOTMODELS_HOST="http://localhost:8085" BOTMODELS_API_KEY="starter" RUST_LOG=info ./target/debug/botserver --noconsole > botserver.log 2>&1 &
echo "  botserver PID: $!"

# Quick health check (best-effort; a short startup delay is expected).
sleep 2
curl -s http://localhost:8080/health > /dev/null 2>&1 && echo "✅ botserver ready" || echo "❌ botserver failed"

echo "Done. botserver $(pgrep -f 'botserver --noconsole') botui $(pgrep -f botui) botmodels $(pgrep -f botmodels)"

View file

@ -1,32 +0,0 @@
# Restart script for Windows: stops, rebuilds and relaunches botserver + botui.
$ErrorActionPreference = "Continue"

Write-Host "Stopping..."
Stop-Process -Name "botserver" -Force -ErrorAction SilentlyContinue
Stop-Process -Name "botui" -Force -ErrorAction SilentlyContinue
Stop-Process -Name "rustc" -Force -ErrorAction SilentlyContinue

Write-Host "Cleaning..."
# Remove stdout and stderr logs from the previous run.
Remove-Item -Path "botserver.log", "botserver.err.log", "botui.log", "botui.err.log" -Force -ErrorAction SilentlyContinue

Write-Host "Building..."
cargo build -p botserver
if ($LASTEXITCODE -ne 0) { Write-Host "Failed to build botserver"; exit 1 }
cargo build -p botui
if ($LASTEXITCODE -ne 0) { Write-Host "Failed to build botui"; exit 1 }

Write-Host "Starting botserver..."
$env:PORT = "8080"
$env:RUST_LOG = "debug"
$env:PATH += ";C:\pgsql\pgsql\bin;C:\pgsql\pgsql\lib"
# BUG FIX: Start-Process throws when -RedirectStandardOutput and
# -RedirectStandardError point at the SAME file, so the process never
# started. Each stream now gets its own log file.
$botserverProcess = Start-Process -PassThru -NoNewWindow -FilePath ".\target\debug\botserver.exe" -ArgumentList "--noconsole" -RedirectStandardOutput "botserver.log" -RedirectStandardError "botserver.err.log"
Write-Host "  PID: $($botserverProcess.Id)"

Write-Host "Starting botui..."
$env:BOTSERVER_URL = "http://localhost:8080"
$env:PORT = "3000"
$botuiProcess = Start-Process -PassThru -NoNewWindow -FilePath ".\target\debug\botui.exe" -RedirectStandardOutput "botui.log" -RedirectStandardError "botui.err.log"
Write-Host "  PID: $($botuiProcess.Id)"

Write-Host "Done. Logs are being written to botserver.log and botui.log (stderr in *.err.log)"
Write-Host "To view logs, you can use: Get-Content botserver.log -Wait"

View file

@ -1,55 +1,28 @@
#!/bin/bash
set -e
echo "=== Fast Restart: botserver only (keeps infra running) ==="
echo "🛑 Stopping existing processes..."
pkill -f botserver || true
pkill -f botui || true
pkill -f rustc || true
# Only kill the app services, keep infra (postgres, valkey, minio, vault, zitadel) running
pkill -f "botserver --noconsole" || true
pkill -f "botmodels" || true
echo "🧹 Cleaning logs..."
rm -f botserver.log botui.log
# Clean app logs only
rm -f botserver.log botmodels.log
# Build botserver (incremental, should be fast)
echo "🔨 Building botserver..."
cargo build -p botserver
# Start botmodels if not running
if ! pgrep -f "botmodels" > /dev/null; then
echo "Starting botmodels..."
cd botmodels
source venv/bin/activate
uvicorn src.main:app --host 0.0.0.0 --port 8085 > ../botmodels.log 2>&1 &
echo " botmodels PID: $!"
cd ..
# Wait for botmodels
for i in $(seq 1 15); do
if curl -s http://localhost:8085/api/health > /dev/null 2>&1; then
echo " botmodels ready"
break
fi
sleep 1
done
else
echo " botmodels already running"
fi
echo "🔨 Building botui..."
cargo build -p botui
# Start botserver
echo "Starting botserver..."
BOTMODELS_HOST="http://localhost:8085" BOTMODELS_API_KEY="starter" RUST_LOG=info \
./target/debug/botserver --noconsole > botserver.log 2>&1 &
echo " botserver PID: $!"
echo "🚀 Starting botserver..."
RUST_LOG=info ./target/debug/botserver --noconsole > botserver.log 2>&1 &
BOTSERVER_PID=$!
# Wait for botserver health with timeout
echo "Waiting for botserver..."
for i in $(seq 1 10); do
if curl -sf http://localhost:8080/health > /dev/null 2>&1; then
echo "✅ botserver ready"
exit 0
fi
sleep 1
done
echo "🚀 Starting botui..."
BOTSERVER_URL="https://localhost:8088" ./target/debug/botui > botui.log 2>&1 &
BOTUI_PID=$!
echo "❌ botserver failed to start - check botserver.log"
tail -20 botserver.log
exit 1
echo "✅ Started botserver (PID: $BOTSERVER_PID) and botui (PID: $BOTUI_PID)"
echo "📊 Monitor with: tail -f botserver.log botui.log"
echo "🌐 Access at: http://localhost:3000"

28
start.bas Normal file
View file

@ -0,0 +1,28 @@
REM Knowledge Base Website Crawler Bot - Start Template
REM Runs once at bot startup: loads the saved introduction, registers the
REM websites to crawl, sets LLM context and suggestion buttons, greets the
REM user, and then exits.

REM Load the bot introduction from persistent bot memory; fall back to a
REM default greeting when nothing has been stored yet.
intro = GET BOT MEMORY "introduction"
IF intro = "" THEN
intro = "I'm your documentation assistant with access to crawled websites."
END IF

REM Register websites for crawling (preprocessing mode) - these become the
REM knowledge sources the bot can search at question time.
USE WEBSITE "https://docs.python.org"
USE WEBSITE "https://developer.mozilla.org"
USE WEBSITE "https://stackoverflow.com"

REM Set context for the LLM: who the bot is and what it can do.
SET CONTEXT "role" AS intro
SET CONTEXT "capabilities" AS "I can search Python docs, MDN web docs, and Stack Overflow."

REM Configure the quick-reply suggestion buttons shown in the chat UI
REM (clear any stale ones first).
CLEAR SUGGESTIONS
ADD SUGGESTION "python" AS "How do I use Python dictionaries?"
ADD SUGGESTION "javascript" AS "Explain JavaScript async/await"
ADD SUGGESTION "web" AS "What is the DOM in web development?"

REM Initial greeting sent to the user.
TALK intro
TALK "I have access to Python documentation, MDN web docs, and Stack Overflow."
TALK "Ask me any programming question!"

18
tests/example.spec.ts Normal file
View file

@ -0,0 +1,18 @@
import { test, expect } from '@playwright/test';

// Smoke tests against playwright.dev. Every test in this file starts from
// the Playwright homepage, so the navigation is hoisted into a shared hook.
test.beforeEach(async ({ page }) => {
  await page.goto('https://playwright.dev/');
});

test('has title', async ({ page }) => {
  // The page title should contain the substring "Playwright".
  await expect(page).toHaveTitle(/Playwright/);
});

test('get started link', async ({ page }) => {
  // Follow the "Get started" link from the homepage...
  const getStartedLink = page.getByRole('link', { name: 'Get started' });
  await getStartedLink.click();
  // ...and expect to land on a page showing the "Installation" heading.
  const installHeading = page.getByRole('heading', { name: 'Installation' });
  await expect(installHeading).toBeVisible();
});

41
yarn.lock Normal file
View file

@ -0,0 +1,41 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@playwright/test@^1.58.1":
version "1.58.1"
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.58.1.tgz#891dcd1da815cb1042490531f6d8778988509d22"
integrity sha512-6LdVIUERWxQMmUSSQi0I53GgCBYgM2RpGngCPY7hSeju+VrKjq3lvs7HpJoPbDiY5QM5EYRtRX5fvrinnMAz3w==
dependencies:
playwright "1.58.1"
"@types/node@^25.2.0":
version "25.2.0"
resolved "https://registry.yarnpkg.com/@types/node/-/node-25.2.0.tgz#015b7d228470c1dcbfc17fe9c63039d216b4d782"
integrity sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w==
dependencies:
undici-types "~7.16.0"
fsevents@2.3.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
playwright-core@1.58.1:
version "1.58.1"
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.58.1.tgz#d63be2c9b7dcbdb035beddd4b42437bd3ca89107"
integrity sha512-bcWzOaTxcW+VOOGBCQgnaKToLJ65d6AqfLVKEWvexyS3AS6rbXl+xdpYRMGSRBClPvyj44njOWoxjNdL/H9UNg==
playwright@1.58.1:
version "1.58.1"
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.58.1.tgz#63300e77a604c77264e1b499c0d94b54ed96d6ba"
integrity sha512-+2uTZHxSCcxjvGc5C891LrS1/NlxglGxzrC4seZiVjcYVQfUa87wBL6rTDqzGjuoWNjnBzRqKmF6zRYGMvQUaQ==
dependencies:
playwright-core "1.58.1"
optionalDependencies:
fsevents "2.3.2"
undici-types@~7.16.0:
version "7.16.0"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.16.0.tgz#ffccdff36aea4884cbfce9a750a0580224f58a46"
integrity sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==