Compare commits
467 commits
7dcab89e06
...
df4abcf09a
| Author | SHA1 | Date | |
|---|---|---|---|
| df4abcf09a | |||
| 0758cfe147 | |||
| e96aa72a36 | |||
| 5217e5cf4b | |||
| e1a715c54e | |||
| 8e060f1cae | |||
| edf339a363 | |||
| 8dbcf89907 | |||
| 03663874d0 | |||
| 795605e407 | |||
| 9d244f17b4 | |||
| 6297ff7811 | |||
| 10f1336ec3 | |||
| c557577821 | |||
| 1fd2b14d4b | |||
| 7d568b4ad8 | |||
| b8f7631b79 | |||
| 4baa5a1c5f | |||
| cb447561d7 | |||
| 64af57efcc | |||
| fd811523ab | |||
| 5f89213cb0 | |||
| c34708454b | |||
| a428e20b75 | |||
| 42373c0cea | |||
| 0560794533 | |||
| 1e03d4fe27 | |||
| c85eab3443 | |||
| 81da7693e5 | |||
| 2cff40a15a | |||
| 9ab39e1442 | |||
| e42c204201 | |||
| 83635eebde | |||
| 2623a0317c | |||
| c7099ae536 | |||
| 0274469de6 | |||
| 7481df49c7 | |||
| 4068be0f98 | |||
| 3b0a1124ca | |||
| b1a6ac8809 | |||
| 0ebb90385b | |||
| 3550afeeb8 | |||
| d1ce45589a | |||
| 824f8c7f01 | |||
| bb5e2cc1f2 | |||
| e754d738b8 | |||
| b70b4f9d56 | |||
| bc3fb4b811 | |||
| a44072f10f | |||
| 3823597057 | |||
| 554c0a72ca | |||
| 67a3bb2866 | |||
| f6b6396dde | |||
| 8270ed3d0c | |||
| 921a4a52c9 | |||
| af0f0fee1c | |||
| f949180a89 | |||
| 9a6dd337f9 | |||
| f679593d7b | |||
| a4ae8bf156 | |||
| d66ea28357 | |||
| 791e1672f4 | |||
| a16cf65c3c | |||
| ee362fbd15 | |||
| bfee85f5b5 | |||
| b04a03aa7d | |||
| 4292a7ce76 | |||
| 0529f51b68 | |||
| 4388d8d042 | |||
| 9c61885b9b | |||
| 42bf55c348 | |||
| cae3c25c8f | |||
| 620d84a845 | |||
| 57a718426e | |||
| 3b09fce82b | |||
| 75650df065 | |||
| 2519294add | |||
| 553b7b9e21 | |||
| 7366541614 | |||
| 3a03a7dc5a | |||
| 254901bf4a | |||
| cf69c01feb | |||
| 0c5b20ce61 | |||
| db6f2610ee | |||
| 1707ead3c0 | |||
| 90fb3f0dc0 | |||
| 3e1a3d4e5e | |||
| 6b857e8d17 | |||
| 5b5e3202e5 | |||
| 11ccba624c | |||
| 1ab01b09a2 | |||
| 5ebd2d8f5d | |||
| 0c008c1c0e | |||
| 07f34991d5 | |||
| ba7184c10b | |||
| 02e7389e62 | |||
| a1aa97f1be | |||
| 7950120339 | |||
| d963b6c562 | |||
| 851c66fbaa | |||
| 36fddd0aec | |||
| 3aac836a49 | |||
| 50ffd05068 | |||
| 4478b3d7ba | |||
| f0fde2c7bd | |||
| a3e7c90669 | |||
| 7122285a60 | |||
| e0fec021e8 | |||
| 6965e08313 | |||
| f33968c4e0 | |||
| 264873d776 | |||
| 1c1a9ea4cf | |||
| 3cd1d9b23c | |||
| 3b76f31792 | |||
| e22ae04993 | |||
| 50a220f1f8 | |||
| 7c1d022ad2 | |||
| a170ec4c2b | |||
| d80e80856a | |||
| 0432cf2c70 | |||
| cee8aeee34 | |||
| c405f18efe | |||
| 1bfaad789d | |||
| a5a6b372b7 | |||
| e00e066ffe | |||
| d5e2d8e5a8 | |||
| a1b4bac917 | |||
| 14d66671ae | |||
| 5e5cfabbb6 | |||
| de0e3844d4 | |||
| 2185d3352c | |||
| f5096d49ff | |||
| ee1e484316 | |||
| 55d58535d6 | |||
| 397da6cf48 | |||
| 915cf1baf3 | |||
| 098f46ef2d | |||
| d8f8aff96e | |||
| b301620094 | |||
| e8ff264bd7 | |||
| 9844ce5ab0 | |||
| 730739aedf | |||
| d3ae322256 | |||
| decea7ddfc | |||
| f2ba1f4dfe | |||
| 03d223517d | |||
| f41b984b6f | |||
| 93f80e0af5 | |||
| fa03fc65a1 | |||
| 4b67b0f486 | |||
| 49bc6e52bc | |||
| a8ae578a80 | |||
| b2995cdcdb | |||
| 4f510d1196 | |||
| d28510a632 | |||
| e094dc138a | |||
| 07b6af9bf3 | |||
| a5f16fbab9 | |||
| 2257c980cb | |||
| 123771c996 | |||
| f767337ed8 | |||
| d598bdc29a | |||
| b45f63a7a6 | |||
| e1b456d199 | |||
| 96ece5a3ea | |||
| 1662905a32 | |||
| d0d68e792e | |||
| fc95cba887 | |||
| 93dc55c47c | |||
| 0c1a988f82 | |||
| dcabb6c0bc | |||
| 083b56921f | |||
| daa76e8a7b | |||
| be03dfe880 | |||
| 934e46e038 | |||
| fbd2a8647d | |||
| 8e6549a9ea | |||
| 05859d5276 | |||
| 96c61938d2 | |||
| 6386f65e58 | |||
| 613409b5d6 | |||
| ef8e7b9b56 | |||
| 915945e1b5 | |||
| d2e24c581b | |||
| a65365d19f | |||
| 6e63c47087 | |||
| 370fa6511d | |||
| 2368c30e59 | |||
| 6ddb1ebcc5 | |||
| d9587863aa | |||
| 48add934bd | |||
| 9d87f4b60d | |||
| a492d1abc1 | |||
| 27c1cd9671 | |||
| 50798824f8 | |||
| 737fb45fc0 | |||
| ff1680cafc | |||
| 0dcd46bfe7 | |||
| 55043a4d8a | |||
| 6a97db0931 | |||
| b1c3800ca8 | |||
| c4c52264db | |||
| 277789e0bc | |||
| c2d60d7cb8 | |||
| 72e6992f33 | |||
| 9016868345 | |||
| e7a42b5011 | |||
| 1bd81a4c2c | |||
| a367d8fca5 | |||
| 932fc30cea | |||
| bb79ac931f | |||
| 99c64d32ff | |||
| 80494ea4fe | |||
| 7ee9d42560 | |||
| 272f56c79d | |||
| fb02e72b8f | |||
| 50a3718d82 | |||
| 373bb6a6e4 | |||
| ada9db7a42 | |||
| fb0b7f079f | |||
| 92d8a0d858 | |||
| 1ae46149ee | |||
| 9068bc25f8 | |||
| 80c798ed05 | |||
| 41cf536cd4 | |||
| 901153803f | |||
| a8521d7480 | |||
| f5b954df76 | |||
| 1bf9e1872b | |||
| a377af5ba3 | |||
| 0dbc7cb081 | |||
| 8eecc7f871 | |||
| 5bebba4d7f | |||
| 60ef1d0562 | |||
| 1df0ea8626 | |||
| 54639690e2 | |||
| 04454f2274 | |||
| 3dadec70a8 | |||
| 76a74b87f2 | |||
| 264bd6f4c9 | |||
| 466bd729af | |||
| 9e3232ae85 | |||
| 75eccecbe9 | |||
| f9aa5dacd0 | |||
| 82f5f24b44 | |||
| 752cdd6f3b | |||
| 3a6a571361 | |||
| ed2052c8ec | |||
| 3e5c569354 | |||
| 9093340f4e | |||
| 5cec129302 | |||
| 4fa2018f5d | |||
| 814b0214be | |||
| b6f83df229 | |||
| 66b19098b4 | |||
| 8ef34d011c | |||
| d8fb6c954a | |||
| a2da4820b7 | |||
| 9815ab313b | |||
| 30e78ba40c | |||
| 45df9d9caf | |||
| 52aac0af21 | |||
| c79ab35409 | |||
| 90ee0257cb | |||
| 1388463695 | |||
| 6a13837bb8 | |||
| 602dfdee93 | |||
| c404cc0b4d | |||
| 926759f630 | |||
| ebf516e967 | |||
| 28326cb049 | |||
| ff146657e0 | |||
| 9677c90907 | |||
| f65b288cc5 | |||
| 0ec849d323 | |||
| 9f44d084ac | |||
| bb1cef6675 | |||
| af87de255b | |||
| ff4ab52fef | |||
| eb87414a78 | |||
| 083024a438 | |||
| f815943491 | |||
| e61cf84bc4 | |||
| 22172cf64a | |||
| a7ccec940a | |||
| ec2e93b22c | |||
| 2cd3a5d692 | |||
| d09a11e31e | |||
| 4941f74c6a | |||
| 7a70798c85 | |||
| bd1aeb442d | |||
| ae94a7b77c | |||
| 94ba355910 | |||
| 4f4795b981 | |||
| 2658659ff0 | |||
| 3b6e2df6e2 | |||
| 4bf1dc4689 | |||
| d933ac52af | |||
| 21add146a6 | |||
| bbea8fb9fd | |||
| 76b64182bd | |||
| ce6a65a902 | |||
| b9835d6d12 | |||
| e0ab4bb1fe | |||
| e30b070eff | |||
| f0858a443e | |||
| 0ce43ed8f7 | |||
| 8502eac494 | |||
| 70cdd6e5a5 | |||
| 98e8179810 | |||
| 67d3c7f901 | |||
| ec4c660887 | |||
| 7d3cf9bd61 | |||
| 02c8efeca7 | |||
| 0be7cb1f61 | |||
| dfad0f3989 | |||
| 58be60fda0 | |||
| f7bcd16212 | |||
| 4900274887 | |||
| bfc9ced932 | |||
| 65583977fa | |||
| 12d9dc50af | |||
| 1ca4d2c712 | |||
| 176df967f7 | |||
| 7e9b8b00fe | |||
| e2ffa39bf8 | |||
| 297af2f9e1 | |||
| 4caa147841 | |||
| 29416db1b8 | |||
| 9c3bd0c1d5 | |||
| 21b219f0d3 | |||
| f7238b5d33 | |||
| 4c26b77143 | |||
|
|
7de55efe79 | ||
|
|
7d70035050 | ||
|
|
c19095f141 | ||
|
|
2572425cb1 | ||
|
|
8a6fec467c | ||
| 2fe4586be5 | |||
| 6297018124 | |||
| 7d95a607f4 | |||
| 91ad0232f0 | |||
| c43bda9b2a | |||
| 144c4b0b79 | |||
| 920666db40 | |||
| 731892db36 | |||
| 0465216f66 | |||
| a695da5a05 | |||
| f1455ad7cb | |||
| e42ee6ee43 | |||
| 1f73d82c50 | |||
| dd1d105ef0 | |||
| b7275ed233 | |||
| e245077700 | |||
| a23b4f1983 | |||
| ff9e3f56a9 | |||
| 62e2390da3 | |||
| 39a6a4cf20 | |||
| 4da3910b3b | |||
| db119148cf | |||
| d89a5c9d30 | |||
| f8c36c3778 | |||
| 9123756e82 | |||
| 28c4f89e16 | |||
| cc8b432aa8 | |||
| d65794fa93 | |||
| 2fe91226b0 | |||
| 465fa71c84 | |||
| 853181cd1d | |||
| c7fe607171 | |||
| 12e7cba9bd | |||
| 31f68d7bf0 | |||
| 334bb9239b | |||
| a9c4714929 | |||
| 41efb790ef | |||
| 4e0737f60a | |||
| 649afd7947 | |||
| a99c17008e | |||
| abedde3af7 | |||
| 9fc33725b7 | |||
| 7d4708b516 | |||
| 34af1f2a16 | |||
| 21b96804e8 | |||
| a6a221788b | |||
| 610741e123 | |||
| ddb11a7c06 | |||
| 57b09e5b66 | |||
| 046dbc63ad | |||
| c3c235f8c4 | |||
| 19b4a20a02 | |||
| 792a13eb67 | |||
| 9eb2bfe09c | |||
| 49930f2aa0 | |||
| d08dab26dc | |||
| ba032f41eb | |||
| ec8d002574 | |||
| ce81951a69 | |||
| 1d0c9ccdb8 | |||
| f0e0553966 | |||
| 9b02df3bec | |||
| 49d9b193b2 | |||
| b11140d106 | |||
| 8e27900529 | |||
| 60e2054a02 | |||
| 40e735a56f | |||
| a83d9a88aa | |||
| 8b32eaf41f | |||
| c12ad1eda4 | |||
| 4729fe7071 | |||
| e443aa9d1a | |||
| 3891f137fd | |||
| f40449ed51 | |||
| 146f04b373 | |||
| 1ecd5f4f0c | |||
| b5bf1061f8 | |||
| 7bda264f61 | |||
| caafad484a | |||
| ddc1bdb2db | |||
| faeb77fc07 | |||
| a31d7b355a | |||
| 30ec64d878 | |||
| dd64a4102c | |||
| 041c76209a | |||
| c6e8ced648 | |||
| 56e8054839 | |||
| 8519c3cfd2 | |||
| 8a0216c654 | |||
| abcf959fdf | |||
| b9ab17fb4c | |||
| 84083b9ae0 | |||
| 079e802b17 | |||
| 33845109fd | |||
| bd3857f2d4 | |||
| acea58eecb | |||
| ead23594c0 | |||
| b606913d7d | |||
| 5ffa0d71af | |||
| 0057ca3612 | |||
| 504bb66a82 | |||
| 1e6289b223 | |||
| 8c3f51a49d | |||
| cf0d94873d | |||
| 8eef47058d | |||
| dcaf90d39f | |||
| a8e107059a | |||
| ea4cb78646 | |||
| eda45af678 | |||
| fc994375b1 | |||
| 3fe4f7ece7 | |||
| 4202f0dcf9 | |||
| fad4e1457b | |||
| b798123f14 | |||
| 5b34b21622 | |||
| cf80060818 | |||
| cda27734f0 | |||
| f1b231ce3a | |||
| 6a7cdf2800 | |||
| 21aa782fd2 | |||
| 54966ff63f | |||
| bcb43ce887 | |||
| 4736383997 | |||
| 93519eba14 | |||
| 0e47fd9476 | |||
| 2f200e1e99 | |||
| 8e02195eca | |||
| 41e314fd67 | |||
| fe865fdfa1 |
49 changed files with 14591 additions and 1208 deletions
|
|
@ -1,5 +1,5 @@
|
|||
[build]
|
||||
rustc-wrapper = "sccache"
|
||||
# rustc-wrapper = "sccache"
|
||||
|
||||
[target.x86_64-unknown-linux-gnu]
|
||||
linker = "clang"
|
||||
|
|
|
|||
8
.env.example
Normal file
8
.env.example
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
# General Bots Environment Configuration
|
||||
# Copy this file to .env and fill in values
|
||||
# NEVER commit .env to version control
|
||||
|
||||
# Vault connection
|
||||
VAULT_ADDR=https://127.0.0.1:8200
|
||||
VAULT_TOKEN=<your-vault-token-here>
|
||||
VAULT_CACERT=./botserver-stack/vault/certs/ca.crt
|
||||
|
|
@ -1,94 +0,0 @@
|
|||
name: BotServer CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["main"]
|
||||
pull_request:
|
||||
branches: ["main"]
|
||||
|
||||
env:
|
||||
CARGO_BUILD_JOBS: 4
|
||||
CARGO_NET_RETRY: 10
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: gbo
|
||||
|
||||
steps:
|
||||
- name: Disable SSL verification
|
||||
run: git config --global http.sslVerify false
|
||||
|
||||
- name: Checkout BotServer Code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: botserver
|
||||
|
||||
- name: Setup Workspace
|
||||
run: |
|
||||
git clone --depth 1 --branch main https://alm.pragmatismo.com.br/GeneralBots/gb.git workspace
|
||||
cd workspace
|
||||
git submodule update --init --depth 1 botlib
|
||||
|
||||
# Remove all members except botserver and botlib from workspace
|
||||
sed -i '/"botapp",/d' Cargo.toml
|
||||
sed -i '/"botdevice",/d' Cargo.toml
|
||||
sed -i '/"bottest",/d' Cargo.toml
|
||||
sed -i '/"botui",/d' Cargo.toml
|
||||
sed -i '/"botbook",/d' Cargo.toml
|
||||
sed -i '/"botmodels",/d' Cargo.toml
|
||||
sed -i '/"botplugin",/d' Cargo.toml
|
||||
sed -i '/"bottemplates",/d' Cargo.toml
|
||||
|
||||
cd ..
|
||||
rm -rf workspace/botserver
|
||||
mv botserver workspace/botserver
|
||||
|
||||
|
||||
|
||||
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libpq-dev libssl-dev liblzma-dev pkg-config
|
||||
|
||||
- name: Install Rust
|
||||
run: |
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable --profile minimal
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install sccache
|
||||
run: |
|
||||
wget https://github.com/mozilla/sccache/releases/download/v0.8.2/sccache-v0.8.2-x86_64-unknown-linux-musl.tar.gz
|
||||
tar xzf sccache-v0.8.2-x86_64-unknown-linux-musl.tar.gz
|
||||
mv sccache-v0.8.2-x86_64-unknown-linux-musl/sccache $HOME/.cargo/bin/sccache
|
||||
chmod +x $HOME/.cargo/bin/sccache
|
||||
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
|
||||
$HOME/.cargo/bin/sccache --start-server || true
|
||||
|
||||
|
||||
- name: Setup environment
|
||||
run: sudo cp /opt/gbo/bin/system/.env . 2>/dev/null || true
|
||||
|
||||
- name: Build BotServer
|
||||
working-directory: workspace
|
||||
run: |
|
||||
cargo build -p botserver -j 8 2>&1 | tee /tmp/build.log
|
||||
ls -lh target/debug/botserver
|
||||
sccache --show-stats || true
|
||||
|
||||
- name: Save build log
|
||||
if: always()
|
||||
run: |
|
||||
sudo mkdir -p /opt/gbo/logs
|
||||
sudo cp /tmp/build.log /opt/gbo/logs/botserver-$(date +%Y%m%d-%H%M%S).log || true
|
||||
|
||||
- name: Deploy
|
||||
working-directory: workspace
|
||||
run: |
|
||||
lxc exec bot:pragmatismo-system -- systemctl stop system || true
|
||||
|
||||
sudo cp target/debug/botserver /opt/gbo/bin/system/
|
||||
sudo chmod +x /opt/gbo/bin/system/botserver
|
||||
|
||||
lxc exec bot:pragmatismo-system -- systemctl start system || true
|
||||
|
||||
46
.gitignore
vendored
46
.gitignore
vendored
|
|
@ -2,11 +2,17 @@
|
|||
target/
|
||||
*.out
|
||||
bin/
|
||||
|
||||
*.png
|
||||
*.jpg
|
||||
# Logs
|
||||
*.log
|
||||
*logfile*
|
||||
*-log*
|
||||
.vscode
|
||||
.zed
|
||||
.gemini
|
||||
.claude
|
||||
|
||||
|
||||
# Temporary files
|
||||
.tmp*
|
||||
|
|
@ -24,19 +30,23 @@ work/
|
|||
|
||||
# Documentation build
|
||||
docs/book
|
||||
|
||||
.ruff_cache
|
||||
.goutputstream*
|
||||
# Installers (keep gitkeep)
|
||||
botserver-installers/*
|
||||
!botserver-installers/.gitkeep
|
||||
botserver-stack
|
||||
TODO*
|
||||
work
|
||||
|
||||
# Lock file (regenerated from Cargo.toml)
|
||||
Cargo.lock
|
||||
.swp
|
||||
# Lock file
|
||||
# Cargo.lock (should be tracked)
|
||||
.kiro
|
||||
config
|
||||
|
||||
# Data directory (contains bot configs and API keys)
|
||||
data/
|
||||
|
||||
# Playwright
|
||||
node_modules/
|
||||
/test-results/
|
||||
|
|
@ -44,4 +54,30 @@ node_modules/
|
|||
/blob-report/
|
||||
/playwright/.cache/
|
||||
/playwright/.auth/
|
||||
.playwright*
|
||||
.ruff_cache
|
||||
.opencode
|
||||
config/directory_config.json
|
||||
# CI cache bust: Fri Feb 13 22:33:51 UTC 2026
|
||||
|
||||
# Secrets - NEVER commit these files
|
||||
vault-unseal-keys
|
||||
start-and-unseal.sh
|
||||
vault-token-*
|
||||
init.json
|
||||
*.pem
|
||||
*.key
|
||||
*.crt
|
||||
*.cert
|
||||
$null
|
||||
AppData/
|
||||
build_errors*.txt
|
||||
build_errors_utf8.txt
|
||||
check.json
|
||||
clippy*.txt
|
||||
errors.txt
|
||||
errors_utf8.txt
|
||||
|
||||
vault-unseal-keysdefault-vault.tar
|
||||
prompts/sec-bots.md
|
||||
AGENTS-PROD.md
|
||||
|
|
|
|||
23
.gitmodules
vendored
23
.gitmodules
vendored
|
|
@ -1,42 +1,43 @@
|
|||
[submodule "botapp"]
|
||||
path = botapp
|
||||
url = https://github.com/GeneralBots/botapp.git
|
||||
url = ../botapp.git
|
||||
|
||||
[submodule "botserver"]
|
||||
path = botserver
|
||||
url = https://github.com/GeneralBots/BotServer.git
|
||||
url = ../BotServer.git
|
||||
|
||||
[submodule "botlib"]
|
||||
path = botlib
|
||||
url = https://github.com/GeneralBots/botlib.git
|
||||
url = ../botlib.git
|
||||
|
||||
[submodule "botui"]
|
||||
path = botui
|
||||
url = https://github.com/GeneralBots/botui.git
|
||||
url = ../botui.git
|
||||
|
||||
[submodule "botbook"]
|
||||
path = botbook
|
||||
url = https://github.com/GeneralBots/botbook.git
|
||||
url = ../botbook.git
|
||||
|
||||
[submodule "bottest"]
|
||||
path = bottest
|
||||
url = https://github.com/GeneralBots/bottest.git
|
||||
url = ../bottest.git
|
||||
|
||||
[submodule "botdevice"]
|
||||
path = botdevice
|
||||
url = https://github.com/GeneralBots/botdevice.git
|
||||
url = ../botdevice.git
|
||||
|
||||
[submodule "botmodels"]
|
||||
path = botmodels
|
||||
url = https://github.com/GeneralBots/botmodels.git
|
||||
url = ../botmodels.git
|
||||
|
||||
[submodule "botplugin"]
|
||||
path = botplugin
|
||||
url = https://github.com/GeneralBots/botplugin.git
|
||||
url = ../botplugin.git
|
||||
|
||||
[submodule "bottemplates"]
|
||||
path = bottemplates
|
||||
url = https://github.com/GeneralBots/bottemplates.git
|
||||
url = ../bottemplates.git
|
||||
|
||||
[submodule ".github"]
|
||||
path = .github
|
||||
url = https://github.com/GeneralBots/.github.git
|
||||
url = ../.github.git
|
||||
|
|
|
|||
9
.idea/gb.iml
generated
9
.idea/gb.iml
generated
|
|
@ -1,9 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<module type="JAVA_MODULE" version="4">
|
||||
<component name="NewModuleRootManager" inherit-compiler-output="true">
|
||||
<exclude-output />
|
||||
<content url="file://$MODULE_DIR$" />
|
||||
<orderEntry type="inheritedJdk" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
</component>
|
||||
</module>
|
||||
10
.idea/libraries/botserver_installers.xml
generated
10
.idea/libraries/botserver_installers.xml
generated
|
|
@ -1,10 +0,0 @@
|
|||
<component name="libraryTable">
|
||||
<library name="botserver-installers">
|
||||
<CLASSES>
|
||||
<root url="jar://$PROJECT_DIR$/botserver/botserver-installers/llama-b7345-bin-ubuntu-x64.zip!/" />
|
||||
<root url="jar://$PROJECT_DIR$/botserver/botserver-installers/vault_1.15.4_linux_amd64.zip!/" />
|
||||
</CLASSES>
|
||||
<JAVADOC />
|
||||
<SOURCES />
|
||||
</library>
|
||||
</component>
|
||||
6
.idea/misc.xml
generated
6
.idea/misc.xml
generated
|
|
@ -1,6 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectRootManager" version="2">
|
||||
<output url="file://$PROJECT_DIR$/out" />
|
||||
</component>
|
||||
</project>
|
||||
8
.idea/modules.xml
generated
8
.idea/modules.xml
generated
|
|
@ -1,8 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectModuleManager">
|
||||
<modules>
|
||||
<module fileurl="file://$PROJECT_DIR$/.idea/gb.iml" filepath="$PROJECT_DIR$/.idea/gb.iml" />
|
||||
</modules>
|
||||
</component>
|
||||
</project>
|
||||
6
.idea/vcs.xml
generated
6
.idea/vcs.xml
generated
|
|
@ -1,6 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="VcsDirectoryMappings">
|
||||
<mapping directory="" vcs="Git" />
|
||||
</component>
|
||||
</project>
|
||||
52
.idea/workspace.xml
generated
52
.idea/workspace.xml
generated
|
|
@ -1,52 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ChangeListManager">
|
||||
<list default="true" id="32fd08b0-7933-467d-9a46-1a53fd2da15c" name="Changes" comment="">
|
||||
<change beforePath="$PROJECT_DIR$/botserver" beforeDir="false" afterPath="$PROJECT_DIR$/botserver" afterDir="false" />
|
||||
</list>
|
||||
<option name="SHOW_DIALOG" value="false" />
|
||||
<option name="HIGHLIGHT_CONFLICTS" value="true" />
|
||||
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
|
||||
<option name="LAST_RESOLUTION" value="IGNORE" />
|
||||
</component>
|
||||
<component name="Git.Settings">
|
||||
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
|
||||
</component>
|
||||
<component name="ProjectColorInfo"><![CDATA[{
|
||||
"associatedIndex": 1
|
||||
}]]></component>
|
||||
<component name="ProjectId" id="38qdWTFkX8Nem4LzgigXpAycSN7" />
|
||||
<component name="ProjectViewState">
|
||||
<option name="hideEmptyMiddlePackages" value="true" />
|
||||
<option name="showLibraryContents" value="true" />
|
||||
</component>
|
||||
<component name="PropertiesComponent"><![CDATA[{
|
||||
"keyToString": {
|
||||
"ModuleVcsDetector.initialDetectionPerformed": "true",
|
||||
"RunOnceActivity.ShowReadmeOnStart": "true",
|
||||
"RunOnceActivity.git.unshallow": "true",
|
||||
"RunOnceActivity.typescript.service.memoryLimit.init": "true",
|
||||
"git-widget-placeholder": "main",
|
||||
"last_opened_file_path": "/home/rodriguez/src/gb",
|
||||
"vue.rearranger.settings.migration": "true"
|
||||
}
|
||||
}]]></component>
|
||||
<component name="SharedIndexes">
|
||||
<attachedChunks>
|
||||
<set>
|
||||
<option value="bundled-jdk-30f59d01ecdd-2fc7cc6b9a17-intellij.indexing.shared.core-IU-253.30387.90" />
|
||||
</set>
|
||||
</attachedChunks>
|
||||
</component>
|
||||
<component name="TaskManager">
|
||||
<task active="true" id="Default" summary="Default task">
|
||||
<changelist id="32fd08b0-7933-467d-9a46-1a53fd2da15c" name="Changes" comment="" />
|
||||
<created>1769531070022</created>
|
||||
<option name="number" value="Default" />
|
||||
<option name="presentableId" value="Default" />
|
||||
<updated>1769531070022</updated>
|
||||
<workItem from="1769531115917" duration="176000" />
|
||||
</task>
|
||||
<servers />
|
||||
</component>
|
||||
</project>
|
||||
|
|
@ -1,198 +0,0 @@
|
|||
{
|
||||
"languages": {
|
||||
"typescript": {
|
||||
"name": "typescript-language-server",
|
||||
"command": "typescript-language-server",
|
||||
"args": [
|
||||
"--stdio"
|
||||
],
|
||||
"file_extensions": [
|
||||
"ts",
|
||||
"js",
|
||||
"tsx",
|
||||
"jsx"
|
||||
],
|
||||
"project_patterns": [
|
||||
"package.json",
|
||||
"tsconfig.json"
|
||||
],
|
||||
"exclude_patterns": [
|
||||
"**/node_modules/**",
|
||||
"**/dist/**"
|
||||
],
|
||||
"multi_workspace": false,
|
||||
"initialization_options": {
|
||||
"preferences": {
|
||||
"disableSuggestions": false
|
||||
}
|
||||
},
|
||||
"request_timeout_secs": 60
|
||||
},
|
||||
"python": {
|
||||
"name": "pyright",
|
||||
"command": "pyright-langserver",
|
||||
"args": [
|
||||
"--stdio"
|
||||
],
|
||||
"file_extensions": [
|
||||
"py"
|
||||
],
|
||||
"project_patterns": [
|
||||
"pyproject.toml",
|
||||
"setup.py",
|
||||
"requirements.txt",
|
||||
"pyrightconfig.json"
|
||||
],
|
||||
"exclude_patterns": [
|
||||
"**/__pycache__/**",
|
||||
"**/venv/**",
|
||||
"**/.venv/**",
|
||||
"**/.pytest_cache/**"
|
||||
],
|
||||
"multi_workspace": false,
|
||||
"initialization_options": {},
|
||||
"request_timeout_secs": 60
|
||||
},
|
||||
"rust": {
|
||||
"name": "rust-analyzer",
|
||||
"command": "rust-analyzer",
|
||||
"args": [],
|
||||
"file_extensions": [
|
||||
"rs"
|
||||
],
|
||||
"project_patterns": [
|
||||
"Cargo.toml"
|
||||
],
|
||||
"exclude_patterns": [
|
||||
"**/target/**"
|
||||
],
|
||||
"multi_workspace": false,
|
||||
"initialization_options": {
|
||||
"cargo": {
|
||||
"buildScripts": {
|
||||
"enable": true
|
||||
}
|
||||
},
|
||||
"diagnostics": {
|
||||
"enable": true,
|
||||
"enableExperimental": true
|
||||
},
|
||||
"workspace": {
|
||||
"symbol": {
|
||||
"search": {
|
||||
"scope": "workspace"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"request_timeout_secs": 60
|
||||
},
|
||||
"java": {
|
||||
"name": "jdtls",
|
||||
"command": "jdtls",
|
||||
"args": [],
|
||||
"file_extensions": [
|
||||
"java"
|
||||
],
|
||||
"project_patterns": [
|
||||
"pom.xml",
|
||||
"build.gradle",
|
||||
"build.gradle.kts",
|
||||
".project"
|
||||
],
|
||||
"exclude_patterns": [
|
||||
"**/target/**",
|
||||
"**/build/**",
|
||||
"**/.gradle/**"
|
||||
],
|
||||
"multi_workspace": false,
|
||||
"initialization_options": {
|
||||
"settings": {
|
||||
"java": {
|
||||
"compile": {
|
||||
"nullAnalysis": {
|
||||
"mode": "automatic"
|
||||
}
|
||||
},
|
||||
"configuration": {
|
||||
"annotationProcessing": {
|
||||
"enabled": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"request_timeout_secs": 60
|
||||
},
|
||||
"ruby": {
|
||||
"name": "solargraph",
|
||||
"command": "solargraph",
|
||||
"args": [
|
||||
"stdio"
|
||||
],
|
||||
"file_extensions": [
|
||||
"rb"
|
||||
],
|
||||
"project_patterns": [
|
||||
"Gemfile",
|
||||
"Rakefile"
|
||||
],
|
||||
"exclude_patterns": [
|
||||
"**/vendor/**",
|
||||
"**/tmp/**"
|
||||
],
|
||||
"multi_workspace": false,
|
||||
"initialization_options": {},
|
||||
"request_timeout_secs": 60
|
||||
},
|
||||
"go": {
|
||||
"name": "gopls",
|
||||
"command": "gopls",
|
||||
"args": [],
|
||||
"file_extensions": [
|
||||
"go"
|
||||
],
|
||||
"project_patterns": [
|
||||
"go.mod",
|
||||
"go.sum"
|
||||
],
|
||||
"exclude_patterns": [
|
||||
"**/vendor/**"
|
||||
],
|
||||
"multi_workspace": false,
|
||||
"initialization_options": {
|
||||
"usePlaceholders": true,
|
||||
"completeUnimported": true
|
||||
},
|
||||
"request_timeout_secs": 60
|
||||
},
|
||||
"cpp": {
|
||||
"name": "clangd",
|
||||
"command": "clangd",
|
||||
"args": [
|
||||
"--background-index"
|
||||
],
|
||||
"file_extensions": [
|
||||
"cpp",
|
||||
"cc",
|
||||
"cxx",
|
||||
"c",
|
||||
"h",
|
||||
"hpp",
|
||||
"hxx"
|
||||
],
|
||||
"project_patterns": [
|
||||
"CMakeLists.txt",
|
||||
"compile_commands.json",
|
||||
"Makefile"
|
||||
],
|
||||
"exclude_patterns": [
|
||||
"**/build/**",
|
||||
"**/cmake-build-**/**"
|
||||
],
|
||||
"multi_workspace": false,
|
||||
"initialization_options": {},
|
||||
"request_timeout_secs": 60
|
||||
}
|
||||
}
|
||||
}
|
||||
24
.vscode/launch.json
vendored
24
.vscode/launch.json
vendored
|
|
@ -1,24 +0,0 @@
|
|||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
|
||||
{
|
||||
"type": "lldb",
|
||||
"request": "launch",
|
||||
"name": "Debug executable 'botserver'",
|
||||
"cargo": {
|
||||
"args": ["run", "--bin=botserver", "--package=botserver", "--manifest-path=${workspaceFolder}/botserver/Cargo.toml"],
|
||||
"filter": {
|
||||
"name": "botserver",
|
||||
"kind": "bin"
|
||||
}
|
||||
},
|
||||
"args": [],
|
||||
"env": {
|
||||
"RUST_LOG": "trace,aws_sigv4=off,aws_smithy_checksums=off,mio=off,reqwest=off,aws_runtime=off,aws_smithy_http_client=off,rustls=off,hyper_util=off,aws_smithy_runtime=off,aws_smithy_runtime_api=off,tracing=off,aws_sdk_s3=off"
|
||||
|
||||
},
|
||||
"cwd": "${workspaceFolder}/botserver"
|
||||
},
|
||||
]
|
||||
}
|
||||
5
.vscode/settings.json
vendored
5
.vscode/settings.json
vendored
|
|
@ -1,5 +0,0 @@
|
|||
{
|
||||
"git.ignoreLimitWarning": true,
|
||||
"Codegeex.SidebarUI.LanguagePreference": "English",
|
||||
"Codegeex.RepoIndex": true
|
||||
}
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
{
|
||||
"languages": {
|
||||
"Rust": {
|
||||
"enable_language_server": false,
|
||||
},
|
||||
},
|
||||
}
|
||||
132
AGENTS.md
Normal file
132
AGENTS.md
Normal file
|
|
@ -0,0 +1,132 @@
|
|||
# General Bots AI Agent Guidelines
|
||||
|
||||
|
||||
NEVER INCLUDE HERE CREDENTIALS OR COMPANY INFORMATION, THIS IS COMPANY AGNOSTIC.
|
||||
Use apenas a língua culta ao falar. Never save files to root — use `/tmp` for temp files. Never push to ALM without asking first (it is production). If a tool fails to install, check the official website for instructions. Local file support (`/opt/gbo/data`) has been removed; bots are loaded only from Drive (MinIO/S3).
|
||||
|
||||
---
|
||||
|
||||
## Critical Production Rules
|
||||
|
||||
Always manage services via `systemctl` inside the `system` Incus container. Never run `/opt/gbo/bin/botserver` or `/opt/gbo/bin/botui` directly — they skip the `.env` file, which means Vault credentials fail to load and services break. The correct commands are `sudo incus exec system -- systemctl start|stop|restart|status botserver` and the same for `ui`. Systemctl handles env loading, auto-restart, and process lifecycle.
|
||||
|
||||
In development you may use `cargo run` or `./target/debug/botserver` with `botserver/.env`. In production, always use `systemctl start botserver` with `/opt/gbo/bin/.env`.
|
||||
|
||||
---
|
||||
|
||||
## Workspace Structure
|
||||
|
||||
The workspace has eight crates. `botserver` is the main API server (port 8080) using Axum, Diesel, and Rhai BASIC. `botui` is the web UI server and proxy (port 3000) using Axum, HTML/HTMX/CSS. `botapp` is a Tauri 2 desktop wrapper. `botlib` holds shared types and errors. `botbook` is mdBook documentation. `bottest` holds integration tests. `botdevice` handles IoT/device support. `botplugin` is a JS browser extension.
|
||||
|
||||
Key paths: binary at `target/debug/botserver`, always run from the `botserver/` directory, env file at `botserver/.env`, UI files under `botui/ui/suite/`, bot data exclusively in Drive (MinIO/S3) under `/{botname}.gbai/` buckets. Test at `http://localhost:3000`; login at `http://localhost:3000/suite/auth/login.html`.
|
||||
|
||||
Bot files in Drive follow this structure: `{botname}.gbai/{botname}.gbdialog/` contains `*.bas` scripts, `config.csv`, and the `.gbkb/` knowledge base folder. There is no local file monitoring — botserver compiles `.bas` to `.ast` in memory from Drive only.
|
||||
|
||||
---
|
||||
|
||||
## Absolute Prohibitions
|
||||
|
||||
Never search the `/target` folder. Never build in release mode or use `--release`. Never run `cargo build` — use `cargo check` for verification. Never run `cargo clean` (causes 30-minute rebuilds); use `./reset.sh` for DB issues. Never deploy manually via `scp`, SSH binary copy, or any method other than the CI/CD pipeline (push → ALM → alm-ci builds → deploys to system container). Never run the binary directly in production — use `systemctl` or `./restart.sh`.
|
||||
|
||||
Never use `panic!()`, `todo!()`, `unimplemented!()`, `unwrap()`, or `expect()` in Rust code. Never use `Command::new()` directly — use `SafeCommand`. Never return raw error strings to HTTP clients — use `ErrorSanitizer`. Never use `#[allow()]` or lint exceptions in `Cargo.toml` — fix the code. Never use `_` prefix for unused variables — delete or use them. Never leave unused imports, dead code, or commented-out code. Never use CDN links — all assets must be local. Never create `.md` docs without checking `botbook/` first. Never hardcode credentials — use `generate_random_string()` or env vars. Never include sensitive data (IPs, tokens, keys) in docs or code; mask IPs in logs as `10.x.x.x`. Never create files with secrets anywhere except `/tmp/`.
|
||||
|
||||
---
|
||||
|
||||
## Build Pattern — Fix Fast Loop
|
||||
|
||||
When checking botserver, run `cargo check -p botserver > /tmp/check.log 2>&1 &`, capture the PID, then loop watching line count and kill the process once it exceeds 20 lines. After killing, check for errors with `strings /tmp/check.log | grep "^error" | head -20`. Fix errors immediately, then repeat. Never use `--all-features` (pulls docs/slides dependencies). This saves 10+ minutes per error cycle since full compilation takes 2–3 minutes. The key rule: kill at 20 lines, fix immediately, loop until clean.
|
||||
|
||||
If the process is killed by OOM, run `pkill -9 cargo; pkill -9 rustc; pkill -9 botserver` then retry with `CARGO_BUILD_JOBS=1 cargo check -p botserver 2>&1 | tail -200`.
|
||||
|
||||
---
|
||||
|
||||
## Security Directives — Mandatory
|
||||
|
||||
For error handling, never use `unwrap()`, `expect()`, `panic!()`, or `todo!()`. Use `value?`, `value.ok_or_else(|| Error::NotFound)?`, `value.unwrap_or_default()`, or `if let Some(v) = value { ... }`.
|
||||
|
||||
For command execution, never use `Command::new("cmd").arg(user_input).output()`. Use `SafeCommand::new("allowed_command")?.arg("safe_arg")?.execute()` from `crate::security::command_guard`.
|
||||
|
||||
For error responses, never return `Json(json!({ "error": e.to_string() }))`. Use `log_and_sanitize(&e, "context", None)` from `crate::security::error_sanitizer` and return `(StatusCode::INTERNAL_SERVER_ERROR, sanitized)`.
|
||||
|
||||
For SQL, never use `format!("SELECT * FROM {}", user_table)`. Use `sanitize_identifier` and `validate_table_name` from `crate::security::sql_guard`.
|
||||
|
||||
Rate limits: general 100 req/s, auth 10 req/s, API 50 req/s per token, WebSocket 10 msgs/s. Use the `governor` crate with per-IP and per-user tracking. All state-changing endpoints (POST/PUT/DELETE/PATCH) must require CSRF tokens via `tower_csrf` bound to the user session; Bearer Token endpoints are exempt. Every response must include these security headers: `Content-Security-Policy`, `Strict-Transport-Security`, `X-Frame-Options: DENY`, `X-Content-Type-Options: nosniff`, `Referrer-Policy: strict-origin-when-cross-origin`, and `Permissions-Policy: geolocation=(), microphone=(), camera=()`.
|
||||
|
||||
For dependencies, app crates track `Cargo.lock`; lib crates do not. Critical deps use exact versions (`=1.0.1`); regular deps use caret (`1.0`). Run `cargo audit` weekly and update only via PR with testing.
|
||||
|
||||
---
|
||||
|
||||
## Mandatory Code Patterns
|
||||
|
||||
Use `Self` not the type name in `impl` blocks. Always derive both `PartialEq` and `Eq` together. Use inline format args: `format!("Hello {name}")` not `format!("Hello {}", name)`. Combine identical match arms: `A | B => do_thing()`. Maximum 450 lines per file — split proactively at 350 lines into `types.rs`, `handlers.rs`, `operations.rs`, `utils.rs`, and `mod.rs`, re-exporting all public items in `mod.rs`.
|
||||
|
||||
---
|
||||
|
||||
## Error Fixing Workflow
|
||||
|
||||
Read the entire error list first. Group errors by file. For each file: view it, fix all errors, then write once. Only verify with `cargo check` after all fixes are applied — never compile after each individual fix. `cargo clippy --workspace` must pass with zero warnings.
|
||||
|
||||
---
|
||||
|
||||
## Execution Modes
|
||||
|
||||
In local standalone mode (no incus), botserver manages all services itself. Run `cargo run -- --install` once to download and extract PostgreSQL, Valkey, MinIO, and Vault binaries into `botserver-stack/bin/`, initialize data directories, and download the LLM model. Then `cargo run` starts everything and serves at `http://localhost:8080`. Use `./reset.sh` to wipe and restart the local environment.
|
||||
|
||||
In container (Incus) production mode, services run in separate named containers. Start them all with `sudo incus start system tables vault directory drive cache llm vector_db`. Access the system container with `sudo incus exec system -- bash`. View botserver logs with `sudo incus exec system -- journalctl -u botserver -f`. The container layout is: `system` runs BotServer on 8080; `tables` runs PostgreSQL on 5432; `vault` runs Vault on 8200; `directory` runs Zitadel on 8080 internally (external port 9000 via iptables NAT); `drive` runs MinIO on 9100; `cache` runs Valkey on 6379; `llm` runs llama.cpp on 8081; `vector_db` runs Qdrant on 6333.
|
||||
|
||||
Use the `LOAD_ONLY` variable in `/opt/gbo/bin/.env` to filter which bots are loaded and monitored by DriveMonitor, for example `LOAD_ONLY=default,salesianos`.
|
||||
|
||||
---
|
||||
|
||||
## Debugging & Testing
|
||||
|
||||
To watch for errors live: `tail -f botserver.log | grep -i "error\|tool"`. To debug a specific tool: grep `Tool error` in logs, fix the `.bas` file in MinIO at `/{bot}.gbai/{bot}.gbdialog/{tool}.bas`, then wait for DriveMonitor to recompile (automatic on file change, in-memory only, no local `.ast` cache). Test in browser at `http://localhost:3000/{botname}`.
|
||||
|
||||
Common BASIC errors: `=== is not a valid operator` means you used JavaScript-style `===` — replace with `==` or use `--` for string separators. `Syntax error` means bad BASIC syntax — check parentheses and commas. `Tool execution failed` means a runtime error — check logs for stack trace.
|
||||
|
||||
For Playwright testing, navigate to `http://localhost:3000/<botname>`, snapshot to verify welcome message and suggestion buttons including Portuguese accents, click a suggestion, wait 3–5 seconds, snapshot, fill data, submit, then verify DB records and backend logs. If the browser hangs, run `pkill -9 -f brave; pkill -9 -f chrome; pkill -9 -f chromium`, wait 3 seconds, and navigate again. The chat window may overlap other apps — click the middle (restore) button to minimize it or navigate directly via URL.
|
||||
|
||||
WhatsApp routing is global — one number serves all bots, with routing determined by the `whatsapp-id` key in each bot's `config.csv`. The bot name is sent as the first message to route correctly.
|
||||
|
||||
---
|
||||
|
||||
## Bot Scripts Architecture
|
||||
|
||||
`start.bas` is the entry point executed on WebSocket connect and on the first user message (once per session). It loads suggestion buttons via `ADD_SUGGESTION_TOOL` and marks the session in Redis to prevent re-runs. `{tool}.bas` files implement individual tools (e.g. `detecta.bas`). `tables.bas` is a special file — never call it with `CALL`; it is parsed automatically at compile time by `process_table_definitions()` and its table definitions are synced to the database via `sync_bot_tables()`. `init_folha.bas` handles initialization for specific features.
|
||||
|
||||
The `CALL` keyword can invoke in-memory procedures or `.bas` scripts by name. If the target is not in memory, botserver looks for `{name}.bas` in the bot's gbdialog folder in Drive. The `DETECT` keyword analyzes a database table for anomalies: it requires the table to exist (defined in `tables.bas`) and calls the BotModels API at `/api/anomaly/detect`.
|
||||
|
||||
Tool buttons use `MessageType::TOOL_EXEC` (id 6). When the frontend sends `message_type: 6` via WebSocket, the backend executes the named tool directly in `stream_response()`, bypassing KB injection and LLM entirely. The result appears in chat without any "/tool" prefix text. Other message types are: 0 EXTERNAL, 1 USER, 2 BOT_RESPONSE, 3 CONTINUE, 4 SUGGESTION, 5 CONTEXT_CHANGE.
|
||||
|
||||
---
|
||||
|
||||
## Submodule Push Rule — Mandatory
|
||||
|
||||
Every time you push the main repo, you must also push all submodules. CI builds based on submodule commits — if a submodule is not pushed, CI deploys old code. Always push botserver, botui, and botlib to both `origin` and `alm` remotes before or alongside the main repo push.
|
||||
|
||||
The deploy workflow is: push to ALM → CI triggers on alm-ci → builds inside system container via SSH (to match glibc 2.36 on Debian 12 Bookworm, not the CI runner's glibc 2.41) → deploys binary → service auto-restarts. Verify by checking service status and logs about 10 minutes after pushing.
|
||||
|
||||
---
|
||||
|
||||
## Zitadel Setup (Directory Service)
|
||||
|
||||
Zitadel runs in the `directory` container on port 8080 internally. External port 9000 is forwarded to it via iptables NAT on the system container. The database is `PROD-DIRECTORY` on the `tables` container. The PAT file is at `/opt/gbo/conf/directory/admin-pat.txt` on the directory container. Admin credentials are username `admin`, password `Admin123!`. Current version is Zitadel v4.13.1. **Known bug**: Web console UI will return 404 for environment.json when accessed via reverse proxy public domain. Use http://<host-ip>:9000/ui/console for administrative interface instead.
|
||||
|
||||
To reinstall: drop and recreate `PROD-DIRECTORY` on the tables container, write the init YAML to `/opt/gbo/conf/directory/zitadel-init-steps.yaml` (defining org name, admin user, and PAT expiry), then start Zitadel with env vars for the PostgreSQL host/port/database/credentials, `ZITADEL_EXTERNALSECURE=false`, `ZITADEL_EXTERNALDOMAIN=<directory-ip>`, `ZITADEL_EXTERNALPORT=9000`, and `ZITADEL_TLS_ENABLED=false`. Pass `--masterkey MasterkeyNeedsToHave32Characters`, `--tlsMode disabled`, and `--steps <yaml-path>`. Bootstrap takes about 90 seconds; verify with `curl -sf http://localhost:8080/debug/healthz`.
|
||||
|
||||
Key API endpoints: Use **v2 API endpoints** for all operations: `POST /v2/organizations/{org_id}/domains` to add domains, `POST /v2/users/new` to create users, `POST /oauth/v2/token` for access tokens, `GET /debug/healthz` for health. When calling externally via port 9000, include `Host: <directory-ip>` header. The v1 Management API is deprecated and not functional in this version.
|
||||
|
||||
|
||||
## Frontend Standards & Performance
|
||||
|
||||
HTMX-first: the server returns HTML fragments, not JSON. Use `hx-get`, `hx-post`, `hx-target`, `hx-swap`, and WebSocket via htmx-ws. All assets must be local — no CDN links.
|
||||
|
||||
Release profile must use `opt-level = "z"`, `lto = true`, `codegen-units = 1`, `strip = true`, and `panic = "abort"`. Use `default-features = false` and opt into only needed features. Run `cargo tree --duplicates`, `cargo machete`, and `cargo audit` weekly.
|
||||
|
||||
Testing: unit tests live in per-crate `tests/` folders or `#[cfg(test)]` modules, run with `cargo test -p <crate>`. Integration tests live in `bottest/`, run with `cargo test -p bottest`. Aim for 80%+ coverage on critical paths; all error paths and security guards must be tested.
|
||||
|
||||
---
|
||||
|
||||
## Core Directives Summary
|
||||
|
||||
Fix offline first — read all errors before compiling again. Batch by file — fix all errors in a file at once and write once. Verify last — only run `cargo check` after all fixes are applied. Delete dead code — never keep unused code. Git workflow — always push to all repositories (origin and alm). Target zero warnings and zero errors — loop until clean.
|
||||
11600
Cargo.lock
generated
Normal file
11600
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -9,6 +9,7 @@ members = [
|
|||
"bottest",
|
||||
"botui",
|
||||
]
|
||||
exclude = ["backup-to-s3"]
|
||||
|
||||
[workspace.lints.rust]
|
||||
|
||||
|
|
@ -109,6 +110,7 @@ url = "2.5"
|
|||
dirs = "5.0"
|
||||
tempfile = "3"
|
||||
walkdir = "2.5.0"
|
||||
notify = "8.0"
|
||||
|
||||
# ─── COMPRESSION / ARCHIVES ───
|
||||
flate2 = "1.0"
|
||||
|
|
@ -174,7 +176,7 @@ indicatif = "0.18.0"
|
|||
|
||||
# ─── MEMORY ALLOCATOR ───
|
||||
tikv-jemallocator = "0.6"
|
||||
tikv-jemalloc-ctl = { version = "0.6", default-features = false }
|
||||
tikv-jemalloc-ctl = { version = "0.6", default-features = false, features = ["stats"] }
|
||||
|
||||
# ─── SECRETS / VAULT ───
|
||||
vaultrs = "0.7"
|
||||
|
|
|
|||
77
DEPENDENCIES.ps1
Normal file
77
DEPENDENCIES.ps1
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
<#
|
||||
.SYNOPSIS
|
||||
Installs runtime dependencies for General Bots on Windows.
|
||||
|
||||
.DESCRIPTION
|
||||
This script downloads and configures the system libraries required to build
|
||||
and run BotServer on Windows. It downloads PostgreSQL binaries (for libpq)
|
||||
and sets the PQ_LIB_DIR environment variable permanently.
|
||||
|
||||
.EXAMPLE
|
||||
PS> .\DEPENDENCIES.ps1
|
||||
#>
|
||||
|
||||
$ErrorActionPreference = 'Stop'
|
||||
|
||||
# ─── COLORS ───
|
||||
function Write-Step { param($msg) Write-Host " * $msg" -ForegroundColor Green }
|
||||
function Write-Warn { param($msg) Write-Host " ! $msg" -ForegroundColor Yellow }
|
||||
function Write-Err { param($msg) Write-Host " x $msg" -ForegroundColor Red }
|
||||
|
||||
Write-Host "========================================" -ForegroundColor Green
|
||||
Write-Host " General Bots Runtime Dependencies" -ForegroundColor Green
|
||||
Write-Host " (Windows)" -ForegroundColor Green
|
||||
Write-Host "========================================" -ForegroundColor Green
|
||||
Write-Host ""
|
||||
|
||||
# ─── PostgreSQL binaries (libpq.lib for Diesel ORM) ───
|
||||
$PgsqlDir = "C:\pgsql\pgsql"
|
||||
$PgsqlLib = "$PgsqlDir\lib\libpq.lib"
|
||||
$PgsqlZipUrl = "https://get.enterprisedb.com/postgresql/postgresql-17.4-1-windows-x64-binaries.zip"
|
||||
$PgsqlZip = "$env:TEMP\pgsql.zip"
|
||||
|
||||
if (Test-Path $PgsqlLib) {
|
||||
Write-Step "PostgreSQL binaries already present at $PgsqlDir"
|
||||
} else {
|
||||
Write-Host "`nDownloading PostgreSQL binaries..." -ForegroundColor Cyan
|
||||
Write-Host " URL: $PgsqlZipUrl"
|
||||
Write-Host " This may take a few minutes (~300MB)...`n"
|
||||
|
||||
Invoke-WebRequest -Uri $PgsqlZipUrl -OutFile $PgsqlZip -UseBasicParsing
|
||||
|
||||
Write-Host "Extracting to C:\pgsql ..."
|
||||
if (Test-Path "C:\pgsql") { Remove-Item "C:\pgsql" -Recurse -Force }
|
||||
Expand-Archive -Path $PgsqlZip -DestinationPath "C:\pgsql" -Force
|
||||
Remove-Item $PgsqlZip -Force -ErrorAction SilentlyContinue
|
||||
|
||||
if (Test-Path $PgsqlLib) {
|
||||
Write-Step "PostgreSQL binaries installed successfully."
|
||||
} else {
|
||||
Write-Err "Failed to find libpq.lib after extraction!"
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
# Set PQ_LIB_DIR permanently for the current user
|
||||
$CurrentPqDir = [System.Environment]::GetEnvironmentVariable("PQ_LIB_DIR", "User")
|
||||
if ($CurrentPqDir -ne "$PgsqlDir\lib") {
|
||||
[System.Environment]::SetEnvironmentVariable("PQ_LIB_DIR", "$PgsqlDir\lib", "User")
|
||||
$env:PQ_LIB_DIR = "$PgsqlDir\lib"
|
||||
Write-Step "PQ_LIB_DIR set to '$PgsqlDir\lib' (User environment variable)"
|
||||
} else {
|
||||
Write-Step "PQ_LIB_DIR already configured."
|
||||
}
|
||||
|
||||
# ─── Summary ───
|
||||
Write-Host ""
|
||||
Write-Host "========================================" -ForegroundColor Green
|
||||
Write-Host " Dependencies installed!" -ForegroundColor Green
|
||||
Write-Host "========================================" -ForegroundColor Green
|
||||
Write-Host ""
|
||||
Write-Host "You can now build and run:" -ForegroundColor Cyan
|
||||
Write-Host " cargo build -p botserver"
|
||||
Write-Host " cargo build -p botui"
|
||||
Write-Host " .\restart.ps1"
|
||||
Write-Host ""
|
||||
Write-Host "NOTE: If this is the first time, restart your terminal" -ForegroundColor Yellow
|
||||
Write-Host " so PQ_LIB_DIR takes effect." -ForegroundColor Yellow
|
||||
212
PROD.md
Normal file
212
PROD.md
Normal file
|
|
@ -0,0 +1,212 @@
|
|||
# Production Environment Guide (Compact)
|
||||
|
||||
## CRITICAL RULES — READ FIRST
|
||||
|
||||
NEVER INCLUDE CREDENTIALS OR COMPANY INFORMATION HERE — THIS DOCUMENT IS COMPANY-AGNOSTIC.
|
||||
Always manage services with `systemctl` inside the `system` Incus container. Never run `/opt/gbo/bin/botserver` or `/opt/gbo/bin/botui` directly — they will fail because they won't load the `.env` file containing Vault credentials and paths. The correct commands are `sudo incus exec system -- systemctl start|stop|restart|status botserver` and the same for `ui`. Systemctl handles environment loading, auto-restart, logging, and dependencies.
|
||||
|
||||
Never push secrets (API keys, passwords, tokens) to git. Never commit `init.json` (it contains Vault unseal keys). All secrets must come from Vault — only `VAULT_*` variables are allowed in `.env`. Never deploy manually via scp or ssh; always use CI/CD. Always push all submodules (botserver, botui, botlib) before or alongside the main repo. Always ask before pushing to ALM.
|
||||
|
||||
---
|
||||
|
||||
## Infrastructure Overview
|
||||
|
||||
The host machine is `PROD-GBO1`, accessed via `ssh user@<hostname>`, running Incus (an LXD fork) as hypervisor. All services run inside named Incus containers. You enter containers with `sudo incus exec <container> -- <command>` and list them with `sudo incus list`.
|
||||
|
||||
The containers and their roles are: `system` runs botserver on port 5858 and botui on port 5859; `alm-ci` runs the Forgejo Actions CI runner; `alm` hosts the Forgejo git server; `tables` runs PostgreSQL on port 5432; `cache` runs Valkey/Redis on port 6379; `drive` runs MinIO object storage on port 9100; `vault` runs HashiCorp Vault on port 8200; `vector` runs Qdrant on port 6333.
|
||||
|
||||
Externally, botserver is reachable at `https://system.example.com` and botui at `https://chat.example.com`. Internally, botui's `BOTSERVER_URL` must be `http://localhost:5858` — never the external HTTPS URL, because the Rust proxy runs server-side and needs direct localhost access.
|
||||
|
||||
---
|
||||
|
||||
## Services Detail
|
||||
|
||||
Botserver runs as user `gbuser`, binary at `/opt/gbo/bin/botserver`, logs at `/opt/gbo/logs/out.log` and `/opt/gbo/logs/err.log`, systemd unit at `/etc/systemd/system/botserver.service`, env loaded from `/opt/gbo/bin/.env`. Bot BASIC scripts live under `/opt/gbo/data/<botname>.gbai/<botname>.gbdialog/*.bas`; compiled AST cache goes to `/opt/gbo/work/`.
|
||||
|
||||
The directory service runs Zitadel as user `root`, binary at `/opt/gbo/bin/zitadel`, logs at `/opt/gbo/logs/zitadel.log`, systemd unit at `/etc/systemd/system/directory.service`, and loads environment from the service configuration. Zitadel provides identity management and OAuth2 services for the platform.
|
||||
|
||||
Internally, Zitadel listens on port 8080 within the directory container. For external access:
|
||||
- Via public domain (HTTPS): `https://login.example.com` (configured through proxy container)
|
||||
- Via host IP (HTTP): `http://<host-ip>:9000` (direct container port forwarding)
|
||||
- Via container IP (HTTP): `http://<directory-container-ip>:9000` (direct container access)
|
||||
Access the Zitadel console at `https://login.example.com/ui/console` with admin credentials. Zitadel implements v1 Management API (deprecated) and v2 Organization/User services. Always use the v2 endpoints under `/v2/organizations` and `/v2/users` for all operations.
|
||||
|
||||
The botserver bootstrap also manages: Vault (secrets), PostgreSQL (database), Valkey (cache, password auth), MinIO (object storage), Zitadel (identity provider), and llama.cpp (LLM).
|
||||
To obtain a PAT for Zitadel API access, check /opt/gbo/conf/directory/admin-pat.txt in the directory container. Use it with curl by setting the Authorization header: `Authorization: Bearer $(cat /opt/gbo/conf/directory/admin-pat.txt)` and include `-H "Host: <IP>"` for correct host resolution (replace with your directory container IP).
|
||||
|
||||
---
|
||||
|
||||
## Common Operations
|
||||
|
||||
**Check status:** `sudo incus exec system -- systemctl status botserver --no-pager` (same for `ui`). To check process existence: `sudo incus exec system -- pgrep -f botserver`.
|
||||
|
||||
**View logs:** For systemd journal: `sudo incus exec system -- journalctl -u botserver --no-pager -n 50`. For application logs: `sudo incus exec system -- tail -50 /opt/gbo/logs/out.log` or `err.log`. For live tail: `sudo incus exec system -- tail -f /opt/gbo/logs/out.log`.
|
||||
|
||||
**Restart:** `sudo incus exec system -- systemctl restart botserver` and same for `ui`. Never run the binary directly.
|
||||
|
||||
**Emergency manual deploy:** Kill the old process with `sudo incus exec system -- killall botserver`, copy the new binary from `/opt/gbo/ci/botserver/target/debug/botserver` to `/opt/gbo/bin/botserver`, set permissions with `chmod +x` and `chown gbuser:gbuser`, then start with `systemctl start botserver`.
|
||||
|
||||
**Transfer bot files:** Archive locally with `tar czf /tmp/bots.tar.gz -C /opt/gbo/data <botname>.gbai`, copy to host with `scp`, then extract inside container with `sudo incus exec system -- bash -c 'tar xzf /tmp/bots.tar.gz -C /opt/gbo/data/'`. Clear compiled cache with `find /opt/gbo/data -name "*.ast" -delete` and same for `/opt/gbo/work`.
|
||||
|
||||
**Snapshots:** `sudo incus snapshot list system` to list, `sudo incus snapshot restore system <name>` to restore.
|
||||
|
||||
---
|
||||
|
||||
## CI/CD Pipeline
|
||||
|
||||
Repositories exist on both GitHub and the internal ALM (Forgejo). The four repos are `gb` (main workspace), `botserver`, `botui`, and `botlib`. Always push submodules first (`cd botserver && git push alm main && git push origin main`), then update submodule references in the root repo and push that too.
|
||||
|
||||
The CI runner container (`alm-ci`) runs Debian Trixie with glibc 2.41, but the `system` container runs Debian 12 Bookworm with glibc 2.36. Binaries compiled on the CI runner are incompatible with the system container. The CI workflow (`botserver/.forgejo/workflows/botserver.yaml`) solves this by transferring source to the system container via `tar | ssh` and building there. The workflow triggers on pushes to `main`, clones repos, transfers source, builds inside system container, deploys the binary, and verifies botserver is running.
|
||||
|
||||
---
|
||||
|
||||
## DriveMonitor & Bot Configuration
|
||||
|
||||
DriveMonitor is a background service inside botserver that watches MinIO buckets and syncs changes to the local filesystem and database every 10 seconds. It monitors three directory types per bot: the `.gbdialog/` folder for BASIC scripts (downloads and recompiles on change), the `.gbot/` folder for `config.csv` (syncs to the `bot_configuration` database table), and the `.gbkb/` folder for knowledge base documents (downloads and indexes for vector search).
|
||||
|
||||
Bot configuration is stored in two PostgreSQL tables inside the `botserver` database. The `bot_configuration` table holds key-value pairs with columns `bot_id`, `config_key`, `config_value`, `config_type`, `is_encrypted`, and `updated_at`. The `gbot_config_sync` table tracks sync state with columns `bot_id`, `config_file_path`, `last_sync_at`, `file_hash`, and `sync_count`.
|
||||
|
||||
The `config.csv` format is a plain CSV with no header: each line is `key,value`, for example `llm-provider,groq` or `theme-color1,#cc0000`. DriveMonitor syncs it when the file ETag changes in MinIO, on botserver startup, or after a restart.
|
||||
|
||||
**Check config status:** Query `bot_configuration` via `sudo incus exec tables -- psql -h localhost -U postgres -d botserver -c "SELECT config_key, config_value FROM bot_configuration WHERE bot_id = (SELECT id FROM bots WHERE name = 'salesianos') ORDER BY config_key;"`. Check sync state via the `gbot_config_sync` table. Inspect the bucket directly with `sudo incus exec drive -- /opt/gbo/bin/mc cat local/salesianos.gbai/salesianos.gbot/config.csv`.
|
||||
|
||||
**Debug DriveMonitor:** Monitor live logs with `sudo incus exec system -- tail -f /opt/gbo/logs/out.log | grep -E "(DRIVE_MONITOR|check_gbot|config)"`. An empty `gbot_config_sync` table means DriveMonitor has not synced yet. If no new log entries appear after 30 seconds, the loop may be stuck — restart botserver with systemctl to clear the state.
|
||||
|
||||
**Common config issues:** If config.csv is missing from the bucket, create and upload it with `mc cp`. If the database shows stale values, restart botserver to force a fresh sync, or as a temporary fix update the database directly with `UPDATE bot_configuration SET config_value = 'groq', updated_at = NOW() WHERE ...`. To force a re-sync without restarting, copy config.csv over itself with `mc cp local/... local/...` to change the ETag.
|
||||
|
||||
---
|
||||
|
||||
## MinIO (Drive) Operations
|
||||
|
||||
All bot files live in MinIO buckets. Use the `mc` CLI at `/opt/gbo/bin/mc` from inside the `drive` container. The bucket structure per bot is: `{bot}.gbai/` as root, `{bot}.gbai/{bot}.gbdialog/` for BASIC scripts, `{bot}.gbai/{bot}.gbot/` for config.csv, and `{bot}.gbai/{bot}.gbkb/` for knowledge base folders.
|
||||
|
||||
Common mc commands: `mc ls local/` lists all buckets; `mc ls local/salesianos.gbai/` lists a bucket; `mc cat local/.../start.bas` prints a file; `mc cp local/.../file /tmp/file` downloads; `mc cp /tmp/file local/.../file` uploads (this triggers DriveMonitor recompile); `mc stat local/.../config.csv` shows ETag and metadata; `mc mb local/newbot.gbai` creates a bucket; `mc rb local/oldbot.gbai` removes an empty bucket.
|
||||
|
||||
If mc is not found, use the full path `/opt/gbo/bin/mc`. If alias `local` is not configured, check with `mc config host list`. If MinIO is not running, check with `sudo incus exec drive -- systemctl status minio`.
|
||||
|
||||
---
|
||||
|
||||
## Vault Security Architecture
|
||||
|
||||
HashiCorp Vault is the single source of truth for all secrets. Botserver reads `VAULT_ADDR` and `VAULT_TOKEN` from `/opt/gbo/bin/.env` at startup, initializes a TLS/mTLS client, then reads credentials from Vault paths. If Vault is unavailable, it falls back to defaults. The `.env` file must only contain `VAULT_*` variables plus `PORT`, `DATA_DIR`, `WORK_DIR`, and `LOAD_ONLY`.
|
||||
|
||||
**Global Vault paths:** `gbo/tables` holds PostgreSQL credentials; `gbo/drive` holds MinIO access key and secret; `gbo/cache` holds Valkey password; `gbo/llm` holds LLM URL and API keys; `gbo/directory` holds Zitadel config; `gbo/email` holds SMTP credentials; `gbo/vectordb` holds Qdrant config; `gbo/jwt` holds JWT signing secret; `gbo/encryption` holds the master encryption key. Organization-scoped secrets follow patterns like `gbo/orgs/{org_id}/bots/{bot_id}` and tenant infrastructure uses `gbo/tenants/{tenant_id}/infrastructure`.
|
||||
|
||||
**Credential resolution:** For any service, botserver checks the most specific Vault path first (org+bot level), falls back to a default bot path, then falls back to the global path, and only uses environment variables as a last resort in development.
|
||||
|
||||
**Verify Vault health:** `sudo incus exec vault -- curl -k -sf https://localhost:8200/v1/sys/health` should return JSON with `"sealed":false`. To read a secret: set `VAULT_ADDR`, `VAULT_TOKEN`, and `VAULT_CACERT` then run `vault kv get secret/gbo/tables`. To test from the system container, use curl with `--cacert /opt/gbo/conf/system/certificates/ca/ca.crt` and `-H "X-Vault-Token: <token>"`.
|
||||
|
||||
**init.json** is stored at `/opt/gbo/bin/botserver-stack/conf/vault/vault-conf/init.json` and contains the root token and 5 unseal keys (3 needed to unseal). Never commit this file to git. Store it encrypted in a secure location.
|
||||
|
||||
**Vault troubleshooting — cannot connect:** Check that the vault container's systemd unit is running, verify the token in `.env` is not expired with `vault token lookup`, confirm the CA cert path in `.env` matches the actual file location, and test network connectivity from system to vault container. To generate a new token: `vault token create -policy="botserver" -ttl="8760h" -format=json` then update `.env` and restart botserver.
|
||||
|
||||
# Get database credentials from Vault v2 API
|
||||
$ ssh user@ip "sudo incus exec system -- curl -s --cacert /opt/gbo/conf/system/certificates/ca/ca.crt -H 'X-Vault-Token: hvs...' https://ip:8200/v1/secret/data/gbo/tables 2>/dev/null"
|
||||
|
||||
**Vault troubleshooting — secrets missing:** Run `vault kv get secret/gbo/tables` (and other paths) to check if secrets exist. If a path returns NOT FOUND, add secrets with `vault kv put secret/gbo/tables host=<ip> port=5432 database=botserver username=gbuser password=<pw>` and similar for other paths.
|
||||
|
||||
**Vault sealed after restart:** Run `vault operator unseal <key1>`, repeat with key2 and key3 (3 of 5 keys from init.json), then verify with `vault status`.
|
||||
|
||||
**TLS certificate errors:** Confirm `/opt/gbo/conf/system/certificates/ca/ca.crt` exists in the system container. If missing, copy it from the vault container using `incus file pull vault/opt/gbo/conf/vault/ca.crt /tmp/ca.crt` then place it at the expected path.
|
||||
|
||||
**Vault snapshots:** Stop vault, run `sudo incus snapshot create vault backup-$(date +%Y%m%d-%H%M)`, start vault. Restore with `sudo incus snapshot restore vault <name>` while vault is stopped.
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting Quick Reference
|
||||
|
||||
**GLIBC mismatch (`GLIBC_2.39 not found`):** The binary was compiled on the CI runner (glibc 2.41) not inside the system container (glibc 2.36). The CI workflow must SSH into the system container to build. Check `botserver.yaml` to confirm this.
|
||||
|
||||
**botserver won't start:** Run `sudo incus exec system -- ldd /opt/gbo/bin/botserver | grep "not found"` to check for missing libraries. Run `sudo incus exec system -- timeout 10 /opt/gbo/bin/botserver 2>&1` to see startup errors. Confirm `/opt/gbo/data/` exists and is accessible.
|
||||
|
||||
**botui can't reach botserver:** Check that the `ui.service` systemd file has `BOTSERVER_URL=http://localhost:5858` — not the external HTTPS URL. Fix with `sed -i 's|BOTSERVER_URL=.*|BOTSERVER_URL=http://localhost:5858|'` on the service file, then `systemctl daemon-reload` and `systemctl restart ui`.
|
||||
|
||||
**Suggestions not showing:** Confirm bot `.bas` files exist under `/opt/gbo/data/<bot>.gbai/<bot>.gbdialog/`. Check logs for compilation errors. Clear the AST cache in `/opt/gbo/work/` and restart botserver.
|
||||
|
||||
**IPv6 DNS timeouts on external APIs (Groq, Cloudflare):** The container's DNS may return AAAA records without IPv6 connectivity. The container should have `IPV6=no` in its network config and `gai.conf` set appropriately. Check for `RES_OPTIONS=inet4` in `botserver.service` if issues persist.
|
||||
|
||||
**Logs show development paths instead of `/opt/gbo/data/`:** Botserver is using hardcoded dev paths. Check `.env` has `DATA_DIR=/opt/gbo/data/` and `WORK_DIR=/opt/gbo/work/`, verify the systemd unit has `EnvironmentFile=/opt/gbo/bin/.env`, and confirm Vault is reachable so service discovery works. Expected startup log lines include `info watcher:Watching data directory /opt/gbo/data` and `info botserver:BotServer started successfully on port 5858`.
|
||||
|
||||
**Migrations not running after push:** If `stat /opt/gbo/bin/botserver` shows old timestamp and `__diesel_schema_migrations` table has no new entries, CI did not rebuild. Make a trivial code change (e.g., add a comment) in botserver and push again to force rebuild.
|
||||
|
||||
---
|
||||
|
||||
## Drive (MinIO) File Operations Cheatsheet
|
||||
|
||||
All `mc` commands run inside the `drive` container with `PATH` set: `sudo incus exec drive -- bash -c 'export PATH=/opt/gbo/bin:$PATH && mc <command>'`. If `local` alias is missing, create it with credentials from Vault path `gbo/drive`.
|
||||
|
||||
**List bucket contents recursively:** `mc ls local/<bot>.gbai/ --recursive`
|
||||
|
||||
**Read a file from Drive:** `mc cat local/<bot>.gbai/<bot>.gbdialog/start.bas`
|
||||
|
||||
**Download a file:** `mc cp local/<bot>.gbai/<bot>.gbdialog/start.bas /tmp/start.bas`
|
||||
|
||||
**Upload a file to Drive (triggers DriveMonitor recompile):** Transfer file to host via `scp`, push into drive container with `sudo incus file push /tmp/file drive/tmp/file`, then `mc put /tmp/file local/<bot>.gbai/<bot>.gbdialog/start.bas`
|
||||
|
||||
**Full upload workflow example — updating config.csv:**
|
||||
```bash
|
||||
# 1. Download current config from Drive
|
||||
ssh user@host "sudo incus exec drive -- bash -c 'export PATH=/opt/gbo/bin:\$PATH && mc cat local/salesianos.gbai/salesianos.gbot/config.csv'" > /tmp/config.csv
|
||||
|
||||
# 2. Edit locally (change model, keys, etc.)
|
||||
sed -i 's/llm-model,old-model/llm-model,new-model/' /tmp/config.csv
|
||||
|
||||
# 3. Push edited file back to Drive
|
||||
scp /tmp/config.csv user@host:/tmp/config.csv
|
||||
ssh user@host "sudo incus file push /tmp/config.csv drive/tmp/config.csv"
|
||||
ssh user@host "sudo incus exec drive -- bash -c 'export PATH=/opt/gbo/bin:\$PATH && mc put /tmp/config.csv local/salesianos.gbai/salesianos.gbot/config.csv'"
|
||||
|
||||
# 4. Wait ~15 seconds, then verify DriveMonitor picked up the change
|
||||
ssh user@host "sudo incus exec system -- bash -c 'grep -i \"Model:\" /opt/gbo/logs/err.log | tail -3'"
|
||||
```
|
||||
|
||||
**Force re-sync of config.csv** (change ETag without content change): `mc cp local/<bot>.gbai/<bot>.gbot/config.csv local/<bot>.gbai/<bot>.gbot/config.csv`
|
||||
|
||||
**Create a new bot bucket:** `mc mb local/newbot.gbai`
|
||||
|
||||
**Check MinIO health:** `sudo incus exec drive -- bash -c '/opt/gbo/bin/mc admin info local'`
|
||||
|
||||
---
|
||||
|
||||
## Logging Quick Reference
|
||||
|
||||
**Application logs** (searchable, timestamped, most useful): `sudo incus exec system -- tail -f /opt/gbo/logs/err.log` (errors and debug) or `/opt/gbo/logs/out.log` (stdout). The systemd journal only captures process lifecycle events, not application output.
|
||||
|
||||
**Search logs for specific bot activity:** `grep -i "salesianos\|llm\|Model:\|KB\|USE_KB\|drive_monitor" /opt/gbo/logs/err.log | tail -30`
|
||||
|
||||
**Check which LLM model a bot is using:** `grep "Model:" /opt/gbo/logs/err.log | tail -5`
|
||||
|
||||
**Check DriveMonitor config sync:** `grep "check_gbot\|config.csv\|should_sync" /opt/gbo/logs/err.log | tail -20`
|
||||
|
||||
**Check KB/vector operations:** `grep -i "gbkb\|qdrant\|embedding\|index" /opt/gbo/logs/err.log | tail -20`
|
||||
|
||||
**Live tail with filter:** `sudo incus exec system -- bash -c 'tail -f /opt/gbo/logs/err.log | grep --line-buffered -i "salesianos\|error\|KB"'`
|
||||
|
||||
---
|
||||
|
||||
## Program Access Cheatsheet
|
||||
|
||||
| Program | Container | Path | Notes |
|
||||
|---------|-----------|------|-------|
|
||||
| botserver | system | `/opt/gbo/bin/botserver` | Run via systemctl only |
|
||||
| botui | system | `/opt/gbo/bin/botui` | Run via systemctl only |
|
||||
| mc (MinIO Client) | drive | `/opt/gbo/bin/mc` | Must set `PATH=/opt/gbo/bin:$PATH` |
|
||||
| psql | tables | `/usr/bin/psql` | `psql -h localhost -U postgres -d botserver` |
|
||||
| vault | vault | `/opt/gbo/bin/vault` | Needs `VAULT_ADDR`, `VAULT_TOKEN`, `VAULT_CACERT` |
|
||||
| zitadel | directory | `/opt/gbo/bin/zitadel` | Runs as root on port 8080 internally |
|
||||
|
||||
**Quick psql query — bot config:** `sudo incus exec tables -- psql -h localhost -U postgres -d botserver -c "SELECT config_key, config_value FROM bot_configuration WHERE bot_id = (SELECT id FROM bots WHERE name = 'salesianos') ORDER BY config_key;"`
|
||||
|
||||
**Quick psql query — active KBs for session:** `sudo incus exec tables -- psql -h localhost -U postgres -d botserver -c "SELECT * FROM session_kb_associations WHERE session_id = '<uuid>' AND is_active = true;"`
|
||||
|
||||
---
|
||||
|
||||
## BASIC Compilation Architecture
|
||||
|
||||
Compilation and runtime are now strictly separated. **Compilation** happens only in `BasicCompiler` inside DriveMonitor when it detects `.bas` file changes. The output is a fully preprocessed `.ast` file written to `work/<bot>.gbai/<bot>.gbdialog/<tool>.ast`. **Runtime** (start.bas, TOOL_EXEC, automation, schedule) loads only `.ast` files and calls `ScriptService::run()` which does `engine.compile() + eval_ast_with_scope()` on the already-preprocessed Rhai source — no preprocessing at runtime.
|
||||
|
||||
The `.ast` file has all transforms applied: `USE KB "cartas"` becomes `USE_KB("cartas")`, `IF/END IF` → `if/{ }`, `WHILE/WEND` → `while/{ }`, `BEGIN TALK/END TALK` → function calls, `SAVE`, `FOR EACH/NEXT`, `SELECT CASE`, `SET SCHEDULE`, `WEBHOOK`, `USE WEBSITE`, `LLM` keyword expansion, variable predeclaration, and keyword lowercasing. Runtime never calls `compile()`, `compile_tool_script()`, or `compile_preprocessed()` — those methods no longer exist.
|
||||
|
||||
**Tools (TOOL_EXEC) load `.ast` only** — there is no `.bas` fallback. If an `.ast` file is missing, the tool fails with "Failed to read tool .ast file". DriveMonitor must have compiled it first.
|
||||
|
||||
**Suggestion deduplication** uses Redis `SADD` (set) instead of `RPUSH` (list). This prevents duplicate suggestion buttons when `start.bas` runs multiple times per session. The key format is `suggestions:{bot_id}:{session_id}` and `get_suggestions` uses `SMEMBERS` to read it.
|
||||
672
README.md
672
README.md
|
|
@ -1,8 +1,27 @@
|
|||
RULE 0: Never call tool_call while thinking. For example, NEVER do this: Let me check if the API call succeeded:<tool_call>terminal<arg_key>command</arg_key><arg_value>tail -50 botserver.log | grep -E "LLM streaming error|error|Error|SUCCESS|200"</arg_value><arg_key>cd</arg_key><arg_value>gb</arg_value></tool_call>. First finish thinking, then emit an explanation and the tool call!
|
||||
# General Bots Workspace
|
||||
|
||||
## ⚠️ CRITICAL SECURITY WARNING
|
||||
|
||||
**Version:** 6.2.0
|
||||
**NEVER CREATE FILES WITH SECRETS IN THE REPOSITORY ROOT**
|
||||
|
||||
Secret files MUST be placed in `/tmp/` only:
|
||||
- ✅ `/tmp/vault-token-gb` - Vault root token
|
||||
- ✅ `/tmp/vault-unseal-key-gb` - Vault unseal key
|
||||
- ❌ `vault-unseal-keys` - FORBIDDEN (tracked by git)
|
||||
- ❌ `start-and-unseal.sh` - FORBIDDEN (contains secrets)
|
||||
|
||||
**Files added to .gitignore:** `vault-unseal-keys`, `start-and-unseal.sh`, `vault-token-*`
|
||||
|
||||
**Why `/tmp/`?**
|
||||
- Cleared on reboot (ephemeral)
|
||||
- Not tracked by git
|
||||
- Standard Unix security practice
|
||||
- Prevents accidental commits
|
||||
|
||||
---
|
||||
|
||||
|
||||
**Version:** 6.3.0
|
||||
**Type:** Rust Workspace (Monorepo with Independent Subproject Repos)
|
||||
|
||||
---
|
||||
|
|
@ -19,7 +38,7 @@ For comprehensive documentation, see **[docs.pragmatismo.com.br](https://docs.pr
|
|||
|
||||
| Crate | Purpose | Port | Tech Stack |
|
||||
|-------|---------|------|------------|
|
||||
| **botserver** | Main API server, business logic | 8088 | Axum, Diesel, Rhai BASIC |
|
||||
| **botserver** | Main API server, business logic | 9000 | Axum, Diesel, Rhai BASIC |
|
||||
| **botui** | Web UI server (dev) + proxy | 3000 | Axum, HTML/HTMX/CSS |
|
||||
| **botapp** | Desktop app wrapper | - | Tauri 2 |
|
||||
| **botlib** | Shared library | - | Core types, errors |
|
||||
|
|
@ -35,6 +54,35 @@ For comprehensive documentation, see **[docs.pragmatismo.com.br](https://docs.pr
|
|||
- **Env file:** `botserver/.env`
|
||||
- **Stack:** `botserver-stack/`
|
||||
- **UI Files:** `botui/ui/suite/`
|
||||
- **Local Bot Data:** `/opt/gbo/data/` (place `.gbai` packages here)
|
||||
|
||||
### Local Bot Data Directory
|
||||
|
||||
Place local bot packages in `/opt/gbo/data/` for automatic loading and monitoring:
|
||||
|
||||
**Directory Structure:**
|
||||
```
|
||||
/opt/gbo/data/
|
||||
└── mybot.gbai/
|
||||
├── mybot.gbdialog/
|
||||
│ ├── start.bas
|
||||
│ └── main.bas
|
||||
└── mybot.gbot/
|
||||
└── config.csv
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- **Auto-loading:** Bots automatically mounted on server startup
|
||||
- **Auto-compilation:** `.bas` files compiled to `.ast` on change
|
||||
- **Auto-creation:** New bots automatically added to database
|
||||
- **Hot-reload:** Changes trigger immediate recompilation
|
||||
- **Monitored by:** LocalFileMonitor and ConfigWatcher services
|
||||
|
||||
**Usage:**
|
||||
1. Create bot directory structure in `/opt/gbo/data/`
|
||||
2. Add `.bas` files to `<bot_name>.gbai/<bot_name>.gbdialog/`
|
||||
3. Server automatically detects and loads the bot
|
||||
4. Optional: Add `config.csv` for bot configuration
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -44,15 +92,36 @@ For comprehensive documentation, see **[docs.pragmatismo.com.br](https://docs.pr
|
|||
|
||||
BotServer automatically installs, configures, and manages all infrastructure components on first run. **DO NOT manually start these services** - BotServer handles everything.
|
||||
|
||||
| Component | Purpose | Port | Binary Location | Managed By |
|
||||
|-----------|---------|------|-----------------|------------|
|
||||
| **Vault** | Secrets management | 8200 | `botserver-stack/bin/vault/vault` | botserver |
|
||||
| **PostgreSQL** | Primary database | 5432 | `botserver-stack/bin/tables/bin/postgres` | botserver |
|
||||
| **MinIO** | Object storage (S3-compatible) | 9000/9001 | `botserver-stack/bin/drive/minio` | botserver |
|
||||
| **Zitadel** | Identity/Authentication | 8300 | `botserver-stack/bin/directory/zitadel` | botserver |
|
||||
| **Qdrant** | Vector database (embeddings) | 6333 | `botserver-stack/bin/vector_db/qdrant` | botserver |
|
||||
| **Valkey** | Cache/Queue (Redis-compatible) | 6379 | `botserver-stack/bin/cache/valkey-server` | botserver |
|
||||
| **Llama.cpp** | Local LLM server | 8081 | `botserver-stack/bin/llm/build/bin/llama-server` | botserver |
|
||||
**Automatic Service Lifecycle:**
|
||||
1. **Start**: When botserver starts, it automatically launches all infrastructure components (PostgreSQL, Vault, MinIO, Valkey, Qdrant, etc.)
|
||||
2. **Credentials**: BotServer retrieves all service credentials (passwords, tokens, API keys) from Vault
|
||||
3. **Connection**: BotServer uses these credentials to establish secure connections to each service
|
||||
4. **Query**: All database queries, cache operations, and storage requests are authenticated using Vault-managed credentials
|
||||
|
||||
**Credential Flow:**
|
||||
```
|
||||
botserver starts
|
||||
↓
|
||||
Launch PostgreSQL, MinIO, Valkey, Qdrant
|
||||
↓
|
||||
Connect to Vault
|
||||
↓
|
||||
Retrieve service credentials (from database)
|
||||
↓
|
||||
Authenticate with each service using retrieved credentials
|
||||
↓
|
||||
Ready to handle requests
|
||||
```
|
||||
|
||||
| Component | Purpose | Port | Binary Location | Credentials From |
|
||||
|-----------|---------|------|-----------------|------------------|
|
||||
| **Vault** | Secrets management | 8200 | `botserver-stack/bin/vault/vault` | Auto-unsealed |
|
||||
| **PostgreSQL** | Primary database | 5432 | `botserver-stack/bin/tables/bin/postgres` | Vault → database |
|
||||
| **MinIO** | Object storage (S3-compatible) | 9000/9001 | `botserver-stack/bin/drive/minio` | Vault → database |
|
||||
| **Zitadel** | Identity/Authentication | 8300 | `botserver-stack/bin/directory/zitadel` | Vault → database |
|
||||
| **Qdrant** | Vector database (embeddings) | 6333 | `botserver-stack/bin/vector_db/qdrant` | Vault → database |
|
||||
| **Valkey** | Cache/Queue (Redis-compatible) | 6379 | `botserver-stack/bin/cache/valkey-server` | Vault → database |
|
||||
| **Llama.cpp** | Local LLM server | 8081 | `botserver-stack/bin/llm/build/bin/llama-server` | Vault → database |
|
||||
|
||||
### 📦 Component Installation System
|
||||
|
||||
|
|
@ -271,9 +340,12 @@ cd botserver-stack/bin/cache && ./valkey-cli ping
|
|||
The script handles BOTH servers properly:
|
||||
1. Stop existing processes cleanly
|
||||
2. Build botserver and botui sequentially (no race conditions)
|
||||
3. Start botserver in background → auto-bootstrap infrastructure
|
||||
4. Start botui in background → proxy to botserver
|
||||
5. Show process IDs and monitoring commands
|
||||
3. Start botserver in background → **automatically starts all infrastructure services (PostgreSQL, Vault, MinIO, Valkey, Qdrant)**
|
||||
4. BotServer retrieves credentials from Vault and authenticates with all services
|
||||
5. Start botui in background → proxy to botserver
|
||||
6. Show process IDs and monitoring commands
|
||||
|
||||
**Infrastructure services are fully automated - no manual configuration required!**
|
||||
|
||||
**Monitor startup:**
|
||||
```bash
|
||||
|
|
@ -282,7 +354,7 @@ tail -f botserver.log botui.log
|
|||
|
||||
**Access:**
|
||||
- Web UI: http://localhost:3000
|
||||
- API: http://localhost:8088
|
||||
- API: http://localhost:9000
|
||||
|
||||
### 📊 Monitor & Debug
|
||||
|
||||
|
|
@ -306,7 +378,7 @@ grep -E " E |W |CLIENT:" botserver.log | tail -20
|
|||
|
||||
```bash
|
||||
cd botserver && cargo run -- --noconsole > ../botserver.log 2>&1 &
|
||||
cd botui && BOTSERVER_URL="http://localhost:8088" cargo run > ../botui.log 2>&1 &
|
||||
cd botui && BOTSERVER_URL="http://localhost:9000" cargo run > ../botui.log 2>&1 &
|
||||
```
|
||||
|
||||
### 🛑 Stop Servers
|
||||
|
|
@ -324,7 +396,7 @@ rm -rf botserver-stack/data/vault botserver-stack/conf/vault/init.json && ./rest
|
|||
|
||||
**Port in use?** Find and kill:
|
||||
```bash
|
||||
lsof -ti:8088 | xargs kill -9
|
||||
lsof -ti:9000 | xargs kill -9
|
||||
lsof -ti:3000 | xargs kill -9
|
||||
```
|
||||
|
||||
|
|
@ -335,8 +407,16 @@ All infrastructure services (PostgreSQL, Vault, Redis, Qdrant, MinIO, etc.) are
|
|||
- **Configurations:** `botserver-stack/conf/`
|
||||
- **Data storage:** `botserver-stack/data/`
|
||||
- **Service logs:** `botserver-stack/logs/` (check here for troubleshooting)
|
||||
- **Credentials:** Stored in Vault, retrieved by botserver at startup
|
||||
|
||||
**Do NOT install or reference global PostgreSQL, Redis, or other services.** When botserver starts, it automatically launches all required stack services. If you encounter service errors, check the individual service logs in `./botserver-stack/logs/[service]/` directories.
|
||||
**Do NOT install or reference global PostgreSQL, Redis, or other services.** When botserver starts, it automatically:
|
||||
1. Launches all required stack services
|
||||
2. Connects to Vault
|
||||
3. Retrieves credentials from the `bot_configuration` database table
|
||||
4. Authenticates with each service using retrieved credentials
|
||||
5. Begins handling requests with authenticated connections
|
||||
|
||||
If you encounter service errors, check the individual service logs in `./botserver-stack/logs/[service]/` directories.
|
||||
|
||||
### UI File Deployment - Production Options
|
||||
|
||||
|
|
@ -393,7 +473,7 @@ See `botserver/deploy/README.md` for deployment scripts.
|
|||
cd botserver && cargo run -- --noconsole
|
||||
|
||||
# Terminal 2: botui
|
||||
cd botui && BOTSERVER_URL="http://localhost:8088" cargo run
|
||||
cd botui && BOTSERVER_URL="http://localhost:9000" cargo run
|
||||
```
|
||||
|
||||
### Build Commands
|
||||
|
|
@ -410,234 +490,11 @@ cargo test -p bottest
|
|||
|
||||
---
|
||||
|
||||
## 🧭 LLM Navigation Guide
|
||||
## 🤖 AI Agent Guidelines
|
||||
|
||||
### Quick Context Jump
|
||||
- [Primary Purpose](#overview) - Unified workspace for AI automation platform
|
||||
- [Crate Structure](#-workspace-structure) - 9 independent crates with shared libraries
|
||||
- [Dependencies](#-component-dependency-graph) - How crates depend on each other
|
||||
- [Quick Start](#quick-start) - Get running in 2 commands
|
||||
- [Error Patterns](#common-error-patterns) - Fix compilation errors efficiently
|
||||
- [Security Rules](#-security-directives---mandatory) - MUST-FOLLOW security patterns
|
||||
- [Code Patterns](#-mandatory-code-patterns) - Required coding conventions
|
||||
- [Testing](#testing-strategy) - How to test changes
|
||||
- [Debugging](#debugging-guide) - Troubleshoot common issues
|
||||
> **For LLM instructions, coding rules, security directives, testing workflows, and error handling patterns, see [AGENTS.md](./AGENTS.md).**
|
||||
|
||||
### Reading This Workspace
|
||||
|
||||
**For LLMs analyzing this codebase:**
|
||||
1. Start with [Component Dependency Graph](#-component-dependency-graph) to understand relationships
|
||||
2. Review [Module Responsibility Matrix](#-module-responsibility-matrix) for what each module does
|
||||
3. Study [Data Flow Patterns](#-data-flow-patterns) to understand execution flow
|
||||
4. Reference [Common Architectural Patterns](#-common-architectural-patterns) before making changes
|
||||
5. Check [Security Rules](#-security-directives---mandatory) - violations are blocking issues
|
||||
6. Follow [Code Patterns](#-mandatory-code-patterns) - consistency is mandatory
|
||||
|
||||
**For Humans working on this codebase:**
|
||||
1. Follow [Error Fixing Workflow](#-error-fixing-workflow) for compilation errors
|
||||
2. Observe [File Size Limits](#-file-size-limits---mandatory) - max 450 lines per file
|
||||
3. Run [Weekly Maintenance Tasks](#-weekly-maintenance-tasks) to keep codebase healthy
|
||||
4. Read project-specific READMEs in [Project-Specific Guidelines](#-project-specific-guidelines)
|
||||
|
||||
## 🧪 Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
- **Location**: Each crate has `tests/` directory or inline `#[cfg(test)]` modules
|
||||
- **Naming**: Test functions use `test_` prefix or describe what they test
|
||||
- **Running**: `cargo test -p <crate_name>` or `cargo test` for all
|
||||
|
||||
### Integration Tests
|
||||
- **Location**: `bottest/` crate contains integration tests
|
||||
- **Scope**: Tests full workflows across multiple crates
|
||||
- **Running**: `cargo test -p bottest`
|
||||
- **Database**: Uses test database, automatically migrates on first run
|
||||
|
||||
### Test Utilities Available
|
||||
- **TestAppStateBuilder** (`bottest/src/harness.rs`) - Build test state with mocked components
|
||||
- **TestBot** (`bottest/src/bot/mod.rs`) - Mock bot for testing
|
||||
- **Test Database**: Auto-created, migrations run automatically
|
||||
|
||||
### Coverage Goals
|
||||
- **Critical paths**: 80%+ coverage required
|
||||
- **Error handling**: ALL error paths must have tests
|
||||
- **Security**: All security guards must have tests
|
||||
|
||||
## 🚨 CRITICAL ERROR HANDLING RULE
|
||||
|
||||
**STOP EVERYTHING WHEN ERRORS APPEAR**
|
||||
|
||||
When ANY error appears in logs during startup or operation:
|
||||
1. **IMMEDIATELY STOP** - Do not continue with other tasks
|
||||
2. **IDENTIFY THE ERROR** - Read the full error message and context
|
||||
3. **FIX THE ERROR** - Address the root cause, not symptoms
|
||||
4. **VERIFY THE FIX** - Ensure error is completely resolved
|
||||
5. **ONLY THEN CONTINUE** - Never ignore or work around errors
|
||||
|
||||
**NEVER restart servers to "fix" errors - FIX THE ACTUAL PROBLEM**
|
||||
|
||||
Examples of errors that MUST be fixed immediately:
|
||||
- Database connection errors
|
||||
- Component initialization failures
|
||||
- Service startup errors
|
||||
- Configuration errors
|
||||
- Any error containing "Error:", "Failed:", "Cannot", "Unable"
|
||||
|
||||
## 🐛 Debugging Guide
|
||||
|
||||
### Log Locations
|
||||
|
||||
| Component | Log File | What's Logged |
|
||||
|-----------|----------|---------------|
|
||||
| **botserver** | `botserver.log` | API requests, errors, script execution, **client navigation events** |
|
||||
| **botui** | `botui.log` | UI rendering, WebSocket connections |
|
||||
| **drive_monitor** | In botserver logs with `[drive_monitor]` prefix | File sync, compilation |
|
||||
| **script execution** | In botserver logs with `[ScriptService]` prefix | BASIC compilation, runtime errors |
|
||||
| **client errors** | In botserver logs with `CLIENT:` prefix | JavaScript errors, navigation events |
|
||||
|
||||
### Client-Side Logging
|
||||
|
||||
**Navigation Tracking:** All client-side navigation is logged to botserver.log with `CLIENT:` prefix:
|
||||
```
|
||||
CLIENT:NAVIGATION: click: home -> drive
|
||||
CLIENT:NAVIGATION: hashchange: drive -> chat
|
||||
```
|
||||
|
||||
**Error Reporting:** JavaScript errors automatically appear in server logs:
|
||||
```
|
||||
CLIENT:ERROR: Uncaught TypeError: Cannot read property 'x' of undefined at /suite/js/app.js:123
|
||||
```
|
||||
|
||||
**For LLM Troubleshooting:** ALWAYS check both:
|
||||
1. `botserver.log` - Server errors + client navigation/errors (prefixed with `CLIENT:`)
|
||||
2. `botui.log` - UI server logs
|
||||
|
||||
### USE WEBSITE Feature - Vector DB Context Injection
|
||||
|
||||
**FIXED (v6.2.0+):** The `USE WEBSITE` BASIC command now properly injects vector database embeddings into chat context.
|
||||
|
||||
**How it works:**
|
||||
1. **Preprocessing:** When a `.bas` file containing `USE WEBSITE "https://..."` is compiled, the website is registered for crawling
|
||||
2. **Crawling:** Content is extracted, chunked, and embedded into Qdrant vector DB (collection name: `website_<url_hash>`)
|
||||
3. **Runtime Association:** The compiled `.ast` file contains `USE_WEBSITE()` function call that creates session-website association
|
||||
4. **Context Injection:** During chat, `inject_kb_context()` searches active websites' embeddings and includes relevant chunks in LLM prompt
|
||||
|
||||
**Example BASIC script:**
|
||||
```basic
|
||||
USE WEBSITE "https://docs.pragmatismo.com.br" REFRESH "1h"
|
||||
|
||||
TALK "Hello! I can now answer questions about the documentation."
|
||||
```
|
||||
|
||||
**Database tables involved:**
|
||||
- `session_website_associations` - Links sessions to websites
|
||||
- `website_embeddings` - Stores crawled content vectors in Qdrant
|
||||
|
||||
**Verification:**
|
||||
```sql
|
||||
-- Check if website is associated with session
|
||||
SELECT * FROM session_website_associations WHERE session_id = '<uuid>';
|
||||
|
||||
-- Check if embeddings exist in Qdrant (via HTTP API)
|
||||
curl http://localhost:6333/collections/website_<hash>/points/scroll
|
||||
```
|
||||
|
||||
**Previous Issue:** In earlier versions, `USE WEBSITE` was removed during preprocessing and never executed at runtime, preventing context injection. Now the function call is preserved in the compiled AST.
|
||||
|
||||
### Common Error Messages
|
||||
|
||||
| Error | Meaning | Fix |
|
||||
|-------|---------|-----|
|
||||
| `Session not found` | Invalid session_id in request | Check auth flow, verify session exists in DB |
|
||||
| `Bot not found` | Invalid bot_name or bot_id | Verify bot exists in `bots` table |
|
||||
| `Script compilation error` | BASIC syntax error in .bas file | Check .bas file syntax, look for typos |
|
||||
| `Failed to send TALK message` | WebSocket disconnected | Check client connection, verify web_adapter running |
|
||||
| `Drive sync failed` | S3 connection or permission issue | Verify S3 credentials, check bucket exists |
|
||||
| `unwrap() called on None value` | Panic in production code | MUST FIX - replace with proper error handling |
|
||||
| `Component not responding: <component_name>` | Infrastructure component not accessible | Check component status: `ps aux | grep <component>`. View logs: `tail -f botserver-stack/logs/<component>/`. Restart via ./restart.sh |
|
||||
| `Config key not found: <key>` | Missing configuration in database | Check `bot_configuration` table. Set correct value via API or direct SQL update. |
|
||||
| `403 Forbidden on POST /api/client-errors` | RBAC blocking client error reporting | FIXED in v6.2.0+ - endpoint now allows anonymous access |
|
||||
|
||||
### Useful Debugging Commands
|
||||
|
||||
```bash
|
||||
# Check if botserver is running
|
||||
ps aux | grep botserver
|
||||
|
||||
# View botserver logs in real-time
|
||||
tail -f botserver/logs/botserver.log
|
||||
|
||||
# Check work directory structure
|
||||
ls -la ./work/*.gbai/*/
|
||||
|
||||
# Test database connection
|
||||
cd botserver && cargo run --bin botserver -- --test-db
|
||||
|
||||
# Run specific test with output
|
||||
cargo test -p botserver test_name -- --nocapture
|
||||
|
||||
# Check for memory leaks during compilation
|
||||
CARGO_BUILD_JOBS=1 cargo check -p botserver 2>&1 | grep -i error
|
||||
```
|
||||
|
||||
### Troubleshooting Workflows
|
||||
|
||||
**Problem: Script not executing**
|
||||
1. Check if .bas file exists in `./work/{bot_name}.gbai/{bot_name}.gbdialog/`
|
||||
2. Verify file has correct syntax (compile with ScriptService)
|
||||
3. Check logs for compilation errors
|
||||
4. Verify drive_monitor is running and syncing files
|
||||
|
||||
**Problem: WebSocket messages not received**
|
||||
1. Check browser console for WebSocket errors
|
||||
2. Verify session_id is valid in database
|
||||
3. Check web_adapter is registered for session
|
||||
4. Look for TALK execution in botserver logs
|
||||
|
||||
**Problem: Component not starting or crashing**
|
||||
1. Identify the component from error message (e.g., Vault, PostgreSQL, MinIO, Qdrant, Valkey)
|
||||
2. Check if process is running: `ps aux | grep <component_name>`
|
||||
3. Check component logs: `tail -f botserver-stack/logs/<component_name>/`
|
||||
4. Common fixes:
|
||||
- Config error: Check `botserver-stack/conf/<component_name>/` for valid configuration
|
||||
- Port conflict: Ensure no other process using the component's port
|
||||
- Permission error: Check file permissions in `botserver-stack/data/<component_name>/`
|
||||
- Missing binary: Re-run `./reset.sh && ./restart.sh` to reinstall components
|
||||
5. Restart: `./restart.sh`
|
||||
|
||||
**Problem: Component configuration errors**
|
||||
1. All component configs stored in database `bot_configuration` table
|
||||
2. Check current value: `SELECT * FROM bot_configuration WHERE config_key = '<key_name>';`
|
||||
3. Update incorrect config: `UPDATE bot_configuration SET config_value = '<correct_value>' WHERE config_key = '<key_name>';`
|
||||
4. For path configs: Ensure paths are relative to component binary or absolute
|
||||
5. Restart botserver after config changes
|
||||
|
||||
**Problem: File not found errors**
|
||||
1. Check if file exists in expected location
|
||||
2. Verify config paths use correct format (relative/absolute)
|
||||
3. Check file permissions: `ls -la <file_path>`
|
||||
4. For model/data files: Ensure downloaded to `botserver-stack/data/<component>/`
|
||||
|
||||
**Problem: LLM not responding**
|
||||
1. Check LLM API credentials in config
|
||||
2. Verify API key has available quota
|
||||
3. Check network connectivity to LLM provider
|
||||
4. Review request/response logs for API errors
|
||||
|
||||
### Performance Profiling
|
||||
|
||||
```bash
|
||||
# Profile compilation time
|
||||
cargo build --release --timings
|
||||
|
||||
# Profile runtime performance
|
||||
cargo flamegraph --bin botserver
|
||||
|
||||
# Check binary size
|
||||
ls -lh target/release/botserver
|
||||
|
||||
# Memory usage
|
||||
valgrind --leak-check=full target/release/botserver
|
||||
```
|
||||
---
|
||||
|
||||
## 📖 Glossary
|
||||
|
||||
|
|
@ -657,284 +514,7 @@ valgrind --leak-check=full target/release/botserver
|
|||
|
||||
---
|
||||
|
||||
## 🔥 Error Fixing Workflow
|
||||
|
||||
### Mode 1: OFFLINE Batch Fix (PREFERRED)
|
||||
|
||||
When given error output:
|
||||
|
||||
```
|
||||
1. Read ENTIRE error list first
|
||||
2. Group errors by file
|
||||
3. For EACH file with errors:
|
||||
a. View file → understand context
|
||||
b. Fix ALL errors in that file
|
||||
c. Write once with all fixes
|
||||
4. Move to next file
|
||||
5. REPEAT until ALL errors addressed
|
||||
6. ONLY THEN → verify with build/diagnostics
|
||||
```
|
||||
|
||||
**NEVER run cargo build/check/clippy DURING fixing**
|
||||
**Fix ALL errors OFFLINE first, verify ONCE at the end**
|
||||
|
||||
### Mode 2: Interactive Loop
|
||||
|
||||
```
|
||||
LOOP UNTIL (0 warnings AND 0 errors):
|
||||
1. Run diagnostics → pick file with issues
|
||||
2. Read entire file
|
||||
3. Fix ALL issues in that file
|
||||
4. Write file once with all fixes
|
||||
5. Verify with diagnostics
|
||||
6. CONTINUE LOOP
|
||||
END LOOP
|
||||
```
|
||||
|
||||
### Common Error Patterns
|
||||
|
||||
| Error | Fix |
|
||||
|-------|-----|
|
||||
| `expected i64, found u64` | `value as i64` |
|
||||
| `expected Option<T>, found T` | `Some(value)` |
|
||||
| `expected T, found Option<T>` | `value.unwrap_or(default)` |
|
||||
| `cannot multiply f32 by f64` | `f64::from(f32_val) * f64_val` |
|
||||
| `no field X on type Y` | Check struct definition |
|
||||
| `no variant X found` | Check enum definition |
|
||||
| `function takes N arguments` | Match function signature |
|
||||
| `cannot find function` | Add missing function or fix import |
|
||||
| `unused variable` | Delete or use with `..` in patterns |
|
||||
| `unused import` | Delete the import line |
|
||||
| `cannot move out of X because borrowed` | Use scoping `{ }` to limit borrow |
|
||||
|
||||
---
|
||||
|
||||
## 🧠 Memory Management
|
||||
|
||||
When compilation fails due to memory issues (process "Killed"):
|
||||
|
||||
```bash
|
||||
pkill -9 cargo; pkill -9 rustc; pkill -9 botserver
|
||||
CARGO_BUILD_JOBS=1 cargo check -p botserver 2>&1 | tail -200
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📏 File Size Limits - MANDATORY
|
||||
|
||||
### Maximum 450 Lines Per File
|
||||
|
||||
When a file grows beyond this limit:
|
||||
|
||||
1. **Identify logical groups** - Find related functions
|
||||
2. **Create subdirectory module** - e.g., `handlers/`
|
||||
3. **Split by responsibility:**
|
||||
- `types.rs` - Structs, enums, type definitions
|
||||
- `handlers.rs` - HTTP handlers and routes
|
||||
- `operations.rs` - Core business logic
|
||||
- `utils.rs` - Helper functions
|
||||
- `mod.rs` - Re-exports and configuration
|
||||
4. **Keep files focused** - Single responsibility
|
||||
5. **Update mod.rs** - Re-export all public items
|
||||
|
||||
**NEVER let a single file exceed 450 lines - split proactively at 350 lines**
|
||||
|
||||
### Current Files Requiring Immediate Refactoring
|
||||
|
||||
| File | Lines | Target Split |
|
||||
|------|-------|--------------|
|
||||
| `botserver/src/drive/mod.rs` | 1522 | → 4 files |
|
||||
| `botserver/src/auto_task/app_generator.rs` | 2981 | → 7 files |
|
||||
| `botui/ui/suite/sheet/sheet.js` | 3220 | → 8 files |
|
||||
| `botserver/src/tasks/mod.rs` | 2651 | → 6 files |
|
||||
| `botserver/src/learn/mod.rs` | 2306 | → 5 files |
|
||||
|
||||
See `TODO-refactor1.md` for detailed refactoring plans.
|
||||
|
||||
---
|
||||
|
||||
## 🔍 Continuous Monitoring
|
||||
|
||||
**YOLO Forever Monitoring Pattern:**
|
||||
|
||||
The system includes automated log monitoring to catch errors in real-time:
|
||||
|
||||
```bash
|
||||
# Continuous monitoring (check every 5 seconds)
|
||||
while true; do
|
||||
sleep 5
|
||||
echo "=== Check at $(date +%H:%M:%S) ==="
|
||||
tail -50 botserver.log | grep -E "ERROR|WARN|CLIENT:" | tail -5 || echo "✓ Clean"
|
||||
done
|
||||
```
|
||||
|
||||
**Quick Status Check:**
|
||||
```bash
|
||||
# Check last 200 lines for any issues
|
||||
tail -200 botserver.log | grep -E "ERROR|WARN|CLIENT:" | tail -10
|
||||
|
||||
# Show recent server activity
|
||||
tail -30 botserver.log
|
||||
|
||||
# Check if server is running
|
||||
ps aux | grep botserver | grep -v grep
|
||||
```
|
||||
|
||||
**Monitoring Dashboard:**
|
||||
- **Server Status**: https://localhost:8088 (health endpoint)
|
||||
- **Logs**: `tail -f botserver.log`
|
||||
- **Client Errors**: Look for `CLIENT:` prefix
|
||||
- **Server Errors**: Look for `ERROR` or `WARN` prefixes
|
||||
|
||||
**Status Indicators:**
|
||||
- ✅ **Clean**: No ERROR/WARN/CLIENT: entries in logs
|
||||
- ⚠️ **Warnings**: Non-critical issues that should be reviewed
|
||||
- ❌ **Errors**: Critical issues requiring immediate attention
|
||||
|
||||
**When Errors Appear:**
|
||||
1. Capture the full error context (50 lines before/after)
|
||||
2. Identify the component (server, client, database, etc.)
|
||||
3. Check troubleshooting section for specific fixes
|
||||
4. Update this README with discovered issues and resolutions
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Performance & Size Standards
|
||||
|
||||
### Binary Size Optimization
|
||||
- **Release Profile**: Always maintain `opt-level = "z"`, `lto = true`, `codegen-units = 1`, `strip = true`, `panic = "abort"`.
|
||||
- **Dependencies**:
|
||||
- Run `cargo tree --duplicates` weekly to find and resolve duplicate versions.
|
||||
- Run `cargo machete` to remove unused dependencies.
|
||||
- Use `default-features = false` and explicitly opt-in to needed features.
|
||||
|
||||
### Memory Optimization
|
||||
- **Strings**: Prefer `&str` over `String` where possible. Use `Cow<str>` for conditional ownership.
|
||||
- **Collections**: Use `Vec::with_capacity` when size is known. Consider `SmallVec` for hot paths.
|
||||
- **Allocations**: Minimize heap allocations in hot paths.
|
||||
- **Cloning**: Avoid unnecessary `.clone()` calls. Use references or `Cow` types.
|
||||
|
||||
### Code Quality Issues Found
|
||||
- **955 instances** of `unwrap()`/`expect()` in codebase - ALL must be replaced with proper error handling
|
||||
- **12,973 instances** of excessive `clone()`/`to_string()` calls - optimize for performance
|
||||
- **Test code exceptions**: `unwrap()` allowed in test files only
|
||||
|
||||
### Linting & Code Quality
|
||||
- **Clippy**: Code MUST pass `cargo clippy --all-targets --all-features` with **0 warnings**.
|
||||
- **No Allow**: Do not use `#[allow(clippy::...)]` unless absolutely necessary and documented. Fix the underlying issue.
|
||||
|
||||
---
|
||||
|
||||
## 🔐 Security Directives - MANDATORY
|
||||
|
||||
### Error Handling - NO PANICS IN PRODUCTION
|
||||
|
||||
```rust
|
||||
// ❌ FORBIDDEN
|
||||
value.unwrap()
|
||||
value.expect("message")
|
||||
panic!("error")
|
||||
todo!()
|
||||
unimplemented!()
|
||||
|
||||
// ✅ REQUIRED
|
||||
value?
|
||||
value.ok_or_else(|| Error::NotFound)?
|
||||
value.unwrap_or_default()
|
||||
value.unwrap_or_else(|e| { log::error!("{}", e); default })
|
||||
if let Some(v) = value { ... }
|
||||
match value { Ok(v) => v, Err(e) => return Err(e.into()) }
|
||||
```
|
||||
|
||||
### Command Execution - USE SafeCommand
|
||||
|
||||
```rust
|
||||
// ❌ FORBIDDEN
|
||||
Command::new("some_command").arg(user_input).output()
|
||||
|
||||
// ✅ REQUIRED
|
||||
use crate::security::command_guard::SafeCommand;
|
||||
SafeCommand::new("allowed_command")?
|
||||
.arg("safe_arg")?
|
||||
.execute()
|
||||
```
|
||||
|
||||
### Error Responses - USE ErrorSanitizer
|
||||
|
||||
```rust
|
||||
// ❌ FORBIDDEN
|
||||
Json(json!({ "error": e.to_string() }))
|
||||
format!("Database error: {}", e)
|
||||
|
||||
// ✅ REQUIRED
|
||||
use crate::security::error_sanitizer::log_and_sanitize;
|
||||
let sanitized = log_and_sanitize(&e, "context", None);
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, sanitized)
|
||||
```
|
||||
|
||||
### SQL - USE sql_guard
|
||||
|
||||
```rust
|
||||
// ❌ FORBIDDEN
|
||||
format!("SELECT * FROM {}", user_table)
|
||||
|
||||
// ✅ REQUIRED
|
||||
use crate::security::sql_guard::{sanitize_identifier, validate_table_name};
|
||||
let safe_table = sanitize_identifier(&user_table);
|
||||
validate_table_name(&safe_table)?;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ❌ Absolute Prohibitions
|
||||
|
||||
```
|
||||
❌ NEVER use .unwrap() or .expect() in production code (tests OK)
|
||||
❌ NEVER use panic!(), todo!(), unimplemented!()
|
||||
❌ NEVER use Command::new() directly - use SafeCommand
|
||||
❌ NEVER return raw error strings to HTTP clients
|
||||
❌ NEVER use #[allow()] in source code - FIX the code instead
|
||||
❌ NEVER add lint exceptions to Cargo.toml - FIX the code instead
|
||||
❌ NEVER use _ prefix for unused variables - DELETE or USE them
|
||||
❌ NEVER leave unused imports or dead code
|
||||
❌ NEVER add comments - code must be self-documenting
|
||||
❌ NEVER modify Cargo.toml lints section!
|
||||
❌ NEVER use CDN links - all assets must be local
|
||||
❌ NEVER use cargo clean - causes 30min rebuilds, use ./reset.sh for database issues
|
||||
❌ NEVER create .md documentation files without checking botbook/ first - documentation belongs there
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ✅ Mandatory Code Patterns
|
||||
|
||||
### Use Self in Impl Blocks
|
||||
```rust
|
||||
impl MyStruct {
|
||||
fn new() -> Self { Self { } } // ✅ Not MyStruct
|
||||
}
|
||||
```
|
||||
|
||||
### Derive Eq with PartialEq
|
||||
```rust
|
||||
#[derive(PartialEq, Eq)] // ✅ Always both
|
||||
struct MyStruct { }
|
||||
```
|
||||
|
||||
### Inline Format Args
|
||||
```rust
|
||||
format!("Hello {name}") // ✅ Not format!("{}", name)
|
||||
```
|
||||
|
||||
### Combine Match Arms
|
||||
```rust
|
||||
match x {
|
||||
A | B => do_thing(), // ✅ Combine identical arms
|
||||
C => other(),
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🖥️ UI Architecture (botui + botserver)
|
||||
|
||||
|
|
@ -943,13 +523,13 @@ match x {
|
|||
| Server | Port | Purpose |
|
||||
|--------|------|---------|
|
||||
| **botui** | 3000 | Serves UI files + proxies API to botserver |
|
||||
| **botserver** | 8088 | Backend API + embedded UI fallback |
|
||||
| **botserver** | 9000 | Backend API + embedded UI fallback |
|
||||
|
||||
### How It Works
|
||||
|
||||
```
|
||||
Browser → localhost:3000 → botui (serves HTML/CSS/JS)
|
||||
→ /api/* proxied to botserver:8088
|
||||
→ /api/* proxied to botserver:9000
|
||||
→ /suite/* served from botui/ui/suite/
|
||||
```
|
||||
|
||||
|
|
@ -1104,44 +684,18 @@ Both repositories must be pushed for changes to take effect in production.
|
|||
|
||||
## Development Workflow
|
||||
|
||||
1. Read this README.md (workspace-level rules)
|
||||
2. **BEFORE creating any .md file, search botbook/ for existing documentation**
|
||||
3. Read `<project>/README.md` (project-specific rules)
|
||||
4. Use diagnostics tool to check warnings
|
||||
5. Fix all warnings with full file rewrites
|
||||
6. Verify with diagnostics after each file
|
||||
7. Never suppress warnings with `#[allow()]`
|
||||
1. Read this README.md (workspace structure)
|
||||
2. Read **[AGENTS.md](./AGENTS.md)** (coding rules & workflows)
|
||||
3. **BEFORE creating any .md file, search botbook/ for existing documentation**
|
||||
4. Read `<project>/README.md` (project-specific rules)
|
||||
5. Use diagnostics tool to check warnings
|
||||
6. Fix all warnings with full file rewrites
|
||||
7. Verify with diagnostics after each file
|
||||
8. Never suppress warnings with `#[allow()]`
|
||||
|
||||
---
|
||||
|
||||
## Main Directive
|
||||
|
||||
**LOOP AND COMPACT UNTIL 0 WARNINGS - MAXIMUM PRECISION**
|
||||
|
||||
- 0 warnings
|
||||
- 0 errors
|
||||
- Trust project diagnostics
|
||||
- Respect all rules
|
||||
- No `#[allow()]` in source code
|
||||
- Real code fixes only
|
||||
|
||||
---
|
||||
|
||||
## 🔑 Remember
|
||||
|
||||
- **OFFLINE FIRST** - Fix all errors from list before compiling
|
||||
- **ZERO WARNINGS, ZERO ERRORS** - The only acceptable state
|
||||
- **FIX, DON'T SUPPRESS** - No #[allow()], no Cargo.toml lint exceptions
|
||||
- **SECURITY FIRST** - No unwrap, no raw errors, no direct commands
|
||||
- **READ BEFORE FIX** - Always understand context first
|
||||
- **BATCH BY FILE** - Fix ALL errors in a file at once
|
||||
- **WRITE ONCE** - Single edit per file with all fixes
|
||||
- **VERIFY LAST** - Only compile/diagnostics after ALL fixes
|
||||
- **DELETE DEAD CODE** - Don't keep unused code around
|
||||
- **Version 6.2.0** - Do not change without approval
|
||||
- **GIT WORKFLOW** - ALWAYS push to ALL repositories (github, pragmatismo)
|
||||
|
||||
---
|
||||
|
||||
## License
|
||||
|
||||
|
|
|
|||
2
botapp
2
botapp
|
|
@ -1 +1 @@
|
|||
Subproject commit b5ee6e061acf1388aef777ddcd9a2bf84bd6ed57
|
||||
Subproject commit 0b556948f970832e8606f886853793e2bc8dc35c
|
||||
2
botbook
2
botbook
|
|
@ -1 +1 @@
|
|||
Subproject commit 827e011ac05084396aaf2c3098409bf5e02b5cf9
|
||||
Subproject commit 82a236f369e58fe0eda4df704b9ee74a725874e8
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit 97778e06dd804be55ff761c7fe2788af0ef50626
|
||||
Subproject commit 35411f4f9e64e54b1039360ab654d537cd2958c9
|
||||
2
botlib
2
botlib
|
|
@ -1 +1 @@
|
|||
Subproject commit 2765fa2ebadc91435e8d90f068b4c96dbb77329b
|
||||
Subproject commit e7caed45a44ab319c64d90f84281dbdbcba905b7
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit 22a1954fac2f87a0a13b5e599771273172afc73a
|
||||
Subproject commit e088a8e69eb8fe064bf1510a720d42abe159ab00
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit 17a3caebabddbe843c2b7fd93f624b0ccd9c44fb
|
||||
Subproject commit 1727e48307fdb7b54c726af8cd6b12669764e908
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit 125b9d43892e797f5d1dcb1909c18d94e20d7bbc
|
||||
Subproject commit 09f4c876b4ad831005e425546a79a2d4ea461652
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit dd3d8c74dd58a1cc6d6b18d22108819519aaf9c3
|
||||
Subproject commit 3110dd587290047f283300d674ad325f4f9b3046
|
||||
2
bottest
2
bottest
|
|
@ -1 +1 @@
|
|||
Subproject commit 74e761de0dd5105885acf00183223a702a8436df
|
||||
Subproject commit 346120cb0b916f72abd2fdad577ae1c606aba1a2
|
||||
2
botui
2
botui
|
|
@ -1 +1 @@
|
|||
Subproject commit e286d64ce310533d648790057600c9fad5070edf
|
||||
Subproject commit dfb8ae656dd745f1fc4feae5c1deab334f90b783
|
||||
12
package.json
12
package.json
|
|
@ -1,12 +0,0 @@
|
|||
{
|
||||
"name": "gb",
|
||||
"version": "1.0.0",
|
||||
"main": "index.js",
|
||||
"author": "Rodrigo Rodriguez (Pragmatismo) <me@rodrigorodriguez.com>",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@playwright/test": "^1.58.1",
|
||||
"@types/node": "^25.2.0"
|
||||
},
|
||||
"scripts": {}
|
||||
}
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
import { defineConfig, devices } from '@playwright/test';
|
||||
|
||||
/**
|
||||
* Read environment variables from file.
|
||||
* https://github.com/motdotla/dotenv
|
||||
*/
|
||||
// import dotenv from 'dotenv';
|
||||
// import path from 'path';
|
||||
// dotenv.config({ path: path.resolve(__dirname, '.env') });
|
||||
|
||||
/**
|
||||
* See https://playwright.dev/docs/test-configuration.
|
||||
*/
|
||||
export default defineConfig({
|
||||
testDir: './tests',
|
||||
/* Run tests in files in parallel */
|
||||
fullyParallel: true,
|
||||
/* Fail the build on CI if you accidentally left test.only in the source code. */
|
||||
forbidOnly: !!process.env.CI,
|
||||
/* Retry on CI only */
|
||||
retries: process.env.CI ? 2 : 0,
|
||||
/* Opt out of parallel tests on CI. */
|
||||
workers: process.env.CI ? 1 : undefined,
|
||||
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
|
||||
reporter: 'html',
|
||||
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
|
||||
use: {
|
||||
/* Base URL to use in actions like `await page.goto('')`. */
|
||||
// baseURL: 'http://localhost:3000',
|
||||
|
||||
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
|
||||
trace: 'on-first-retry',
|
||||
},
|
||||
|
||||
/* Configure projects for major browsers */
|
||||
projects: [
|
||||
{
|
||||
name: 'chromium',
|
||||
use: { ...devices['Desktop Chrome'] },
|
||||
},
|
||||
|
||||
{
|
||||
name: 'firefox',
|
||||
use: { ...devices['Desktop Firefox'] },
|
||||
},
|
||||
|
||||
{
|
||||
name: 'webkit',
|
||||
use: { ...devices['Desktop Safari'] },
|
||||
},
|
||||
|
||||
/* Test against mobile viewports. */
|
||||
// {
|
||||
// name: 'Mobile Chrome',
|
||||
// use: { ...devices['Pixel 5'] },
|
||||
// },
|
||||
// {
|
||||
// name: 'Mobile Safari',
|
||||
// use: { ...devices['iPhone 12'] },
|
||||
// },
|
||||
|
||||
/* Test against branded browsers. */
|
||||
// {
|
||||
// name: 'Microsoft Edge',
|
||||
// use: { ...devices['Desktop Edge'], channel: 'msedge' },
|
||||
// },
|
||||
// {
|
||||
// name: 'Google Chrome',
|
||||
// use: { ...devices['Desktop Chrome'], channel: 'chrome' },
|
||||
// },
|
||||
],
|
||||
|
||||
/* Run your local dev server before starting the tests */
|
||||
// webServer: {
|
||||
// command: 'npm run start',
|
||||
// url: 'http://localhost:3000',
|
||||
// reuseExistingServer: !process.env.CI,
|
||||
// },
|
||||
});
|
||||
1113
prompts/automate-incus.md
Normal file
1113
prompts/automate-incus.md
Normal file
File diff suppressed because it is too large
Load diff
146
prompts/crmex.md
Normal file
146
prompts/crmex.md
Normal file
|
|
@ -0,0 +1,146 @@
|
|||
# Email Campaigns — Feature Plan
|
||||
|
||||
## Existing Foundation (botserver/src/marketing/)
|
||||
- `campaigns.rs` — CrmCampaign model, CRUD handlers
|
||||
- `metrics.rs` — CampaignMetrics, ChannelBreakdown, open/click/conversion rates
|
||||
- `lists.rs` — recipient lists
|
||||
- `templates.rs` — content templates
|
||||
- `triggers.rs` — event-based sending
|
||||
- `email/tracking.rs` — open/click tracking pixels
|
||||
|
||||
---
|
||||
|
||||
## Features to Build
|
||||
|
||||
### 1. Insights Dashboard
|
||||
**What:** Time series views of delivery + engagement metrics per campaign.
|
||||
|
||||
**Data points per time bucket (hourly/daily):**
|
||||
- Sent, delivered, bounced, failed
|
||||
- Opens (unique + total), clicks, replies, unsubscribes
|
||||
- Delivery rate, open rate, click-to-open rate (CTOR)
|
||||
|
||||
**Filters/pivots:**
|
||||
- By mailbox provider (Gmail, Outlook, Yahoo, etc. — parsed from MX/SMTP response)
|
||||
- By sender identity (from address / domain)
|
||||
- By campaign or list
|
||||
- Message search → show exact SMTP response from provider
|
||||
|
||||
**Implementation:**
|
||||
- Add `email_delivery_events` table: `(id, campaign_id, recipient_id, event_type, provider, smtp_response, ts)`
|
||||
- API: `GET /api/campaigns/:id/insights?from=&to=&group_by=provider|identity|day`
|
||||
- UI: HTMX + chart.js time series (local vendor)
|
||||
|
||||
---
|
||||
|
||||
### 2. Advisor Recommendations
|
||||
**What:** Analyze sending config + results and surface actionable fixes.
|
||||
|
||||
**Checks to run:**
|
||||
| Check | Signal | Recommendation |
|
||||
|---|---|---|
|
||||
| SPF/DKIM/DMARC | DNS lookup | "Add missing record" |
|
||||
| Bounce rate > 5% | delivery_events | "Clean list — remove hard bounces" |
|
||||
| Open rate < 15% | metrics | "Improve subject line / send time" |
|
||||
| Spam complaints > 0.1% | FBL data | "Remove complainers immediately" |
|
||||
| Sending from new IP | warmup_schedule | "Follow warmup plan" |
|
||||
| List age > 6 months | list.last_sent | "Re-engagement campaign before bulk send" |
|
||||
|
||||
**Implementation:**
|
||||
- `marketing/advisor.rs` — `AdvisorEngine::analyze(campaign_id) -> Vec<Recommendation>`
|
||||
- API: `GET /api/campaigns/:id/advisor`
|
||||
- Runs automatically after each campaign completes
|
||||
|
||||
---
|
||||
|
||||
### 3. IP Warmup (like OneSignal / Mailchimp)
|
||||
**What:** Gradually increase daily send volume over 4–6 weeks to build sender reputation.
|
||||
|
||||
**Warmup schedule (standard):**
|
||||
| Day | Max emails/day |
|
||||
|---|---|
|
||||
| 1–2 | 50 |
|
||||
| 3–4 | 100 |
|
||||
| 5–7 | 500 |
|
||||
| 8–10 | 1,000 |
|
||||
| 11–14 | 5,000 |
|
||||
| 15–21 | 10,000 |
|
||||
| 22–28 | 50,000 |
|
||||
| 29+ | unlimited |
|
||||
|
||||
**Rules:**
|
||||
- Only send to most engaged subscribers first (opened in last 90 days)
|
||||
- Stop warmup if bounce rate > 3% or complaint rate > 0.1%
|
||||
- Resume next day at same volume if paused
|
||||
|
||||
**Implementation:**
|
||||
- `marketing/warmup.rs` — `WarmupSchedule`, `WarmupEngine::get_daily_limit(ip, day) -> u32`
|
||||
- `warmup_schedules` table: `(id, ip, started_at, current_day, status, paused_reason)`
|
||||
- Scheduler checks warmup limit before each send batch
|
||||
- API: `GET /api/warmup/status`, `POST /api/warmup/start`
|
||||
|
||||
---
|
||||
|
||||
### 4. Optimized Shared Delivery
|
||||
**What:** Auto-select best sending IP based on real-time reputation signals.
|
||||
|
||||
**Logic:**
|
||||
- Track per-IP: bounce rate, complaint rate, delivery rate (last 24h)
|
||||
- Score each IP: `score = delivery_rate - (bounce_rate * 10) - (complaint_rate * 100)`
|
||||
- Route each send to highest-scored IP for that destination provider
|
||||
- Rotate IPs to spread load and preserve reputation
|
||||
|
||||
**Implementation:**
|
||||
- `marketing/ip_router.rs` — `IpRouter::select(destination_domain) -> IpAddr`
|
||||
- `ip_reputation` table: `(ip, provider, bounces, complaints, delivered, window_start)`
|
||||
- Plugs into Stalwart send path via botserver API
|
||||
|
||||
---
|
||||
|
||||
### 5. Modern Email Marketing Features
|
||||
|
||||
| Feature | Description |
|
||||
|---|---|
|
||||
| **Send time optimization** | ML-based per-contact best send time (based on past open history) |
|
||||
| **A/B testing** | Split subject/content, auto-pick winner after N hours |
|
||||
| **Suppression list** | Global unsubscribe/bounce/complaint list, auto-applied to all sends |
|
||||
| **Re-engagement flows** | Auto-trigger "we miss you" to contacts inactive > 90 days |
|
||||
| **Transactional + marketing separation** | Separate IPs/domains for transactional vs bulk |
|
||||
| **One-click unsubscribe** | RFC 8058 `List-Unsubscribe-Post` header on all bulk sends |
|
||||
| **Preview & spam score** | Pre-send SpamAssassin score check |
|
||||
| **Link tracking** | Redirect all links through tracker, record clicks per contact |
|
||||
| **Webhook events** | Push delivery events to external URLs (Stalwart webhook → botserver) |
|
||||
|
||||
---
|
||||
|
||||
## DB Tables to Add
|
||||
|
||||
```sql
|
||||
email_delivery_events (id, campaign_id, recipient_id, event_type, provider, smtp_code, smtp_response, ts)
|
||||
warmup_schedules (id, ip, started_at, current_day, daily_limit, status, paused_reason)
|
||||
ip_reputation (id, ip, provider, delivered, bounced, complained, window_start)
|
||||
advisor_recommendations (id, campaign_id, check_name, severity, message, created_at, dismissed)
|
||||
ab_tests (id, campaign_id, variant_a, variant_b, split_pct, winner, decided_at)
|
||||
suppression_list (id, org_id, email, reason, added_at)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Files to Create
|
||||
```
|
||||
botserver/src/marketing/
|
||||
├── warmup.rs — IP warmup engine + schedule
|
||||
├── advisor.rs — recommendation engine
|
||||
├── ip_router.rs — optimized IP selection
|
||||
├── ab_test.rs — A/B test logic
|
||||
├── suppression.rs — global suppression list
|
||||
└── send_time.rs — send time optimization
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Existing Code to Extend
|
||||
- `marketing/metrics.rs` → add time-series queries + provider breakdown
|
||||
- `marketing/campaigns.rs` → add warmup_enabled, ab_test_id fields
|
||||
- `email/tracking.rs` → already has open/click tracking, extend with provider parsing
|
||||
- `core/shared/schema/` → add new tables above
|
||||
59
prompts/folha.md
Normal file
59
prompts/folha.md
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
# detector - Detecção de Desvios na Folha
|
||||
|
||||
## Objetivo
|
||||
- Bot detector deve usar start.bas para inserir dados via init_folha.bas
|
||||
- detecta.bas deve detectar anomalias nos dados inseridos
|
||||
|
||||
## ✅ Status Atual
|
||||
|
||||
### Correção REM em mod.rs (FEITA)
|
||||
**Arquivo:** `botserver/src/basic/mod.rs` linha ~588-594
|
||||
|
||||
Filtro adicionado para `REM ` e `REM\t` no `compile_tool_script`:
|
||||
```rust
|
||||
!(trimmed.starts_with("PARAM ") ||
|
||||
trimmed.starts_with("PARAM\t") ||
|
||||
trimmed.starts_with("DESCRIPTION ") ||
|
||||
trimmed.starts_with("DESCRIPTION\t") ||
|
||||
trimmed.starts_with("REM ") || // <-- ADICIONADO
|
||||
trimmed.starts_with("REM\t") || // <-- ADICIONADO
|
||||
trimmed.starts_with('\'') ||
|
||||
trimmed.starts_with('#') ||
|
||||
trimmed.is_empty())
|
||||
```
|
||||
|
||||
### Arquivos Envolvidos (VERIFICADOS)
|
||||
- `/opt/gbo/data/detector.gbai/detector.gbdialog/start.bas` ✅ OK
|
||||
- Contém botões de sugestão: detecta e init_folha
|
||||
- `/opt/gbo/data/detector.gbai/detector.gbdialog/init_folha.bas` ✅ OK
|
||||
- 4 INSERT statements para dados de exemplo
|
||||
- `/opt/gbo/data/detector.gbai/detector.gbdialog/detecta.bas` ✅ OK
|
||||
- Usa DETECT keyword
|
||||
- `/opt/gbo/data/detector.gbai/detector.gbdialog/tables.bas` ✅ OK
|
||||
- TABLE folha_salarios definida
|
||||
|
||||
### Botserver (RODANDO)
|
||||
- ✅ Botserver compilado com sucesso
|
||||
- ✅ Botserver rodando em http://localhost:8080
|
||||
- ✅ Health check OK
|
||||
|
||||
## Próximos Passos (Pendentes)
|
||||
|
||||
1. **Testar via navegador** - Necessário instalar Playwright browsers
|
||||
- Navegar para http://localhost:3000/detector
|
||||
- Clicar em "⚙️ Inicializar Dados de Teste"
|
||||
- Verificar se INSERT funciona
|
||||
- Clicar em "🔍 Detectar Desvios na Folha"
|
||||
- Verificar se DETECT funciona
|
||||
|
||||
2. **Verificar se há warnings relevantes**
|
||||
- Alguns warnings de código podem precisar ser corrigidos
|
||||
|
||||
## Cache
|
||||
- AST limpo: `rm ./botserver-stack/data/system/work/detector.gbai/detector.gbdialog/*.ast`
|
||||
- Reiniciado: `./restart.sh`
|
||||
- Botserver: ✅ Rodando
|
||||
|
||||
## Arquivos de Trabalho
|
||||
- Work directory: `./botserver-stack/data/system/work/detector.gbai/detector.gbdialog/`
|
||||
- Todos os arquivos BASIC estão presentes e parecem válidos
|
||||
272
prompts/integratedsuite.md
Normal file
272
prompts/integratedsuite.md
Normal file
|
|
@ -0,0 +1,272 @@
|
|||
# Integrated Suite — Conversational Interface Plan
|
||||
|
||||
> **Pattern:** Every suite app exposes its own `PROMPT.md` + internal tools.
|
||||
> The shared chat bar activates app-specific context when the user is inside that app.
|
||||
> WhatsApp campaigns is the first full example.
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
User (WhatsApp / Suite chat bar)
|
||||
↓
|
||||
BotOrchestrator (core/bot/mod.rs)
|
||||
↓
|
||||
detect active app context
|
||||
↓
|
||||
load app PROMPT.md + app InternalTools
|
||||
↓
|
||||
LLM with tools → tool_executor.rs
|
||||
↓
|
||||
app data / actions
|
||||
```
|
||||
|
||||
### Key existing pieces
|
||||
| File | Role |
|
||||
|---|---|
|
||||
| `core/bot/mod.rs` | `get_session_tools()` + `ToolExecutor::execute_tool_call()` |
|
||||
| `tasks/PROMPT.md` | Pattern for app-level LLM prompt |
|
||||
| `marketing/whatsapp.rs` | WhatsApp campaign send/metrics |
|
||||
| `marketing/campaigns.rs` | Campaign CRUD |
|
||||
| `marketing/lists.rs` | Recipient lists |
|
||||
| `botui/ui/suite/campaigns/` | Campaigns UI |
|
||||
|
||||
---
|
||||
|
||||
## Standard: Every Suite App
|
||||
|
||||
### 1. `PROMPT.md` per app folder
|
||||
Location: `botserver/src/<app>/PROMPT.md`
|
||||
|
||||
```markdown
|
||||
# <App> — Internal Tools Guide
|
||||
|
||||
You are the <App> assistant. When the user is in <App>, you have access to:
|
||||
- tool: list_<entities>
|
||||
- tool: create_<entity>
|
||||
- tool: search_<entity>
|
||||
- tool: <app_specific_action>
|
||||
|
||||
Rules:
|
||||
- Always confirm destructive actions before executing
|
||||
- Show results as structured summaries, not raw JSON
|
||||
- If user uploads a file, parse it and confirm before acting
|
||||
```
|
||||
|
||||
### 2. `tools.rs` per app
|
||||
Location: `botserver/src/<app>/tools.rs`
|
||||
|
||||
Registers `Vec<Tool>` (LLM function-calling schema) + handler mapping.
|
||||
Loaded by `get_session_tools()` when session's active app = this app.
|
||||
|
||||
### 3. App context detection
|
||||
`core/bot/mod.rs` reads `session.active_app` (set by UI via `POST /api/session/context`).
|
||||
Loads `<app>/PROMPT.md` as system prompt prefix + `<app>/tools.rs` tools.
|
||||
|
||||
---
|
||||
|
||||
## WhatsApp Campaigns — Full Conversational Flow
|
||||
|
||||
### Meta Rules (enforced in tools)
|
||||
- Only approved Message Templates for marketing (non-session-initiated)
|
||||
- 24h session window for free-form after user replies
|
||||
- Media: image/video/document via Media Upload API before send
|
||||
- Opt-out: always honor STOP, add to suppression list immediately
|
||||
- Rate: respect per-phone-number rate limits (1000 msg/s business tier)
|
||||
- Template category: MARKETING requires explicit opt-in from recipient
|
||||
|
||||
### Conversation Flow (WhatsApp → campaign creation)
|
||||
|
||||
```
|
||||
User sends to bot number:
|
||||
"I want to send a campaign"
|
||||
↓
|
||||
Bot: "Great! Send me:
|
||||
1. Your contact list (.xlsx or .csv)
|
||||
2. The message text
|
||||
3. An image (optional)
|
||||
4. When to send (or 'now')"
|
||||
↓
|
||||
User uploads contacts.xlsx
|
||||
↓
|
||||
[tool: parse_contact_file]
|
||||
→ extract phone numbers, names
|
||||
→ validate E.164 format
|
||||
→ show preview: "Found 342 contacts. First 3: +55..."
|
||||
↓
|
||||
User sends message text
|
||||
↓
|
||||
[tool: check_template_compliance]
|
||||
→ check if free-form or needs approved template
|
||||
→ if template needed: list available approved templates
|
||||
→ suggest closest match
|
||||
↓
|
||||
User sends image (optional)
|
||||
↓
|
||||
[tool: upload_media]
|
||||
→ upload to Meta Media API
|
||||
→ return media_id
|
||||
↓
|
||||
Bot: "Ready to send to 342 contacts at 14:00 today.
|
||||
Preview: [image] Hello {name}, ...
|
||||
Estimated cost: $X
|
||||
Confirm? (yes/no)"
|
||||
↓
|
||||
User: "yes"
|
||||
↓
|
||||
[tool: create_and_schedule_campaign]
|
||||
→ create campaign record
|
||||
→ apply warmup limit if IP warming
|
||||
→ schedule via TaskScheduler
|
||||
```
|
||||
|
||||
### WhatsApp Campaign Tools (`marketing/whatsapp_tools.rs`)
|
||||
|
||||
```rust
|
||||
// Tool definitions for LLM function calling
|
||||
pub fn whatsapp_campaign_tools() -> Vec<Tool> {
|
||||
vec![
|
||||
Tool::new("parse_contact_file", "Parse uploaded xlsx/csv into contact list"),
|
||||
Tool::new("list_templates", "List approved WhatsApp message templates"),
|
||||
Tool::new("check_template_compliance", "Check if message needs approved template"),
|
||||
Tool::new("upload_media", "Upload image/video to Meta Media API"),
|
||||
Tool::new("preview_campaign", "Show campaign preview with cost estimate"),
|
||||
Tool::new("create_and_schedule_campaign", "Create campaign and schedule send"),
|
||||
Tool::new("get_campaign_status", "Get delivery/read metrics for a campaign"),
|
||||
Tool::new("pause_campaign", "Pause an in-progress campaign"),
|
||||
Tool::new("list_campaigns", "List recent campaigns with metrics"),
|
||||
Tool::new("add_to_suppression", "Add number to opt-out list"),
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### WhatsApp PROMPT.md (`marketing/WHATSAPP_PROMPT.md`)
|
||||
|
||||
```markdown
|
||||
# WhatsApp Campaign Assistant
|
||||
|
||||
You help users create and manage WhatsApp marketing campaigns.
|
||||
|
||||
## Meta Platform Rules (MANDATORY)
|
||||
- Marketing messages MUST use pre-approved templates outside 24h session window
|
||||
- Always check opt-in status before adding to campaign
|
||||
- Honor STOP/unsubscribe immediately via add_to_suppression tool
|
||||
- Never send more than warmup daily limit if IP is warming up
|
||||
- Image must be uploaded via upload_media before referencing in campaign
|
||||
|
||||
## Conversation Style
|
||||
- Guide step by step: contacts → message → media → schedule → confirm
|
||||
- Show cost estimate before confirming
|
||||
- After send: proactively share open/read rates when available
|
||||
|
||||
## File Handling
|
||||
- .xlsx/.csv → use parse_contact_file tool
|
||||
- Images → use upload_media tool
|
||||
- Always confirm parsed data before proceeding
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Integrated Suite Chat Bar — Standard
|
||||
|
||||
### How it works
|
||||
1. User opens any suite app (CRM, Campaigns, Drive, etc.)
|
||||
2. Chat bar at bottom activates with app context
|
||||
3. `POST /api/session/context { app: "campaigns" }` sets `session.active_app`
|
||||
4. BotOrchestrator loads `campaigns/PROMPT.md` + `campaigns/tools.rs`
|
||||
5. User can ask natural language questions or trigger actions
|
||||
|
||||
### Examples per app
|
||||
|
||||
| App | Example query | Tool activated |
|
||||
|---|---|---|
|
||||
| **Campaigns** | "How did last week's campaign perform?" | `get_campaign_metrics` |
|
||||
| **CRM** | "Show deals closing this month" | `list_deals` with filter |
|
||||
| **Drive** | "Find the Q1 report" | `search_files` |
|
||||
| **Tasks** | "Create a task to follow up with Acme" | `create_task` |
|
||||
| **People** | "Who hasn't been contacted in 30 days?" | `list_contacts` with filter |
|
||||
| **Mail** | "Summarize unread emails from clients" | `list_emails` + LLM summary |
|
||||
| **Sheet** | "What's the total revenue in column D?" | `query_sheet` |
|
||||
| **Learn** | "What does our refund policy say?" | `search_kb` |
|
||||
|
||||
---
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1 — Infrastructure (1 sprint)
|
||||
- [ ] `core/bot/mod.rs` — read `session.active_app`, load app PROMPT + tools
|
||||
- [ ] `core/tool_context.rs` — app tool registry: `register_app_tools(app_name) -> Vec<Tool>`
|
||||
- [ ] `POST /api/session/context` — set active app from UI
|
||||
- [ ] Suite chat bar UI component (`botui/ui/suite/partials/chatbar.html`)
|
||||
|
||||
### Phase 2 — WhatsApp Campaigns (1 sprint)
|
||||
- [ ] `marketing/whatsapp_tools.rs` — 10 tools above
|
||||
- [ ] `marketing/WHATSAPP_PROMPT.md`
|
||||
- [ ] `marketing/file_parser.rs` — xlsx/csv → contact list
|
||||
- [ ] Meta warmup enforcement in send path
|
||||
- [ ] Conversational campaign creation flow (state machine in session)
|
||||
|
||||
### Phase 3 — App-by-app rollout (1 app/sprint)
|
||||
Priority order based on value:
|
||||
1. CRM (deals, contacts, pipeline queries)
|
||||
2. Campaigns (email + WhatsApp)
|
||||
3. Tasks (create, assign, status)
|
||||
4. Drive (search, summarize docs)
|
||||
5. Mail (summarize, draft reply)
|
||||
6. People (segment, find contacts)
|
||||
7. Sheet (query, calculate)
|
||||
8. Learn (KB search)
|
||||
|
||||
### Phase 4 — Cross-app intelligence
|
||||
- [ ] Global search across all apps via single query
|
||||
- [ ] "What happened today?" — aggregates activity across CRM + Mail + Tasks
|
||||
- [ ] Proactive suggestions: "You have 3 deals closing this week and no follow-up tasks"
|
||||
|
||||
---
|
||||
|
||||
## File Structure to Create
|
||||
|
||||
```
|
||||
botserver/src/
|
||||
├── marketing/
|
||||
│ ├── whatsapp_tools.rs ← NEW: LLM tool definitions + handlers
|
||||
│ ├── WHATSAPP_PROMPT.md ← NEW: WhatsApp assistant system prompt
|
||||
│ ├── file_parser.rs ← NEW: xlsx/csv → contacts
|
||||
│ └── warmup.rs ← NEW: (from campaigns.md plan)
|
||||
├── core/
|
||||
│ ├── tool_registry.rs ← NEW: app → tools mapping
|
||||
│ └── bot/
|
||||
│ └── app_context.rs ← NEW: load app prompt + tools per session
|
||||
├── crm/
|
||||
│ ├── tools.rs ← NEW
|
||||
│ └── PROMPT.md ← NEW
|
||||
├── tasks/
|
||||
│ └── tools.rs ← NEW (PROMPT.md exists)
|
||||
└── <each app>/
|
||||
├── tools.rs ← NEW per app
|
||||
└── PROMPT.md ← NEW per app
|
||||
|
||||
botui/ui/suite/
|
||||
└── partials/
|
||||
└── chatbar.html ← NEW: shared chat bar component
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Chat Bar UI (`partials/chatbar.html`)
|
||||
|
||||
```html
|
||||
<div id="suite-chatbar" class="chatbar">
|
||||
<div id="chatbar-messages" hx-ext="ws" ws-connect="/ws/suite-chat"></div>
|
||||
<form ws-send>
|
||||
<input type="hidden" name="app_context" value="{{ active_app }}">
|
||||
<input type="file" id="chatbar-file" name="file" accept=".xlsx,.csv,.png,.jpg,.pdf" style="display:none">
|
||||
<button type="button" onclick="document.getElementById('chatbar-file').click()">📎</button>
|
||||
<input type="text" name="message" placeholder="Ask anything about {{ active_app }}...">
|
||||
<button type="submit">→</button>
|
||||
</form>
|
||||
</div>
|
||||
```
|
||||
|
||||
File uploads go to `POST /api/suite/upload` → stored in Drive → media_id passed to tool.
|
||||
434
prompts/switcher.md
Normal file
434
prompts/switcher.md
Normal file
|
|
@ -0,0 +1,434 @@
|
|||
# SWITCHER Feature - Response Format Modifiers
|
||||
|
||||
## Overview
|
||||
Add a switcher interface that allows users to toggle response modifiers that influence how the AI generates responses. Unlike suggestions (which are one-time actions), switchers are persistent toggles that remain active until deactivated.
|
||||
|
||||
## Location
|
||||
`botui/ui/suite/chat/` - alongside existing suggestion buttons
|
||||
|
||||
## Syntax
|
||||
|
||||
### Standard Switcher (predefined prompt)
|
||||
```
|
||||
ADD SWITCHER "tables" AS "Tabelas"
|
||||
```
|
||||
|
||||
### Custom Switcher (with custom prompt)
|
||||
```
|
||||
ADD SWITCHER "sempre mostrar 10 perguntas" AS "Mostrar Perguntas"
|
||||
```
|
||||
|
||||
## What Switcher Does
|
||||
|
||||
The switcher:
|
||||
1. **Injects the prompt** into every LLM request
|
||||
2. **The prompt** can be:
|
||||
- **Standard**: References a predefined prompt by ID (`"tables"`, `"cards"`, etc.)
|
||||
- **Custom**: Any custom instruction string (`"sempre mostrar 10 perguntas"`)
|
||||
3. **Influences** the AI response format
|
||||
4. **Persists** until toggled OFF
|
||||
|
||||
## Available Standard Switchers
|
||||
|
||||
| ID | Label | Color | Description |
|
||||
|----|--------|--------|-------------|
|
||||
| tables | Tabelas | #4CAF50 | Format responses as tables |
|
||||
| infographic | Infográfico | #2196F3 | Visual, graphical representations |
|
||||
| cards | Cards | #FF9800 | Card-based layout |
|
||||
| list | Lista | #9C27B0 | Bulleted lists |
|
||||
| comparison | Comparação | #E91E63 | Side-by-side comparisons |
|
||||
| timeline | Timeline | #00BCD4 | Chronological ordering |
|
||||
| markdown | Markdown | #607D8B | Standard markdown |
|
||||
| chart | Gráfico | #F44336 | Charts and diagrams |
|
||||
|
||||
## Predefined Prompts (Backend)
|
||||
|
||||
Each standard ID maps to a predefined prompt in the backend:
|
||||
|
||||
```
|
||||
ID: tables
|
||||
Prompt: "REGRAS DE FORMATO: SEMPRE retorne suas respostas em formato de tabela HTML usando <table>, <thead>, <tbody>, <tr>, <th>, <td>. Cada dado deve ser uma célula. Use cabeçalhos claros na primeira linha. Se houver dados numéricos, alinhe à direita. Se houver texto, alinhe à esquerda. Use cores sutis em linhas alternadas (nth-child). NÃO use markdown tables, use HTML puro."
|
||||
|
||||
ID: infographic
|
||||
Prompt: "REGRAS DE FORMATO: Crie representações visuais HTML usando SVG, progress bars, stat cards, e elementos gráficos. Use elementos como: <svg> para gráficos, <div style="width:X%;background:color"> para barras de progresso, ícones emoji, badges coloridos. Organize informações visualmente com grids, flexbox, e espaçamento. Inclua legendas e rótulos visuais claros."
|
||||
|
||||
ID: cards
|
||||
Prompt: "REGRAS DE FORMATO: Retorne informações em formato de cards HTML. Cada card deve ter: <div class="card" style="border:1px solid #ddd;border-radius:8px;padding:16px;margin:8px;box-shadow:0 2px 4px rgba(0,0,0,0.1)">. Dentro do card use: título em <h3> ou <strong>, subtítulo em <p> style="color:#666", ícone emoji ou ícone SVG no topo, badges de status. Organize cards em grid usando display:grid ou flex-wrap."
|
||||
|
||||
ID: list
|
||||
Prompt: "REGRAS DE FORMATO: Use apenas listas HTML: <ul> para bullets e <ol> para números numerados. Cada item em <li>. Use sublistas aninhadas quando apropriado. NÃO use parágrafos de texto, converta tudo em itens de lista. Adicione ícones emoji no início de cada <li> quando possível. Use classes CSS para estilização: .list-item, .sub-list."
|
||||
|
||||
ID: comparison
|
||||
Prompt: "REGRAS DE FORMATO: Crie comparações lado a lado em HTML. Use grid de 2 colunas: <div style="display:grid;grid-template-columns:1fr 1fr;gap:20px">. Cada lado em uma <div class="comparison-side"> com borda colorida distinta. Use headers claros para cada lado. Adicione seção de "Diferenças Chave" com bullet points. Use cores contrastantes para cada lado (ex: azul vs laranja). Inclua tabela de comparação resumida no final."
|
||||
|
||||
ID: timeline
|
||||
Prompt: "REGRAS DE FORMATO: Organize eventos cronologicamente em formato de timeline HTML. Use <div class="timeline"> com border-left vertical. Cada evento em <div class="timeline-item"> com: data em <span class="timeline-date" style="font-weight:bold;color:#666">, título em <h3>, descrição em <p>. Adicione círculo indicador na timeline line. Ordene do mais antigo para o mais recente. Use espaçamento claro entre eventos."
|
||||
|
||||
ID: markdown
|
||||
Prompt: "REGRAS DE FORMATO: Use exclusivamente formato Markdown padrão. Sintaxe permitida: **negrito**, *itálico*, `inline code`, ```bloco de código```, # cabeçalhos, - bullets, 1. números, [links](url), , | tabela | markdown |. NÃO use HTML tags exceto para blocos de código. Siga estritamente a sintaxe CommonMark."
|
||||
|
||||
ID: chart
|
||||
Prompt: "REGRAS DE FORMATO: Crie gráficos e diagramas em HTML SVG. Use elementos SVG: <svg width="X" height="Y">, <line> para gráficos de linha, <rect> para gráficos de barra, <circle> para gráficos de pizza, <path> para gráficos de área. Inclua eixos com labels, grid lines, legendas. Use cores distintas para cada série de dados (ex: vermelho, azul, verde). Adicione tooltips com valores ao hover. Se o usuário pedir gráfico de pizza com "pizza vermelha", use fill="#FF0000" no SVG."
|
||||
```
|
||||
|
||||
## UI Design
|
||||
|
||||
### HTML Structure
|
||||
```html
|
||||
<div class="switchers-container" id="switchers">
|
||||
<div class="switchers-label">Formato:</div>
|
||||
<div class="switchers-chips" id="switchersChips">
|
||||
<!-- Switcher chips will be rendered here -->
|
||||
</div>
|
||||
</div>
|
||||
```
|
||||
|
||||
### Placement
|
||||
Position the switchers container **above** the suggestions container:
|
||||
```html
|
||||
<footer>
|
||||
<div class="switchers-container" id="switchers"></div>
|
||||
<div class="suggestions-container" id="suggestions"></div>
|
||||
<!-- ... existing form ... -->
|
||||
</footer>
|
||||
```
|
||||
|
||||
### CSS Styling
|
||||
|
||||
#### Container
|
||||
```css
|
||||
.switchers-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 12px;
|
||||
padding: 8px 16px;
|
||||
flex-wrap: wrap;
|
||||
background: rgba(0, 0, 0, 0.02);
|
||||
border-top: 1px solid rgba(0, 0, 0, 0.05);
|
||||
}
|
||||
|
||||
.switchers-label {
|
||||
font-size: 13px;
|
||||
font-weight: 600;
|
||||
color: #666;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.5px;
|
||||
}
|
||||
```
|
||||
|
||||
#### Switcher Chips (Toggle Buttons)
|
||||
```css
|
||||
.switchers-chips {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
|
||||
.switcher-chip {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
padding: 6px 12px;
|
||||
border-radius: 20px;
|
||||
border: 2px solid transparent;
|
||||
font-size: 13px;
|
||||
font-weight: 500;
|
||||
cursor: pointer;
|
||||
transition: all 0.2s ease;
|
||||
background: rgba(0, 0, 0, 0.05);
|
||||
color: #666;
|
||||
user-select: none;
|
||||
}
|
||||
|
||||
.switcher-chip:hover {
|
||||
background: rgba(0, 0, 0, 0.08);
|
||||
transform: translateY(-1px);
|
||||
}
|
||||
|
||||
.switcher-chip.active {
|
||||
border-color: currentColor;
|
||||
background: currentColor;
|
||||
color: white;
|
||||
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15);
|
||||
}
|
||||
|
||||
.switcher-chip-icon {
|
||||
font-size: 14px;
|
||||
}
|
||||
```
|
||||
|
||||
## JavaScript Implementation
|
||||
|
||||
### State Management
|
||||
```javascript
|
||||
// Track active switchers
|
||||
var activeSwitchers = new Set();
|
||||
|
||||
// Switcher definitions (from ADD SWITCHER commands in start.bas)
|
||||
var switcherDefinitions = [
|
||||
{
|
||||
id: 'tables',
|
||||
label: 'Tabelas',
|
||||
icon: '📊',
|
||||
color: '#4CAF50'
|
||||
},
|
||||
{
|
||||
id: 'infographic',
|
||||
label: 'Infográfico',
|
||||
icon: '📈',
|
||||
color: '#2196F3'
|
||||
},
|
||||
{
|
||||
id: 'cards',
|
||||
label: 'Cards',
|
||||
icon: '🃏',
|
||||
color: '#FF9800'
|
||||
},
|
||||
{
|
||||
id: 'list',
|
||||
label: 'Lista',
|
||||
icon: '📋',
|
||||
color: '#9C27B0'
|
||||
},
|
||||
{
|
||||
id: 'comparison',
|
||||
label: 'Comparação',
|
||||
icon: '⚖️',
|
||||
color: '#E91E63'
|
||||
},
|
||||
{
|
||||
id: 'timeline',
|
||||
label: 'Timeline',
|
||||
icon: '📅',
|
||||
color: '#00BCD4'
|
||||
},
|
||||
{
|
||||
id: 'markdown',
|
||||
label: 'Markdown',
|
||||
icon: '📝',
|
||||
color: '#607D8B'
|
||||
},
|
||||
{
|
||||
id: 'chart',
|
||||
label: 'Gráfico',
|
||||
icon: '📉',
|
||||
color: '#F44336'
|
||||
}
|
||||
];
|
||||
```
|
||||
|
||||
### Render Switchers
|
||||
```javascript
|
||||
function renderSwitchers() {
|
||||
var container = document.getElementById("switcherChips");
|
||||
if (!container) return;
|
||||
|
||||
container.innerHTML = switcherDefinitions.map(function(sw) {
|
||||
var isActive = activeSwitchers.has(sw.id);
|
||||
return (
|
||||
'<div class="switcher-chip' + (isActive ? ' active' : '') + '" ' +
|
||||
'data-switch-id="' + sw.id + '" ' +
|
||||
'style="--switcher-color: ' + sw.color + '; ' +
|
||||
(isActive ? 'color: ' + sw.color + ' background: ' + sw.color + '; ' : '') +
|
||||
'">' +
|
||||
'<span class="switcher-chip-icon">' + sw.icon + '</span>' +
|
||||
'<span>' + sw.label + '</span>' +
|
||||
'</div>'
|
||||
);
|
||||
}).join('');
|
||||
|
||||
// Add click handlers
|
||||
container.querySelectorAll('.switcher-chip').forEach(function(chip) {
|
||||
chip.addEventListener('click', function() {
|
||||
toggleSwitcher(this.getAttribute('data-switch-id'));
|
||||
});
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
### Toggle Handler
|
||||
```javascript
|
||||
function toggleSwitcher(switcherId) {
|
||||
if (activeSwitchers.has(switcherId)) {
|
||||
activeSwitchers.delete(switcherId);
|
||||
} else {
|
||||
activeSwitchers.add(switcherId);
|
||||
}
|
||||
renderSwitchers();
|
||||
}
|
||||
```
|
||||
|
||||
### Message Enhancement
|
||||
When sending a user message, prepend active switcher prompts:
|
||||
|
||||
```javascript
|
||||
function sendMessage(messageContent) {
|
||||
// ... existing code ...
|
||||
|
||||
var content = messageContent || input.value.trim();
|
||||
if (!content) return;
|
||||
|
||||
// Prepend active switcher prompts
|
||||
var enhancedContent = content;
|
||||
if (activeSwitchers.size > 0) {
|
||||
// Get prompts for active switchers from backend
|
||||
var activePrompts = [];
|
||||
activeSwitchers.forEach(function(id) {
|
||||
// Backend has predefined prompts for each ID
|
||||
activePrompts.push(getSwitcherPrompt(id));
|
||||
});
|
||||
|
||||
// Inject prompts before user message
|
||||
if (activePrompts.length > 0) {
|
||||
enhancedContent = activePrompts.join('\n\n') + '\n\n---\n\n' + content;
|
||||
}
|
||||
}
|
||||
|
||||
// Send enhanced content
|
||||
addMessage("user", content);
|
||||
|
||||
if (ws && ws.readyState === WebSocket.OPEN) {
|
||||
ws.send(JSON.stringify({
|
||||
bot_id: currentBotId,
|
||||
user_id: currentUserId,
|
||||
session_id: currentSessionId,
|
||||
channel: "web",
|
||||
content: enhancedContent,
|
||||
message_type: MessageType.USER,
|
||||
timestamp: new Date().toISOString(),
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
function getSwitcherPrompt(switcherId) {
|
||||
// Get predefined prompt from backend or API
|
||||
// For example, tables ID maps to:
|
||||
// "REGRAS DE FORMATO: SEMPRE retorne suas respostas em formato de tabela HTML..."
|
||||
var switcher = switcherDefinitions.find(function(s) { return s.id === switcherId; });
|
||||
if (!switcher) return "";
|
||||
|
||||
// This could be fetched from backend or stored locally
|
||||
return SWITCHER_PROMPTS[switcherId] || "";
|
||||
}
|
||||
```
|
||||
|
||||
## Bot Integration (start.bas)
|
||||
|
||||
The bot receives the switcher prompt injected into the user message and simply passes it to the LLM.
|
||||
|
||||
### Example in start.bas
|
||||
|
||||
```basic
|
||||
REM Switcher prompts are automatically injected by frontend
|
||||
REM Just pass user_input to LLM - no parsing needed!
|
||||
|
||||
REM If user types: "mostra os cursos"
|
||||
REM And "Tabelas" switcher is active
|
||||
REM Frontend sends: "REGRAS DE FORMATO: SEMPRE retorne suas respostas em formato de tabela HTML... --- mostra os cursos"
|
||||
|
||||
REM Bot passes directly to LLM:
|
||||
response$ = CALL_LLM(user_input)
|
||||
|
||||
REM The LLM will follow the REGRAS DE FORMATO instructions
|
||||
```
|
||||
|
||||
### Multiple Active Switchers
|
||||
|
||||
When multiple switchers are active, all prompts are injected:
|
||||
|
||||
```basic
|
||||
REM Frontend injects multiple REGRAS DE FORMATO blocks
|
||||
REM Example with "Tabelas" and "Gráfico" active:
|
||||
REM
|
||||
REM "REGRAS DE FORMATO: SEMPRE retorne suas respostas em formato de tabela HTML...
|
||||
REM REGRAS DE FORMATO: Crie gráficos e diagramas em HTML SVG...
|
||||
REM ---
|
||||
REM mostra os dados de vendas"
|
||||
|
||||
REM Bot passes to LLM:
|
||||
response$ = CALL_LLM(user_input)
|
||||
```
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
1. ✅ Create prompts/switcher.md (this file)
|
||||
2. ⬜ Define predefined prompts in backend (map IDs to prompt strings)
|
||||
3. ⬜ Add HTML structure to chat.html (switchers container)
|
||||
4. ⬜ Add CSS styles to chat.css (switcher chip styles)
|
||||
5. ⬜ Add switcher definitions to chat.js
|
||||
6. ⬜ Implement renderSwitchers() function
|
||||
7. ⬜ Implement toggleSwitcher() function
|
||||
8. ⬜ Modify sendMessage() to prepend switcher prompts
|
||||
9. ⬜ Update salesianos bot start.bas to use ADD SWITCHER commands
|
||||
10. ⬜ Test locally with all switcher options
|
||||
11. ⬜ Verify multiple switchers can be active simultaneously
|
||||
12. ⬜ Test persistence across page refreshes (optional - localStorage)
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
- [ ] Switchers appear above suggestions
|
||||
- [ ] Switchers are colorful and match their defined colors
|
||||
- [ ] Clicking a switcher toggles it on/off
|
||||
- [ ] Multiple switchers can be active simultaneously
|
||||
- [ ] Active switchers have distinct visual state (border, background, shadow)
|
||||
- [ ] Formatted responses match the selected format
|
||||
- [ ] Toggling off removes the format modifier
|
||||
- [ ] Works with empty active switchers (normal response)
|
||||
- [ ] Works in combination with suggestions
|
||||
- [ ] Responsive design on mobile devices
|
||||
|
||||
## Files to Modify
|
||||
|
||||
1. `botui/ui/suite/chat/chat.html` - Add switcher container HTML
|
||||
2. `botui/ui/suite/chat/chat.css` - Add switcher styles
|
||||
3. `botui/ui/suite/chat/chat.js` - Add switcher logic
|
||||
4. `botserver/bots/salesianos/start.bas` - Add ADD SWITCHER commands
|
||||
|
||||
## Example start.bas
|
||||
|
||||
```basic
|
||||
USE_WEBSITE("https://salesianos.br", "30d")
|
||||
|
||||
USE KB "carta"
|
||||
USE KB "proc"
|
||||
|
||||
USE TOOL "inscricao"
|
||||
USE TOOL "consultar_inscricao"
|
||||
USE TOOL "agendamento_visita"
|
||||
USE TOOL "informacoes_curso"
|
||||
USE TOOL "documentos_necessarios"
|
||||
USE TOOL "contato_secretaria"
|
||||
USE TOOL "calendario_letivo"
|
||||
|
||||
ADD_SUGGESTION_TOOL "inscricao" AS "Fazer Inscrição"
|
||||
ADD_SUGGESTION_TOOL "consultar_inscricao" AS "Consultar Inscrição"
|
||||
ADD_SUGGESTION_TOOL "agendamento_visita" AS "Agendar Visita"
|
||||
ADD_SUGGESTION_TOOL "informacoes_curso" AS "Informações de Cursos"
|
||||
ADD_SUGGESTION_TOOL "documentos_necessarios" AS "Documentos Necessários"
|
||||
ADD_SUGGESTION_TOOL "contato_secretaria" AS "Falar com Secretaria"
|
||||
ADD_SUGGESTION_TOOL "segunda_via" AS "Segunda Via de Boleto"
|
||||
ADD_SUGGESTION_TOOL "calendario_letivo" AS "Calendário Letivo"
|
||||
ADD_SUGGESTION_TOOL "outros" AS "Outros"
|
||||
|
||||
ADD SWITCHER "tables" AS "Tabelas"
|
||||
ADD SWITCHER "infographic" AS "Infográfico"
|
||||
ADD SWITCHER "cards" AS "Cards"
|
||||
ADD SWITCHER "list" AS "Lista"
|
||||
ADD SWITCHER "comparison" AS "Comparação"
|
||||
ADD SWITCHER "timeline" AS "Timeline"
|
||||
ADD SWITCHER "markdown" AS "Markdown"
|
||||
ADD SWITCHER "chart" AS "Gráfico"
|
||||
|
||||
TALK "Olá! Sou o assistente virtual da Escola Salesiana. Como posso ajudá-lo hoje com inscrições, visitas, informações sobre cursos, documentos ou calendário letivo? Você pode também escolher formatos de resposta acima da caixa de mensagem."
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- Switchers are **persistent** until deactivated
|
||||
- Multiple switchers can be active at once
|
||||
- Switcher prompts are prepended to user messages with "---" separator
|
||||
- The backend (LLM) should follow these format instructions
|
||||
- UI should provide clear visual feedback for active switchers
|
||||
- Color coding helps users quickly identify active formats
|
||||
- Standard switchers use predefined prompts in backend
|
||||
- Custom switchers allow any prompt string to be injected
|
||||
154
prompts/usekb2.md
Normal file
154
prompts/usekb2.md
Normal file
|
|
@ -0,0 +1,154 @@
|
|||
# USE KB 2.0: Group-Based Knowledge Base Access
|
||||
|
||||
## Overview
|
||||
Modify the USE KB keyword to respect user group permissions, ensuring that THINK KB queries only return answers from knowledge base folders that belong to groups the logged-in user is a member of.
|
||||
|
||||
## Current Architecture
|
||||
|
||||
### USE KB Flow
|
||||
1. User executes `USE KB "kb_name"` in BASIC script
|
||||
2. `use_kb.rs:add_kb_to_session()` checks if KB exists in `kb_collections`
|
||||
3. Creates default KB entry if not found
|
||||
4. Adds association to `session_kb_associations` table
|
||||
5. KB becomes active for the session
|
||||
|
||||
### THINK KB Flow
|
||||
1. User executes `THINK KB "query"`
|
||||
2. `think_kb.rs:think_kb_search()` gets all active KBs from `session_kb_associations`
|
||||
3. For each active KB, calls `KnowledgeBaseManager.search()` on its Qdrant collection
|
||||
4. Returns combined results from all active KBs
|
||||
|
||||
### Group System
|
||||
- Groups stored in `rbac_groups` table
|
||||
- User membership in `rbac_user_groups` table
|
||||
- Group permissions via `rbac_group_roles` table
|
||||
|
||||
## Proposed Changes
|
||||
|
||||
### 1. Database Schema Changes
|
||||
|
||||
Add new table `kb_group_associations`:
|
||||
|
||||
```sql
|
||||
CREATE TABLE kb_group_associations (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
kb_id UUID NOT NULL REFERENCES kb_collections(id) ON DELETE CASCADE,
|
||||
group_id UUID NOT NULL REFERENCES rbac_groups(id) ON DELETE CASCADE,
|
||||
granted_by UUID REFERENCES users(id) ON DELETE SET NULL,
|
||||
granted_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(kb_id, group_id)
|
||||
);
|
||||
```
|
||||
|
||||
Migration file: `botserver/migrations/6.2.0-01-kb-groups/up.sql`
|
||||
|
||||
### 2. Backend Logic Changes
|
||||
|
||||
#### Modify `think_kb_search()` in `think_kb.rs`
|
||||
- Add user group lookup before searching
|
||||
- Filter active KBs to only those accessible by user's groups
|
||||
- Allow access if KB has no group associations (public KBs) OR user is in associated groups
|
||||
|
||||
```rust
|
||||
async fn think_kb_search(
|
||||
kb_manager: Arc<KnowledgeBaseManager>,
|
||||
db_pool: DbPool,
|
||||
session_id: Uuid,
|
||||
bot_id: Uuid,
|
||||
user_id: Uuid, // Add user_id parameter
|
||||
query: &str,
|
||||
) -> Result<serde_json::Value, String> {
|
||||
// Get user's groups
|
||||
let user_groups = get_user_groups(&db_pool, user_id)?;
|
||||
|
||||
// Get active KBs filtered by groups
|
||||
let accessible_kbs = get_accessible_kbs_for_session(&db_pool, session_id, &user_groups)?;
|
||||
|
||||
// Search only accessible KBs
|
||||
// ... rest of search logic
|
||||
}
|
||||
```
|
||||
|
||||
#### Add `get_accessible_kbs_for_session()` function
|
||||
```rust
|
||||
fn get_accessible_kbs_for_session(
|
||||
conn_pool: &DbPool,
|
||||
session_id: Uuid,
|
||||
user_groups: &[String],
|
||||
) -> Result<Vec<(String, String, String)>, String> {
|
||||
// Query that joins session_kb_associations with kb_group_associations
|
||||
// Returns KBs where group_id IS NULL (public) OR group_id IN user_groups
|
||||
}
|
||||
```
|
||||
|
||||
#### Modify `add_kb_to_session()` in `use_kb.rs`
|
||||
- Add optional group access check
|
||||
- Allow USE KB if user has access to the KB's groups
|
||||
|
||||
### 3. API Changes
|
||||
|
||||
Add new endpoints in `rbac.rs` for KB-group management:
|
||||
|
||||
```rust
|
||||
// Assign KB to group
|
||||
POST /api/rbac/kbs/{kb_id}/groups/{group_id}
|
||||
|
||||
// Remove KB from group
|
||||
DELETE /api/rbac/kbs/{kb_id}/groups/{group_id}
|
||||
|
||||
// Get groups for KB
|
||||
GET /api/rbac/kbs/{kb_id}/groups
|
||||
|
||||
// Get KBs accessible by user
|
||||
GET /api/rbac/users/{user_id}/accessible-kbs
|
||||
```
|
||||
|
||||
### 4. Frontend Changes
|
||||
|
||||
#### Update `botui/ui/suite/admin/groups.html`
|
||||
- Add "Knowledge Bases" tab to group detail panel
|
||||
- Show list of KBs assigned to the group
|
||||
- Allow adding/removing KB assignments
|
||||
|
||||
#### Update `botui/ui/suite/drive/drive.html`
|
||||
- Add group visibility indicators for KB folders
|
||||
- Show which groups have access to each KB
|
||||
|
||||
### 5. Migration Strategy
|
||||
|
||||
1. Create new migration for `kb_group_associations` table
|
||||
2. Run migration to create table
|
||||
3. Assign existing KBs to default groups (e.g., "all_users" group)
|
||||
4. Update application code
|
||||
5. Deploy and test
|
||||
|
||||
### 6. Backward Compatibility
|
||||
|
||||
- Existing KBs without group associations remain public
|
||||
- Existing USE KB calls continue to work
|
||||
- THINK KB will filter results based on new permissions
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
1. ✅ Database migration for kb_group_associations
|
||||
2. ✅ Modify think_kb_search to accept user_id and filter by groups
|
||||
3. ✅ Update THINK KB keyword registration to pass user_id
|
||||
4. ✅ Add group access check to USE KB
|
||||
5. ✅ Add API endpoints for KB-group management
|
||||
6. ✅ Update admin UI for group-KB assignment
|
||||
7. ✅ Update drive UI to show group access
|
||||
8. ✅ Add tests for group-based access control
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- All KB access checks must happen at the database level
|
||||
- No client-side filtering of search results
|
||||
- Group membership verified on each request
|
||||
- Audit logging for KB access attempts
|
||||
|
||||
## Testing
|
||||
|
||||
- Unit tests for group access functions
|
||||
- Integration tests for THINK KB with group filtering
|
||||
- UI tests for admin group-KB management
|
||||
- End-to-end tests with different user group scenarios
|
||||
11
reset.sh
11
reset.sh
|
|
@ -1 +1,10 @@
|
|||
rm -rf botserver-stack/ ./work/ .env
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "Cleaning up..."
|
||||
rm -rf botserver-stack/ ./work/ .env
|
||||
|
||||
echo "Starting services..."
|
||||
./restart.sh
|
||||
|
||||
echo "Reset complete!"
|
||||
|
|
|
|||
32
restart.ps1
Normal file
32
restart.ps1
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
$ErrorActionPreference = "Continue"
|
||||
|
||||
Write-Host "Stopping..."
|
||||
Stop-Process -Name "botserver" -Force -ErrorAction SilentlyContinue
|
||||
Stop-Process -Name "botui" -Force -ErrorAction SilentlyContinue
|
||||
Stop-Process -Name "rustc" -Force -ErrorAction SilentlyContinue
|
||||
|
||||
Write-Host "Cleaning..."
|
||||
Remove-Item -Path "botserver.log", "botui.log" -Force -ErrorAction SilentlyContinue
|
||||
|
||||
Write-Host "Building..."
|
||||
cargo build -p botserver
|
||||
if ($LASTEXITCODE -ne 0) { Write-Host "Failed to build botserver"; exit 1 }
|
||||
|
||||
cargo build -p botui
|
||||
if ($LASTEXITCODE -ne 0) { Write-Host "Failed to build botui"; exit 1 }
|
||||
|
||||
Write-Host "Starting botserver..."
|
||||
$env:PORT = "8080"
|
||||
$env:RUST_LOG = "debug"
|
||||
$env:PATH += ";C:\pgsql\pgsql\bin;C:\pgsql\pgsql\lib"
|
||||
$botserverProcess = Start-Process -PassThru -NoNewWindow -FilePath ".\target\debug\botserver.exe" -ArgumentList "--noconsole" -RedirectStandardOutput "botserver.log" -RedirectStandardError "botserver.log"
|
||||
Write-Host " PID: $($botserverProcess.Id)"
|
||||
|
||||
Write-Host "Starting botui..."
|
||||
$env:BOTSERVER_URL = "http://localhost:8080"
|
||||
$env:PORT = "3000"
|
||||
$botuiProcess = Start-Process -PassThru -NoNewWindow -FilePath ".\target\debug\botui.exe" -RedirectStandardOutput "botui.log" -RedirectStandardError "botui.log"
|
||||
Write-Host " PID: $($botuiProcess.Id)"
|
||||
|
||||
Write-Host "Done. Logs are being written to botserver.log and botui.log"
|
||||
Write-Host "To view logs, you can use: Get-Content botserver.log -Wait"
|
||||
59
restart.sh
59
restart.sh
|
|
@ -1,28 +1,49 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "🛑 Stopping existing processes..."
|
||||
pkill -f botserver || true
|
||||
pkill -f botui || true
|
||||
pkill -f rustc || true
|
||||
echo "=== Fast Restart: botserver + botmodels only ==="
|
||||
|
||||
echo "🧹 Cleaning logs..."
|
||||
rm -f botserver.log botui.log
|
||||
# Kill only the app services, keep infra running
|
||||
pkill -f "botserver --noconsole" || true
|
||||
pkill -f "botmodels" || true
|
||||
|
||||
echo "🔨 Building botserver..."
|
||||
# Clean logs
|
||||
rm -f botserver.log botmodels.log
|
||||
|
||||
# Build only botserver (botui likely already built)
|
||||
cargo build -p botserver
|
||||
|
||||
echo "🔨 Building botui..."
|
||||
cargo build -p botui
|
||||
# Start botmodels
|
||||
cd botmodels
|
||||
source venv/bin/activate
|
||||
uvicorn src.main:app --host 0.0.0.0 --port 8085 > ../botmodels.log 2>&1 &
|
||||
echo " botmodels PID: $!"
|
||||
cd ..
|
||||
|
||||
echo "🚀 Starting botserver..."
|
||||
RUST_LOG=info ./target/debug/botserver --noconsole > botserver.log 2>&1 &
|
||||
BOTSERVER_PID=$!
|
||||
# Wait for botmodels
|
||||
for i in $(seq 1 20); do
|
||||
if curl -s http://localhost:8085/api/health > /dev/null 2>&1; then
|
||||
echo " botmodels ready"
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
echo "🚀 Starting botui..."
|
||||
BOTSERVER_URL="https://localhost:8088" ./target/debug/botui > botui.log 2>&1 &
|
||||
BOTUI_PID=$!
|
||||
# Start botserver (keep botui running if already up)
|
||||
if ! pgrep -f "botui" > /dev/null; then
|
||||
echo "Starting botui..."
|
||||
cargo build -p botui
|
||||
cd botui
|
||||
BOTSERVER_URL="http://localhost:8080" ./target/debug/botui > ../botui.log 2>&1 &
|
||||
echo " botui PID: $!"
|
||||
cd ..
|
||||
fi
|
||||
|
||||
echo "✅ Started botserver (PID: $BOTSERVER_PID) and botui (PID: $BOTUI_PID)"
|
||||
echo "📊 Monitor with: tail -f botserver.log botui.log"
|
||||
echo "🌐 Access at: http://localhost:3000"
|
||||
# Start botserver
|
||||
BOTMODELS_HOST="http://localhost:8085" BOTMODELS_API_KEY="starter" RUST_LOG=info ./target/debug/botserver --noconsole > botserver.log 2>&1 &
|
||||
echo " botserver PID: $!"
|
||||
|
||||
# Quick health check
|
||||
sleep 2
|
||||
curl -s http://localhost:8080/health > /dev/null 2>&1 && echo "✅ botserver ready" || echo "❌ botserver failed"
|
||||
|
||||
echo "Done. botserver $(pgrep -f 'botserver --noconsole') botui $(pgrep -f botui) botmodels $(pgrep -f botmodels)"
|
||||
|
|
|
|||
31
snapshot_chat_ready.yml
Normal file
31
snapshot_chat_ready.yml
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
- generic [ref=e5]:
|
||||
- generic [ref=e6]:
|
||||
- generic [ref=e7]: Chat
|
||||
- generic [ref=e8]:
|
||||
- button [ref=e9] [cursor=pointer]:
|
||||
- img [ref=e10]
|
||||
- button [ref=e11] [cursor=pointer]:
|
||||
- img [ref=e12]
|
||||
- button [ref=e14] [cursor=pointer]:
|
||||
- img [ref=e15]
|
||||
- generic [ref=e18]:
|
||||
- generic [ref=e19]:
|
||||
- main [ref=e20]:
|
||||
- paragraph [ref=e23]: Olá! Bem-vinda(o) ao assistente virtual do Salesianos.
|
||||
- generic [ref=e25]: oi
|
||||
- contentinfo [ref=e29]:
|
||||
- generic [ref=e30]:
|
||||
- button "Cartas" [ref=e31] [cursor=pointer]
|
||||
- button "Procedimentos" [ref=e32] [cursor=pointer]
|
||||
- button "Tabelas" [ref=e33] [cursor=pointer]
|
||||
- generic [ref=e34]:
|
||||
- generic [ref=e35]:
|
||||
- button "Agent" [ref=e36] [cursor=pointer]
|
||||
- button "Chat" [ref=e37] [cursor=pointer]
|
||||
- textbox "Message... (type @ to mention)" [active] [ref=e38]
|
||||
- button "↑" [ref=e39] [cursor=pointer]
|
||||
- button "Scroll to bottom":
|
||||
- img
|
||||
- generic:
|
||||
- generic:
|
||||
- button "View"
|
||||
30
snapshot_initial.yml
Normal file
30
snapshot_initial.yml
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
- generic [ref=e106]:
|
||||
- generic [ref=e107]:
|
||||
- generic [ref=e108]: Chat
|
||||
- generic [ref=e109]:
|
||||
- button [ref=e110] [cursor=pointer]:
|
||||
- img [ref=e111]
|
||||
- button [ref=e112] [cursor=pointer]:
|
||||
- img [ref=e113]
|
||||
- button [ref=e115] [cursor=pointer]:
|
||||
- img [ref=e116]
|
||||
- generic [ref=e119]:
|
||||
- generic [ref=e120]:
|
||||
- main [ref=e194]:
|
||||
- paragraph [ref=e197]: Olá! Bem-vinda(o) ao assistente virtual do Salesianos.
|
||||
- contentinfo [ref=e166]:
|
||||
- generic [ref=e198]:
|
||||
- button "Cartas" [ref=e199] [cursor=pointer]
|
||||
- button "Procedimentos" [ref=e200] [cursor=pointer]
|
||||
- button "Tabelas" [ref=e201] [cursor=pointer]
|
||||
- generic [ref=e169]:
|
||||
- generic [ref=e170]:
|
||||
- button "Agent" [ref=e171] [cursor=pointer]
|
||||
- button "Chat" [ref=e172] [cursor=pointer]
|
||||
- textbox "Message... (type @ to mention)" [active] [ref=e173]
|
||||
- button "↑" [ref=e174] [cursor=pointer]
|
||||
- button "Scroll to bottom":
|
||||
- img
|
||||
- generic:
|
||||
- generic:
|
||||
- button "View"
|
||||
61
snapshot_response.yml
Normal file
61
snapshot_response.yml
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
- generic [ref=e106]:
|
||||
- generic [ref=e107]:
|
||||
- generic [ref=e108]: Chat
|
||||
- generic [ref=e109]:
|
||||
- button [ref=e110] [cursor=pointer]:
|
||||
- img [ref=e111]
|
||||
- button [ref=e112] [cursor=pointer]:
|
||||
- img [ref=e113]
|
||||
- button [ref=e115] [cursor=pointer]:
|
||||
- img [ref=e116]
|
||||
- generic [ref=e119]:
|
||||
- generic [ref=e120]:
|
||||
- main [ref=e194]:
|
||||
- paragraph [ref=e197]: Olá! Bem-vinda(o) ao assistente virtual do Salesianos.
|
||||
- generic [ref=e203]: oi
|
||||
- generic [ref=e326]:
|
||||
- generic [ref=e327]:
|
||||
- heading "Olá! Seja bem-vindo(a)!" [level=1] [ref=e330]
|
||||
- heading "Sou o assistente virtual da Escola Salesiana" [level=2] [ref=e331]
|
||||
- paragraph [ref=e332]:
|
||||
- text: É uma grande alegria recebê-lo aqui! Meu nome é o Assistente Virtual Salesiano, criado para ser ponte entre você e toda a riqueza da nossa instituição, fundamentada nos ideais de Dom Bosco e na
|
||||
- strong [ref=e333]: Pedagogia Preventiva
|
||||
- text: .
|
||||
- paragraph [ref=e334]: Baseado nas cartas inspiradoras de Dom Bosco e no modo salesiano de educar, estou aqui para ajudá-lo com informações, orientações e todo o suporte que precisar sobre nossa escola.
|
||||
- generic [ref=e335]:
|
||||
- paragraph [ref=e336]:
|
||||
- strong [ref=e337]: Como posso ser útil hoje?
|
||||
- paragraph [ref=e338]:
|
||||
- text: Posso orientá-lo sobre
|
||||
- strong [ref=e339]: matrículas
|
||||
- text: ","
|
||||
- strong [ref=e340]: protocolos escolares
|
||||
- text: ","
|
||||
- strong [ref=e341]: eventos
|
||||
- text: ","
|
||||
- strong [ref=e342]: calendário acadêmico
|
||||
- text: ","
|
||||
- strong [ref=e343]: projetos pedagógicos
|
||||
- text: e muito mais. Sinta-se à vontade para perguntar sobre qualquer aspecto da nossa comunidade educativa!
|
||||
- generic [ref=e344]:
|
||||
- generic [ref=e345]:
|
||||
- img [ref=e346]
|
||||
- heading "Calendário" [level=3] [ref=e348]
|
||||
- paragraph [ref=e349]: Datas importantes, feriados, eventos e prazos do ano letivo.
|
||||
- img [ref=e351]
|
||||
- contentinfo [ref=e166]:
|
||||
- generic [ref=e198]:
|
||||
- button "Cartas" [ref=e199] [cursor=pointer]
|
||||
- button "Procedimentos" [ref=e200] [cursor=pointer]
|
||||
- button "Tabelas" [ref=e201] [cursor=pointer]
|
||||
- generic [ref=e169]:
|
||||
- generic [ref=e170]:
|
||||
- button "Agent" [ref=e171] [cursor=pointer]
|
||||
- button "Chat" [ref=e172] [cursor=pointer]
|
||||
- textbox "Message... (type @ to mention)" [active] [ref=e173]
|
||||
- button "↑" [ref=e174] [cursor=pointer]
|
||||
- button "Scroll to bottom":
|
||||
- img
|
||||
- generic:
|
||||
- generic:
|
||||
- button "View"
|
||||
28
start.bas
28
start.bas
|
|
@ -1,28 +0,0 @@
|
|||
REM Knowledge Base Website Crawler Bot - Start Template
|
||||
REM Sets up bot context and crawled websites, then exits
|
||||
|
||||
REM Load bot introduction
|
||||
intro = GET BOT MEMORY "introduction"
|
||||
IF intro = "" THEN
|
||||
intro = "I'm your documentation assistant with access to crawled websites."
|
||||
END IF
|
||||
|
||||
REM Register websites for crawling (preprocessing mode)
|
||||
USE WEBSITE "https://docs.python.org"
|
||||
USE WEBSITE "https://developer.mozilla.org"
|
||||
USE WEBSITE "https://stackoverflow.com"
|
||||
|
||||
REM Set context for LLM
|
||||
SET CONTEXT "role" AS intro
|
||||
SET CONTEXT "capabilities" AS "I can search Python docs, MDN web docs, and Stack Overflow."
|
||||
|
||||
REM Configure suggestion buttons
|
||||
CLEAR SUGGESTIONS
|
||||
ADD SUGGESTION "python" AS "How do I use Python dictionaries?"
|
||||
ADD SUGGESTION "javascript" AS "Explain JavaScript async/await"
|
||||
ADD SUGGESTION "web" AS "What is the DOM in web development?"
|
||||
|
||||
REM Initial greeting
|
||||
TALK intro
|
||||
TALK "I have access to Python documentation, MDN web docs, and Stack Overflow."
|
||||
TALK "Ask me any programming question!"
|
||||
4
stop.sh
4
stop.sh
|
|
@ -1,4 +0,0 @@
|
|||
pkill botui
|
||||
pkill botserver -9
|
||||
|
||||
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
import { test, expect } from '@playwright/test';
|
||||
|
||||
test('has title', async ({ page }) => {
|
||||
await page.goto('https://playwright.dev/');
|
||||
|
||||
// Expect a title "to contain" a substring.
|
||||
await expect(page).toHaveTitle(/Playwright/);
|
||||
});
|
||||
|
||||
test('get started link', async ({ page }) => {
|
||||
await page.goto('https://playwright.dev/');
|
||||
|
||||
// Click the get started link.
|
||||
await page.getByRole('link', { name: 'Get started' }).click();
|
||||
|
||||
// Expects page to have a heading with the name of Installation.
|
||||
await expect(page.getByRole('heading', { name: 'Installation' })).toBeVisible();
|
||||
});
|
||||
41
yarn.lock
41
yarn.lock
|
|
@ -1,41 +0,0 @@
|
|||
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
|
||||
# yarn lockfile v1
|
||||
|
||||
|
||||
"@playwright/test@^1.58.1":
|
||||
version "1.58.1"
|
||||
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.58.1.tgz#891dcd1da815cb1042490531f6d8778988509d22"
|
||||
integrity sha512-6LdVIUERWxQMmUSSQi0I53GgCBYgM2RpGngCPY7hSeju+VrKjq3lvs7HpJoPbDiY5QM5EYRtRX5fvrinnMAz3w==
|
||||
dependencies:
|
||||
playwright "1.58.1"
|
||||
|
||||
"@types/node@^25.2.0":
|
||||
version "25.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@types/node/-/node-25.2.0.tgz#015b7d228470c1dcbfc17fe9c63039d216b4d782"
|
||||
integrity sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w==
|
||||
dependencies:
|
||||
undici-types "~7.16.0"
|
||||
|
||||
fsevents@2.3.2:
|
||||
version "2.3.2"
|
||||
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
|
||||
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
|
||||
|
||||
playwright-core@1.58.1:
|
||||
version "1.58.1"
|
||||
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.58.1.tgz#d63be2c9b7dcbdb035beddd4b42437bd3ca89107"
|
||||
integrity sha512-bcWzOaTxcW+VOOGBCQgnaKToLJ65d6AqfLVKEWvexyS3AS6rbXl+xdpYRMGSRBClPvyj44njOWoxjNdL/H9UNg==
|
||||
|
||||
playwright@1.58.1:
|
||||
version "1.58.1"
|
||||
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.58.1.tgz#63300e77a604c77264e1b499c0d94b54ed96d6ba"
|
||||
integrity sha512-+2uTZHxSCcxjvGc5C891LrS1/NlxglGxzrC4seZiVjcYVQfUa87wBL6rTDqzGjuoWNjnBzRqKmF6zRYGMvQUaQ==
|
||||
dependencies:
|
||||
playwright-core "1.58.1"
|
||||
optionalDependencies:
|
||||
fsevents "2.3.2"
|
||||
|
||||
undici-types@~7.16.0:
|
||||
version "7.16.0"
|
||||
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.16.0.tgz#ffccdff36aea4884cbfce9a750a0580224f58a46"
|
||||
integrity sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==
|
||||
Loading…
Add table
Reference in a new issue