Compare commits
501 commits
| Author | SHA1 | Date | |
|---|---|---|---|
| f949180a89 | |||
| 9a6dd337f9 | |||
| f679593d7b | |||
| a4ae8bf156 | |||
| d66ea28357 | |||
| 791e1672f4 | |||
| a16cf65c3c | |||
| ee362fbd15 | |||
| bfee85f5b5 | |||
| b04a03aa7d | |||
| 4292a7ce76 | |||
| 0529f51b68 | |||
| 4388d8d042 | |||
| 9c61885b9b | |||
| 42bf55c348 | |||
| cae3c25c8f | |||
| 620d84a845 | |||
| 57a718426e | |||
| 3b09fce82b | |||
| 75650df065 | |||
| 2519294add | |||
| 553b7b9e21 | |||
| 7366541614 | |||
| 3a03a7dc5a | |||
| 254901bf4a | |||
| cf69c01feb | |||
| 0c5b20ce61 | |||
| db6f2610ee | |||
| 1707ead3c0 | |||
| 90fb3f0dc0 | |||
| 3e1a3d4e5e | |||
| 6b857e8d17 | |||
| 5b5e3202e5 | |||
| 11ccba624c | |||
| 1ab01b09a2 | |||
| 5ebd2d8f5d | |||
| 0c008c1c0e | |||
| 07f34991d5 | |||
| ba7184c10b | |||
| 02e7389e62 | |||
| a1aa97f1be | |||
| 7950120339 | |||
| d963b6c562 | |||
| 851c66fbaa | |||
| 36fddd0aec | |||
| 3aac836a49 | |||
| 50ffd05068 | |||
| 4478b3d7ba | |||
| f0fde2c7bd | |||
| a3e7c90669 | |||
| 7122285a60 | |||
| e0fec021e8 | |||
| 6965e08313 | |||
| f33968c4e0 | |||
| 264873d776 | |||
| 1c1a9ea4cf | |||
| 3cd1d9b23c | |||
| 3b76f31792 | |||
| e22ae04993 | |||
| 50a220f1f8 | |||
| 7c1d022ad2 | |||
| a170ec4c2b | |||
| d80e80856a | |||
| 0432cf2c70 | |||
| cee8aeee34 | |||
| c405f18efe | |||
| 1bfaad789d | |||
| a5a6b372b7 | |||
| e00e066ffe | |||
| d5e2d8e5a8 | |||
| a1b4bac917 | |||
| 14d66671ae | |||
| 5e5cfabbb6 | |||
| de0e3844d4 | |||
| 2185d3352c | |||
| f5096d49ff | |||
| ee1e484316 | |||
| 55d58535d6 | |||
| 397da6cf48 | |||
| 915cf1baf3 | |||
| 098f46ef2d | |||
| d8f8aff96e | |||
| b301620094 | |||
| e8ff264bd7 | |||
| 9844ce5ab0 | |||
| 730739aedf | |||
| d3ae322256 | |||
| decea7ddfc | |||
| f2ba1f4dfe | |||
| 03d223517d | |||
| f41b984b6f | |||
| 93f80e0af5 | |||
| fa03fc65a1 | |||
| 4b67b0f486 | |||
| 49bc6e52bc | |||
| a8ae578a80 | |||
| b2995cdcdb | |||
| 4f510d1196 | |||
| d28510a632 | |||
| e094dc138a | |||
| 07b6af9bf3 | |||
| a5f16fbab9 | |||
| 2257c980cb | |||
| 123771c996 | |||
| f767337ed8 | |||
| d598bdc29a | |||
| b45f63a7a6 | |||
| e1b456d199 | |||
| 96ece5a3ea | |||
| 1662905a32 | |||
| d0d68e792e | |||
| fc95cba887 | |||
| 93dc55c47c | |||
| 0c1a988f82 | |||
| dcabb6c0bc | |||
| 083b56921f | |||
| daa76e8a7b | |||
| be03dfe880 | |||
| 934e46e038 | |||
| fbd2a8647d | |||
| 8e6549a9ea | |||
| 05859d5276 | |||
| 96c61938d2 | |||
| 6386f65e58 | |||
| 613409b5d6 | |||
| ef8e7b9b56 | |||
| 915945e1b5 | |||
| d2e24c581b | |||
| a65365d19f | |||
| 6e63c47087 | |||
| 370fa6511d | |||
| 2368c30e59 | |||
| 6ddb1ebcc5 | |||
| d9587863aa | |||
| 48add934bd | |||
| 9d87f4b60d | |||
| a492d1abc1 | |||
| 27c1cd9671 | |||
| 50798824f8 | |||
| 737fb45fc0 | |||
| ff1680cafc | |||
| 0dcd46bfe7 | |||
| 55043a4d8a | |||
| 6a97db0931 | |||
| b1c3800ca8 | |||
| c4c52264db | |||
| 277789e0bc | |||
| c2d60d7cb8 | |||
| 72e6992f33 | |||
| 9016868345 | |||
| e7a42b5011 | |||
| 1bd81a4c2c | |||
| a367d8fca5 | |||
| 932fc30cea | |||
| bb79ac931f | |||
| 99c64d32ff | |||
| 80494ea4fe | |||
| 7ee9d42560 | |||
| 272f56c79d | |||
| fb02e72b8f | |||
| 50a3718d82 | |||
| 373bb6a6e4 | |||
| ada9db7a42 | |||
| fb0b7f079f | |||
| 92d8a0d858 | |||
| 1ae46149ee | |||
| 9068bc25f8 | |||
| 80c798ed05 | |||
| 41cf536cd4 | |||
| 901153803f | |||
| a8521d7480 | |||
| f5b954df76 | |||
| 1bf9e1872b | |||
| a377af5ba3 | |||
| 0dbc7cb081 | |||
| 8eecc7f871 | |||
| 5bebba4d7f | |||
| 60ef1d0562 | |||
| 1df0ea8626 | |||
| 54639690e2 | |||
| 04454f2274 | |||
| 3dadec70a8 | |||
| 76a74b87f2 | |||
| 264bd6f4c9 | |||
| 466bd729af | |||
| 9e3232ae85 | |||
| 75eccecbe9 | |||
| f9aa5dacd0 | |||
| 82f5f24b44 | |||
| 752cdd6f3b | |||
| 3a6a571361 | |||
| ed2052c8ec | |||
| 3e5c569354 | |||
| 9093340f4e | |||
| 5cec129302 | |||
| 4fa2018f5d | |||
| 814b0214be | |||
| b6f83df229 | |||
| 66b19098b4 | |||
| 8ef34d011c | |||
| d8fb6c954a | |||
| a2da4820b7 | |||
| 9815ab313b | |||
| 30e78ba40c | |||
| 45df9d9caf | |||
| 52aac0af21 | |||
| c79ab35409 | |||
| 90ee0257cb | |||
| 1388463695 | |||
| 6a13837bb8 | |||
| 602dfdee93 | |||
| c404cc0b4d | |||
| 926759f630 | |||
| ebf516e967 | |||
| 28326cb049 | |||
| ff146657e0 | |||
| 9677c90907 | |||
| f65b288cc5 | |||
| 0ec849d323 | |||
| 9f44d084ac | |||
| bb1cef6675 | |||
| af87de255b | |||
| ff4ab52fef | |||
| eb87414a78 | |||
| 083024a438 | |||
| f815943491 | |||
| e61cf84bc4 | |||
| 22172cf64a | |||
| a7ccec940a | |||
| ec2e93b22c | |||
| 2cd3a5d692 | |||
| d09a11e31e | |||
| 4941f74c6a | |||
| 7a70798c85 | |||
| bd1aeb442d | |||
| ae94a7b77c | |||
| 94ba355910 | |||
| 4f4795b981 | |||
| 2658659ff0 | |||
| 3b6e2df6e2 | |||
| 4bf1dc4689 | |||
| d933ac52af | |||
| 21add146a6 | |||
| bbea8fb9fd | |||
| 76b64182bd | |||
| ce6a65a902 | |||
| b9835d6d12 | |||
| e0ab4bb1fe | |||
| e30b070eff | |||
| f0858a443e | |||
| 0ce43ed8f7 | |||
| 8502eac494 | |||
| 70cdd6e5a5 | |||
| 98e8179810 | |||
| 67d3c7f901 | |||
| ec4c660887 | |||
| 7d3cf9bd61 | |||
| 02c8efeca7 | |||
| 0be7cb1f61 | |||
| dfad0f3989 | |||
| 58be60fda0 | |||
| f7bcd16212 | |||
| 4900274887 | |||
| bfc9ced932 | |||
| 65583977fa | |||
| 12d9dc50af | |||
| 1ca4d2c712 | |||
| 176df967f7 | |||
| 7e9b8b00fe | |||
| e2ffa39bf8 | |||
| 297af2f9e1 | |||
| 4caa147841 | |||
| 29416db1b8 | |||
| 9c3bd0c1d5 | |||
| 21b219f0d3 | |||
| f7238b5d33 | |||
| 4c26b77143 | |||
|
|
7de55efe79 | ||
|
|
7d70035050 | ||
|
|
c19095f141 | ||
|
|
2572425cb1 | ||
|
|
8a6fec467c | ||
| 2fe4586be5 | |||
| 6297018124 | |||
| 7d95a607f4 | |||
| 91ad0232f0 | |||
| c43bda9b2a | |||
| 144c4b0b79 | |||
| 920666db40 | |||
| 731892db36 | |||
| 0465216f66 | |||
| a695da5a05 | |||
| f1455ad7cb | |||
| e42ee6ee43 | |||
| 1f73d82c50 | |||
| dd1d105ef0 | |||
| b7275ed233 | |||
| e245077700 | |||
| a23b4f1983 | |||
| ff9e3f56a9 | |||
| 62e2390da3 | |||
| 39a6a4cf20 | |||
| 4da3910b3b | |||
| db119148cf | |||
| d89a5c9d30 | |||
| f8c36c3778 | |||
| 9123756e82 | |||
| 28c4f89e16 | |||
| cc8b432aa8 | |||
| d65794fa93 | |||
| 2fe91226b0 | |||
| 465fa71c84 | |||
| 853181cd1d | |||
| c7fe607171 | |||
| 12e7cba9bd | |||
| 31f68d7bf0 | |||
| 334bb9239b | |||
| a9c4714929 | |||
| 41efb790ef | |||
| 4e0737f60a | |||
| 649afd7947 | |||
| a99c17008e | |||
| abedde3af7 | |||
| 9fc33725b7 | |||
| 7d4708b516 | |||
| 34af1f2a16 | |||
| 21b96804e8 | |||
| a6a221788b | |||
| 610741e123 | |||
| ddb11a7c06 | |||
| 57b09e5b66 | |||
| 046dbc63ad | |||
| c3c235f8c4 | |||
| 19b4a20a02 | |||
| 792a13eb67 | |||
| 9eb2bfe09c | |||
| 49930f2aa0 | |||
| d08dab26dc | |||
| ba032f41eb | |||
| ec8d002574 | |||
| ce81951a69 | |||
| 1d0c9ccdb8 | |||
| f0e0553966 | |||
| 9b02df3bec | |||
| 49d9b193b2 | |||
| b11140d106 | |||
| 8e27900529 | |||
| 60e2054a02 | |||
| 40e735a56f | |||
| a83d9a88aa | |||
| 8b32eaf41f | |||
| c12ad1eda4 | |||
| 4729fe7071 | |||
| e443aa9d1a | |||
| 3891f137fd | |||
| f40449ed51 | |||
| 146f04b373 | |||
| 1ecd5f4f0c | |||
| b5bf1061f8 | |||
| 7bda264f61 | |||
| caafad484a | |||
| ddc1bdb2db | |||
| faeb77fc07 | |||
| a31d7b355a | |||
| 30ec64d878 | |||
| dd64a4102c | |||
| 041c76209a | |||
| c6e8ced648 | |||
| 56e8054839 | |||
| 8519c3cfd2 | |||
| 8a0216c654 | |||
| abcf959fdf | |||
| b9ab17fb4c | |||
| 84083b9ae0 | |||
| 079e802b17 | |||
| 33845109fd | |||
| bd3857f2d4 | |||
| acea58eecb | |||
| ead23594c0 | |||
| b606913d7d | |||
| 5ffa0d71af | |||
| 0057ca3612 | |||
| 504bb66a82 | |||
| 1e6289b223 | |||
| 8c3f51a49d | |||
| cf0d94873d | |||
| 8eef47058d | |||
| dcaf90d39f | |||
| a8e107059a | |||
| ea4cb78646 | |||
| eda45af678 | |||
| fc994375b1 | |||
| 3fe4f7ece7 | |||
| 4202f0dcf9 | |||
| fad4e1457b | |||
| b798123f14 | |||
| 5b34b21622 | |||
| cf80060818 | |||
| cda27734f0 | |||
| f1b231ce3a | |||
| 6a7cdf2800 | |||
| 21aa782fd2 | |||
| 54966ff63f | |||
| bcb43ce887 | |||
| 4736383997 | |||
| 93519eba14 | |||
| 0e47fd9476 | |||
| 2f200e1e99 | |||
| 8e02195eca | |||
| 41e314fd67 | |||
| fe865fdfa1 | |||
| 068f011907 | |||
| df257e990e | |||
| 03a03c3a3f | |||
| 974e1e688a | |||
| b5896c2493 | |||
| 5221bc9ec5 | |||
| 65f1c762c1 | |||
| 5c3d772010 | |||
| 164a0972a4 | |||
| 1ccd1f9cc9 | |||
| e194dcf9e6 | |||
| 3c5c01f4ea | |||
| e50bcee8c7 | |||
| 198bcf8edb | |||
| 613a14ecfb | |||
| cd2dd0f6f6 | |||
| a13e3f7c51 | |||
| c9eabb0f0d | |||
| edb40ea8e0 | |||
| 416d0c7de6 | |||
| 24e0f3c3a6 | |||
| 22d7bdc16e | |||
| f67e25fcd3 | |||
| 93e46dfdaa | |||
| 0b591e663e | |||
| b3c35feff7 | |||
| 0ac7c4ed03 | |||
| cb9c94223d | |||
| 1f5c25ce79 | |||
| 2a45c5d7c4 | |||
| b6a0faf879 | |||
| f34e760697 | |||
| b6f69951f4 | |||
| 92f2c012f0 | |||
| 14d95994b9 | |||
| 4f7f0fff0e | |||
| d48791290a | |||
| 8831c56c90 | |||
| b9d6816644 | |||
| c258a61ad5 | |||
| 3326e07234 | |||
| 7bd5375d1a | |||
| 3dbadbafcb | |||
| b431ae6602 | |||
| 7e89d183ff | |||
| b2f143f664 | |||
| 860a460a93 | |||
| cda2d8f155 | |||
| 2a1f4305a2 | |||
| 090b42618b | |||
| f97451482d | |||
| 8a2072919a | |||
| 62749e9eba | |||
| 52e551734a | |||
| 00647f7edf | |||
| 4100e53c28 | |||
| 117ea78e39 | |||
| 91ddf87dce | |||
| 12b661fdc1 | |||
| 3375394ca5 | |||
| 23fe731d30 | |||
| 9f46a41abf | |||
| 13cea1b75c | |||
| 96076831ab | |||
| 02cd170eeb | |||
| 3befc141e5 | |||
| 64c14a30c9 | |||
| 1ee5492bd8 | |||
| 43b168a4b5 | |||
| 0dfabc46b3 | |||
| 79f849da6b | |||
| 81ac2ae8c6 | |||
| 68f3ada343 | |||
| 599c338a4a | |||
| f54ad8175c | |||
| cb61c130c3 | |||
| 1a5f54e576 | |||
| 2beeec3292 | |||
| 5f3f82748c | |||
| 5b0d274e5e | |||
| dc0055d9e6 | |||
| 1ce77cc14f | |||
| 4a9b969a9c | |||
| ece4a72e10 | |||
| ee26396c49 | |||
| 76c3ea15fb | |||
| ac4b07fde9 | |||
| b259c6a506 | |||
| 3a7eb3729c | |||
| 1c39e743d4 |
35 changed files with 15102 additions and 845 deletions
|
|
@ -1,5 +1,5 @@
|
||||||
[build]
|
[build]
|
||||||
rustc-wrapper = "sccache"
|
# rustc-wrapper = "sccache"
|
||||||
|
|
||||||
[target.x86_64-unknown-linux-gnu]
|
[target.x86_64-unknown-linux-gnu]
|
||||||
linker = "clang"
|
linker = "clang"
|
||||||
|
|
|
||||||
8
.env.example
Normal file
8
.env.example
Normal file
|
|
@ -0,0 +1,8 @@
|
||||||
|
# General Bots Environment Configuration
|
||||||
|
# Copy this file to .env and fill in values
|
||||||
|
# NEVER commit .env to version control
|
||||||
|
|
||||||
|
# Vault connection
|
||||||
|
VAULT_ADDR=https://127.0.0.1:8200
|
||||||
|
VAULT_TOKEN=<your-vault-token-here>
|
||||||
|
VAULT_CACERT=./botserver-stack/vault/certs/ca.crt
|
||||||
55
.gitignore
vendored
55
.gitignore
vendored
|
|
@ -2,11 +2,17 @@
|
||||||
target/
|
target/
|
||||||
*.out
|
*.out
|
||||||
bin/
|
bin/
|
||||||
|
*.png
|
||||||
|
*.jpg
|
||||||
# Logs
|
# Logs
|
||||||
*.log
|
*.log
|
||||||
*logfile*
|
*logfile*
|
||||||
*-log*
|
*-log*
|
||||||
|
.vscode
|
||||||
|
.zed
|
||||||
|
.gemini
|
||||||
|
.claude
|
||||||
|
|
||||||
|
|
||||||
# Temporary files
|
# Temporary files
|
||||||
.tmp*
|
.tmp*
|
||||||
|
|
@ -24,13 +30,54 @@ work/
|
||||||
|
|
||||||
# Documentation build
|
# Documentation build
|
||||||
docs/book
|
docs/book
|
||||||
|
.ruff_cache
|
||||||
|
.goutputstream*
|
||||||
# Installers (keep gitkeep)
|
# Installers (keep gitkeep)
|
||||||
botserver-installers/*
|
botserver-installers/*
|
||||||
!botserver-installers/.gitkeep
|
!botserver-installers/.gitkeep
|
||||||
botserver-stack
|
botserver-stack
|
||||||
TODO*
|
TODO*
|
||||||
|
work
|
||||||
|
.swp
|
||||||
|
# Lock file
|
||||||
|
# Cargo.lock (should be tracked)
|
||||||
|
.kiro
|
||||||
|
config
|
||||||
|
|
||||||
|
# Data directory (contains bot configs and API keys)
|
||||||
|
data/
|
||||||
|
|
||||||
# Lock file (regenerated from Cargo.toml)
|
# Playwright
|
||||||
Cargo.lock
|
node_modules/
|
||||||
|
/test-results/
|
||||||
|
/playwright-report/
|
||||||
|
/blob-report/
|
||||||
|
/playwright/.cache/
|
||||||
|
/playwright/.auth/
|
||||||
|
.playwright*
|
||||||
|
.ruff_cache
|
||||||
|
.opencode
|
||||||
|
config/directory_config.json
|
||||||
|
# CI cache bust: Fri Feb 13 22:33:51 UTC 2026
|
||||||
|
|
||||||
|
# Secrets - NEVER commit these files
|
||||||
|
vault-unseal-keys
|
||||||
|
start-and-unseal.sh
|
||||||
|
vault-token-*
|
||||||
|
init.json
|
||||||
|
*.pem
|
||||||
|
*.key
|
||||||
|
*.crt
|
||||||
|
*.cert
|
||||||
|
$null
|
||||||
|
AppData/
|
||||||
|
build_errors*.txt
|
||||||
|
build_errors_utf8.txt
|
||||||
|
check.json
|
||||||
|
clippy*.txt
|
||||||
|
errors.txt
|
||||||
|
errors_utf8.txt
|
||||||
|
|
||||||
|
vault-unseal-keysdefault-vault.tar
|
||||||
|
prompts/sec-bots.md
|
||||||
|
AGENTS-PROD.md
|
||||||
|
|
|
||||||
23
.gitmodules
vendored
23
.gitmodules
vendored
|
|
@ -1,42 +1,43 @@
|
||||||
[submodule "botapp"]
|
[submodule "botapp"]
|
||||||
path = botapp
|
path = botapp
|
||||||
url = https://github.com/GeneralBots/botapp.git
|
url = ../botapp.git
|
||||||
|
|
||||||
[submodule "botserver"]
|
[submodule "botserver"]
|
||||||
path = botserver
|
path = botserver
|
||||||
url = https://github.com/GeneralBots/botserver.git
|
url = ../BotServer.git
|
||||||
|
|
||||||
[submodule "botlib"]
|
[submodule "botlib"]
|
||||||
path = botlib
|
path = botlib
|
||||||
url = https://github.com/GeneralBots/botlib.git
|
url = ../botlib.git
|
||||||
|
|
||||||
[submodule "botui"]
|
[submodule "botui"]
|
||||||
path = botui
|
path = botui
|
||||||
url = https://github.com/GeneralBots/botui.git
|
url = ../botui.git
|
||||||
|
|
||||||
[submodule "botbook"]
|
[submodule "botbook"]
|
||||||
path = botbook
|
path = botbook
|
||||||
url = https://github.com/GeneralBots/botbook.git
|
url = ../botbook.git
|
||||||
|
|
||||||
[submodule "bottest"]
|
[submodule "bottest"]
|
||||||
path = bottest
|
path = bottest
|
||||||
url = https://github.com/GeneralBots/bottest.git
|
url = ../bottest.git
|
||||||
|
|
||||||
[submodule "botdevice"]
|
[submodule "botdevice"]
|
||||||
path = botdevice
|
path = botdevice
|
||||||
url = https://github.com/GeneralBots/botdevice.git
|
url = ../botdevice.git
|
||||||
|
|
||||||
[submodule "botmodels"]
|
[submodule "botmodels"]
|
||||||
path = botmodels
|
path = botmodels
|
||||||
url = https://github.com/GeneralBots/botmodels.git
|
url = ../botmodels.git
|
||||||
|
|
||||||
[submodule "botplugin"]
|
[submodule "botplugin"]
|
||||||
path = botplugin
|
path = botplugin
|
||||||
url = https://github.com/GeneralBots/botplugin.git
|
url = ../botplugin.git
|
||||||
|
|
||||||
[submodule "bottemplates"]
|
[submodule "bottemplates"]
|
||||||
path = bottemplates
|
path = bottemplates
|
||||||
url = https://github.com/GeneralBots/bottemplates.git
|
url = ../bottemplates.git
|
||||||
|
|
||||||
[submodule ".github"]
|
[submodule ".github"]
|
||||||
path = .github
|
path = .github
|
||||||
url = https://github.com/GeneralBots/.github.git
|
url = ../.github.git
|
||||||
|
|
|
||||||
24
.vscode/launch.json
vendored
24
.vscode/launch.json
vendored
|
|
@ -1,24 +0,0 @@
|
||||||
{
|
|
||||||
"version": "0.2.0",
|
|
||||||
"configurations": [
|
|
||||||
|
|
||||||
{
|
|
||||||
"type": "lldb",
|
|
||||||
"request": "launch",
|
|
||||||
"name": "Debug executable 'botserver'",
|
|
||||||
"cargo": {
|
|
||||||
"args": ["run", "--bin=botserver", "--package=botserver", "--manifest-path=${workspaceFolder}/botserver/Cargo.toml"],
|
|
||||||
"filter": {
|
|
||||||
"name": "botserver",
|
|
||||||
"kind": "bin"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"args": [],
|
|
||||||
"env": {
|
|
||||||
"RUST_LOG": "trace,aws_sigv4=off,aws_smithy_checksums=off,mio=off,reqwest=off,aws_runtime=off,aws_smithy_http_client=off,rustls=off,hyper_util=off,aws_smithy_runtime=off,aws_smithy_runtime_api=off,tracing=off,aws_sdk_s3=off"
|
|
||||||
|
|
||||||
},
|
|
||||||
"cwd": "${workspaceFolder}/botserver"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
5
.vscode/settings.json
vendored
5
.vscode/settings.json
vendored
|
|
@ -1,5 +0,0 @@
|
||||||
{
|
|
||||||
"git.ignoreLimitWarning": true,
|
|
||||||
"Codegeex.SidebarUI.LanguagePreference": "English",
|
|
||||||
"Codegeex.RepoIndex": true
|
|
||||||
}
|
|
||||||
132
AGENTS.md
Normal file
132
AGENTS.md
Normal file
|
|
@ -0,0 +1,132 @@
|
||||||
|
# General Bots AI Agent Guidelines
|
||||||
|
|
||||||
|
|
||||||
|
NEVER INCLUDE HERE CREDENTIALS OR COMPANY INFORMATION, THIS IS COMPANY AGNOSTIC.
|
||||||
|
Use apenas a língua culta ao falar. Never save files to root — use `/tmp` for temp files. Never push to ALM without asking first (it is production). If a tool fails to install, check the official website for instructions. Local file support (`/opt/gbo/data`) has been removed; bots are loaded only from Drive (MinIO/S3).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Critical Production Rules
|
||||||
|
|
||||||
|
Always manage services via `systemctl` inside the `system` Incus container. Never run `/opt/gbo/bin/botserver` or `/opt/gbo/bin/botui` directly — they skip the `.env` file, which means Vault credentials fail to load and services break. The correct commands are `sudo incus exec system -- systemctl start|stop|restart|status botserver` and the same for `ui`. Systemctl handles env loading, auto-restart, and process lifecycle.
|
||||||
|
|
||||||
|
In development you may use `cargo run` or `./target/debug/botserver` with `botserver/.env`. In production, always use `systemctl start botserver` with `/opt/gbo/bin/.env`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Workspace Structure
|
||||||
|
|
||||||
|
The workspace has eight crates. `botserver` is the main API server (port 8080) using Axum, Diesel, and Rhai BASIC. `botui` is the web UI server and proxy (port 3000) using Axum, HTML/HTMX/CSS. `botapp` is a Tauri 2 desktop wrapper. `botlib` holds shared types and errors. `botbook` is mdBook documentation. `bottest` holds integration tests. `botdevice` handles IoT/device support. `botplugin` is a JS browser extension.
|
||||||
|
|
||||||
|
Key paths: binary at `target/debug/botserver`, always run from the `botserver/` directory, env file at `botserver/.env`, UI files under `botui/ui/suite/`, bot data exclusively in Drive (MinIO/S3) under `/{botname}.gbai/` buckets. Test at `http://localhost:3000`; login at `http://localhost:3000/suite/auth/login.html`.
|
||||||
|
|
||||||
|
Bot files in Drive follow this structure: `{botname}.gbai/{botname}.gbdialog/` contains `*.bas` scripts, `config.csv`, and the `.gbkb/` knowledge base folder. There is no local file monitoring — botserver compiles `.bas` to `.ast` in memory from Drive only.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Absolute Prohibitions
|
||||||
|
|
||||||
|
Never search the `/target` folder. Never build in release mode or use `--release`. Never run `cargo build` — use `cargo check` for verification. Never run `cargo clean` (causes 30-minute rebuilds); use `./reset.sh` for DB issues. Never deploy manually via `scp`, SSH binary copy, or any method other than the CI/CD pipeline (push → ALM → alm-ci builds → deploys to system container). Never run the binary directly in production — use `systemctl` or `./restart.sh`.
|
||||||
|
|
||||||
|
Never use `panic!()`, `todo!()`, `unimplemented!()`, `unwrap()`, or `expect()` in Rust code. Never use `Command::new()` directly — use `SafeCommand`. Never return raw error strings to HTTP clients — use `ErrorSanitizer`. Never use `#[allow()]` or lint exceptions in `Cargo.toml` — fix the code. Never use `_` prefix for unused variables — delete or use them. Never leave unused imports, dead code, or commented-out code. Never use CDN links — all assets must be local. Never create `.md` docs without checking `botbook/` first. Never hardcode credentials — use `generate_random_string()` or env vars. Never include sensitive data (IPs, tokens, keys) in docs or code; mask IPs in logs as `10.x.x.x`. Never create files with secrets anywhere except `/tmp/`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Build Pattern — Fix Fast Loop
|
||||||
|
|
||||||
|
When checking botserver, run `cargo check -p botserver > /tmp/check.log 2>&1 &`, capture the PID, then loop watching line count and kill the process once it exceeds 20 lines. After killing, check for errors with `strings /tmp/check.log | grep "^error" | head -20`. Fix errors immediately, then repeat. Never use `--all-features` (pulls docs/slides dependencies). This saves 10+ minutes per error cycle since full compilation takes 2–3 minutes. The key rule: kill at 20 lines, fix immediately, loop until clean.
|
||||||
|
|
||||||
|
If the process is killed by OOM, run `pkill -9 cargo; pkill -9 rustc; pkill -9 botserver` then retry with `CARGO_BUILD_JOBS=1 cargo check -p botserver 2>&1 | tail -200`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Security Directives — Mandatory
|
||||||
|
|
||||||
|
For error handling, never use `unwrap()`, `expect()`, `panic!()`, or `todo!()`. Use `value?`, `value.ok_or_else(|| Error::NotFound)?`, `value.unwrap_or_default()`, or `if let Some(v) = value { ... }`.
|
||||||
|
|
||||||
|
For command execution, never use `Command::new("cmd").arg(user_input).output()`. Use `SafeCommand::new("allowed_command")?.arg("safe_arg")?.execute()` from `crate::security::command_guard`.
|
||||||
|
|
||||||
|
For error responses, never return `Json(json!({ "error": e.to_string() }))`. Use `log_and_sanitize(&e, "context", None)` from `crate::security::error_sanitizer` and return `(StatusCode::INTERNAL_SERVER_ERROR, sanitized)`.
|
||||||
|
|
||||||
|
For SQL, never use `format!("SELECT * FROM {}", user_table)`. Use `sanitize_identifier` and `validate_table_name` from `crate::security::sql_guard`.
|
||||||
|
|
||||||
|
Rate limits: general 100 req/s, auth 10 req/s, API 50 req/s per token, WebSocket 10 msgs/s. Use the `governor` crate with per-IP and per-user tracking. All state-changing endpoints (POST/PUT/DELETE/PATCH) must require CSRF tokens via `tower_csrf` bound to the user session; Bearer Token endpoints are exempt. Every response must include these security headers: `Content-Security-Policy`, `Strict-Transport-Security`, `X-Frame-Options: DENY`, `X-Content-Type-Options: nosniff`, `Referrer-Policy: strict-origin-when-cross-origin`, and `Permissions-Policy: geolocation=(), microphone=(), camera=()`.
|
||||||
|
|
||||||
|
For dependencies, app crates track `Cargo.lock`; lib crates do not. Critical deps use exact versions (`=1.0.1`); regular deps use caret (`1.0`). Run `cargo audit` weekly and update only via PR with testing.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Mandatory Code Patterns
|
||||||
|
|
||||||
|
Use `Self` not the type name in `impl` blocks. Always derive both `PartialEq` and `Eq` together. Use inline format args: `format!("Hello {name}")` not `format!("Hello {}", name)`. Combine identical match arms: `A | B => do_thing()`. Maximum 450 lines per file — split proactively at 350 lines into `types.rs`, `handlers.rs`, `operations.rs`, `utils.rs`, and `mod.rs`, re-exporting all public items in `mod.rs`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Error Fixing Workflow
|
||||||
|
|
||||||
|
Read the entire error list first. Group errors by file. For each file: view it, fix all errors, then write once. Only verify with `cargo check` after all fixes are applied — never compile after each individual fix. `cargo clippy --workspace` must pass with zero warnings.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Modes
|
||||||
|
|
||||||
|
In local standalone mode (no incus), botserver manages all services itself. Run `cargo run -- --install` once to download and extract PostgreSQL, Valkey, MinIO, and Vault binaries into `botserver-stack/bin/`, initialize data directories, and download the LLM model. Then `cargo run` starts everything and serves at `http://localhost:8080`. Use `./reset.sh` to wipe and restart the local environment.
|
||||||
|
|
||||||
|
In container (Incus) production mode, services run in separate named containers. Start them all with `sudo incus start system tables vault directory drive cache llm vector_db`. Access the system container with `sudo incus exec system -- bash`. View botserver logs with `sudo incus exec system -- journalctl -u botserver -f`. The container layout is: `system` runs BotServer on 8080; `tables` runs PostgreSQL on 5432; `vault` runs Vault on 8200; `directory` runs Zitadel on 8080 internally (external port 9000 via iptables NAT); `drive` runs MinIO on 9100; `cache` runs Valkey on 6379; `llm` runs llama.cpp on 8081; `vector_db` runs Qdrant on 6333.
|
||||||
|
|
||||||
|
Use the `LOAD_ONLY` variable in `/opt/gbo/bin/.env` to filter which bots are loaded and monitored by DriveMonitor, for example `LOAD_ONLY=default,salesianos`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Debugging & Testing
|
||||||
|
|
||||||
|
To watch for errors live: `tail -f botserver.log | grep -i "error\|tool"`. To debug a specific tool: grep `Tool error` in logs, fix the `.bas` file in MinIO at `/{bot}.gbai/{bot}.gbdialog/{tool}.bas`, then wait for DriveMonitor to recompile (automatic on file change, in-memory only, no local `.ast` cache). Test in browser at `http://localhost:3000/{botname}`.
|
||||||
|
|
||||||
|
Common BASIC errors: `=== is not a valid operator` means you used JavaScript-style `===` — replace with `==` or use `--` for string separators. `Syntax error` means bad BASIC syntax — check parentheses and commas. `Tool execution failed` means a runtime error — check logs for stack trace.
|
||||||
|
|
||||||
|
For Playwright testing, navigate to `http://localhost:3000/<botname>`, snapshot to verify welcome message and suggestion buttons including Portuguese accents, click a suggestion, wait 3–5 seconds, snapshot, fill data, submit, then verify DB records and backend logs. If the browser hangs, run `pkill -9 -f brave; pkill -9 -f chrome; pkill -9 -f chromium`, wait 3 seconds, and navigate again. The chat window may overlap other apps — click the middle (restore) button to minimize it or navigate directly via URL.
|
||||||
|
|
||||||
|
WhatsApp routing is global — one number serves all bots, with routing determined by the `whatsapp-id` key in each bot's `config.csv`. The bot name is sent as the first message to route correctly.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Bot Scripts Architecture
|
||||||
|
|
||||||
|
`start.bas` is the entry point executed on WebSocket connect and on the first user message (once per session). It loads suggestion buttons via `ADD_SUGGESTION_TOOL` and marks the session in Redis to prevent re-runs. `{tool}.bas` files implement individual tools (e.g. `detecta.bas`). `tables.bas` is a special file — never call it with `CALL`; it is parsed automatically at compile time by `process_table_definitions()` and its table definitions are synced to the database via `sync_bot_tables()`. `init_folha.bas` handles initialization for specific features.
|
||||||
|
|
||||||
|
The `CALL` keyword can invoke in-memory procedures or `.bas` scripts by name. If the target is not in memory, botserver looks for `{name}.bas` in the bot's gbdialog folder in Drive. The `DETECT` keyword analyzes a database table for anomalies: it requires the table to exist (defined in `tables.bas`) and calls the BotModels API at `/api/anomaly/detect`.
|
||||||
|
|
||||||
|
Tool buttons use `MessageType::TOOL_EXEC` (id 6). When the frontend sends `message_type: 6` via WebSocket, the backend executes the named tool directly in `stream_response()`, bypassing KB injection and LLM entirely. The result appears in chat without any "/tool" prefix text. Other message types are: 0 EXTERNAL, 1 USER, 2 BOT_RESPONSE, 3 CONTINUE, 4 SUGGESTION, 5 CONTEXT_CHANGE.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Submodule Push Rule — Mandatory
|
||||||
|
|
||||||
|
Every time you push the main repo, you must also push all submodules. CI builds based on submodule commits — if a submodule is not pushed, CI deploys old code. Always push botserver, botui, and botlib to both `origin` and `alm` remotes before or alongside the main repo push.
|
||||||
|
|
||||||
|
The deploy workflow is: push to ALM → CI triggers on alm-ci → builds inside system container via SSH (to match glibc 2.36 on Debian 12 Bookworm, not the CI runner's glibc 2.41) → deploys binary → service auto-restarts. Verify by checking service status and logs about 10 minutes after pushing.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Zitadel Setup (Directory Service)
|
||||||
|
|
||||||
|
Zitadel runs in the `directory` container on port 8080 internally. External port 9000 is forwarded to it via iptables NAT on the system container. The database is `PROD-DIRECTORY` on the `tables` container. The PAT file is at `/opt/gbo/conf/directory/admin-pat.txt` on the directory container. Admin credentials are username `admin`, password `Admin123!`. Current version is Zitadel v4.13.1. **Known bug**: Web console UI will return 404 for environment.json when accessed via reverse proxy public domain. Use http://<host-ip>:9000/ui/console for administrative interface instead.
|
||||||
|
|
||||||
|
To reinstall: drop and recreate `PROD-DIRECTORY` on the tables container, write the init YAML to `/opt/gbo/conf/directory/zitadel-init-steps.yaml` (defining org name, admin user, and PAT expiry), then start Zitadel with env vars for the PostgreSQL host/port/database/credentials, `ZITADEL_EXTERNALSECURE=false`, `ZITADEL_EXTERNALDOMAIN=<directory-ip>`, `ZITADEL_EXTERNALPORT=9000`, and `ZITADEL_TLS_ENABLED=false`. Pass `--masterkey MasterkeyNeedsToHave32Characters`, `--tlsMode disabled`, and `--steps <yaml-path>`. Bootstrap takes about 90 seconds; verify with `curl -sf http://localhost:8080/debug/healthz`.
|
||||||
|
|
||||||
|
Key API endpoints: Use **v2 API endpoints** for all operations: `POST /v2/organizations/{org_id}/domains` to add domains, `POST /v2/users/new` to create users, `POST /oauth/v2/token` for access tokens, `GET /debug/healthz` for health. When calling externally via port 9000, include `Host: <directory-ip>` header. The v1 Management API is deprecated and not functional in this version.
|
||||||
|
|
||||||
|
|
||||||
|
## Frontend Standards & Performance
|
||||||
|
|
||||||
|
HTMX-first: the server returns HTML fragments, not JSON. Use `hx-get`, `hx-post`, `hx-target`, `hx-swap`, and WebSocket via htmx-ws. All assets must be local — no CDN links.
|
||||||
|
|
||||||
|
Release profile must use `opt-level = "z"`, `lto = true`, `codegen-units = 1`, `strip = true`, and `panic = "abort"`. Use `default-features = false` and opt into only needed features. Run `cargo tree --duplicates`, `cargo machete`, and `cargo audit` weekly.
|
||||||
|
|
||||||
|
Testing: unit tests live in per-crate `tests/` folders or `#[cfg(test)]` modules, run with `cargo test -p <crate>`. Integration tests live in `bottest/`, run with `cargo test -p bottest`. Aim for 80%+ coverage on critical paths; all error paths and security guards must be tested.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Core Directives Summary
|
||||||
|
|
||||||
|
Fix offline first — read all errors before compiling again. Batch by file — fix all errors in a file at once and write once. Verify last — only run `cargo check` after all fixes are applied. Delete dead code — never keep unused code. Git workflow — always push to all repositories (origin and alm). Target zero warnings and zero errors — loop until clean.
|
||||||
11600
Cargo.lock
generated
Normal file
11600
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -9,6 +9,7 @@ members = [
|
||||||
"bottest",
|
"bottest",
|
||||||
"botui",
|
"botui",
|
||||||
]
|
]
|
||||||
|
exclude = ["backup-to-s3"]
|
||||||
|
|
||||||
[workspace.lints.rust]
|
[workspace.lints.rust]
|
||||||
|
|
||||||
|
|
@ -109,6 +110,7 @@ url = "2.5"
|
||||||
dirs = "5.0"
|
dirs = "5.0"
|
||||||
tempfile = "3"
|
tempfile = "3"
|
||||||
walkdir = "2.5.0"
|
walkdir = "2.5.0"
|
||||||
|
notify = "8.0"
|
||||||
|
|
||||||
# ─── COMPRESSION / ARCHIVES ───
|
# ─── COMPRESSION / ARCHIVES ───
|
||||||
flate2 = "1.0"
|
flate2 = "1.0"
|
||||||
|
|
@ -174,7 +176,7 @@ indicatif = "0.18.0"
|
||||||
|
|
||||||
# ─── MEMORY ALLOCATOR ───
|
# ─── MEMORY ALLOCATOR ───
|
||||||
tikv-jemallocator = "0.6"
|
tikv-jemallocator = "0.6"
|
||||||
tikv-jemalloc-ctl = { version = "0.6", default-features = false }
|
tikv-jemalloc-ctl = { version = "0.6", default-features = false, features = ["stats"] }
|
||||||
|
|
||||||
# ─── SECRETS / VAULT ───
|
# ─── SECRETS / VAULT ───
|
||||||
vaultrs = "0.7"
|
vaultrs = "0.7"
|
||||||
|
|
@ -196,7 +198,7 @@ csv = "1.3"
|
||||||
tonic = { version = "0.14.2", default-features = false }
|
tonic = { version = "0.14.2", default-features = false }
|
||||||
|
|
||||||
# ─── STATIC FILES ───
|
# ─── STATIC FILES ───
|
||||||
rust-embed = "8.5"
|
rust-embed = { version = "8.5", features = ["interpolate-folder-path"] }
|
||||||
mime_guess = "2.0"
|
mime_guess = "2.0"
|
||||||
|
|
||||||
# ─── TAURI (Desktop/Mobile) ───
|
# ─── TAURI (Desktop/Mobile) ───
|
||||||
|
|
|
||||||
|
|
@ -1,204 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# DEPENDENCIES-DEV.sh - Development Dependencies for General Bots
|
|
||||||
#
|
|
||||||
# This script installs additional packages needed for BUILDING botserver from source.
|
|
||||||
# Only install these if you plan to compile the code yourself.
|
|
||||||
#
|
|
||||||
# Usage: sudo ./DEPENDENCIES-DEV.sh
|
|
||||||
#
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# Colors
|
|
||||||
RED='\033[0;31m'
|
|
||||||
GREEN='\033[0;32m'
|
|
||||||
YELLOW='\033[1;33m'
|
|
||||||
NC='\033[0m'
|
|
||||||
|
|
||||||
echo -e "${GREEN}========================================${NC}"
|
|
||||||
echo -e "${GREEN} General Bots Development Dependencies${NC}"
|
|
||||||
echo -e "${GREEN}========================================${NC}"
|
|
||||||
|
|
||||||
# Check root
|
|
||||||
if [ "$EUID" -ne 0 ]; then
|
|
||||||
echo -e "${RED}Error: Run as root (use sudo)${NC}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Detect OS
|
|
||||||
if [ -f /etc/os-release ]; then
|
|
||||||
. /etc/os-release
|
|
||||||
OS=$ID
|
|
||||||
else
|
|
||||||
echo -e "${RED}Error: Cannot detect OS${NC}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo -e "${YELLOW}OS: $OS${NC}"
|
|
||||||
|
|
||||||
install_debian_ubuntu() {
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y \
|
|
||||||
build-essential \
|
|
||||||
gcc \
|
|
||||||
g++ \
|
|
||||||
clang \
|
|
||||||
llvm-dev \
|
|
||||||
libclang-dev \
|
|
||||||
cmake \
|
|
||||||
make \
|
|
||||||
git \
|
|
||||||
pkg-config \
|
|
||||||
libssl-dev \
|
|
||||||
libpq-dev \
|
|
||||||
liblzma-dev \
|
|
||||||
zlib1g-dev \
|
|
||||||
libabseil-dev \
|
|
||||||
protobuf-compiler \
|
|
||||||
libprotobuf-dev \
|
|
||||||
automake \
|
|
||||||
bison \
|
|
||||||
flex \
|
|
||||||
gperf \
|
|
||||||
libtool \
|
|
||||||
m4 \
|
|
||||||
nasm \
|
|
||||||
python3 \
|
|
||||||
python3-pip \
|
|
||||||
nodejs \
|
|
||||||
npm
|
|
||||||
|
|
||||||
# Cross-compilation toolchains
|
|
||||||
apt-get install -y \
|
|
||||||
gcc-aarch64-linux-gnu \
|
|
||||||
gcc-arm-linux-gnueabihf \
|
|
||||||
gcc-x86-64-linux-gnu || true
|
|
||||||
}
|
|
||||||
|
|
||||||
install_fedora_rhel() {
|
|
||||||
dnf groupinstall -y "Development Tools"
|
|
||||||
dnf install -y \
|
|
||||||
gcc \
|
|
||||||
gcc-c++ \
|
|
||||||
clang \
|
|
||||||
llvm-devel \
|
|
||||||
clang-devel \
|
|
||||||
cmake \
|
|
||||||
make \
|
|
||||||
git \
|
|
||||||
pkgconf-devel \
|
|
||||||
openssl-devel \
|
|
||||||
libpq-devel \
|
|
||||||
xz-devel \
|
|
||||||
zlib-devel \
|
|
||||||
abseil-cpp-devel \
|
|
||||||
protobuf-compiler \
|
|
||||||
protobuf-devel \
|
|
||||||
automake \
|
|
||||||
bison \
|
|
||||||
flex \
|
|
||||||
gperf \
|
|
||||||
libtool \
|
|
||||||
m4 \
|
|
||||||
nasm \
|
|
||||||
python3 \
|
|
||||||
python3-pip \
|
|
||||||
nodejs \
|
|
||||||
npm
|
|
||||||
}
|
|
||||||
|
|
||||||
install_arch() {
|
|
||||||
pacman -Sy --noconfirm \
|
|
||||||
base-devel \
|
|
||||||
gcc \
|
|
||||||
clang \
|
|
||||||
llvm \
|
|
||||||
cmake \
|
|
||||||
make \
|
|
||||||
git \
|
|
||||||
pkgconf \
|
|
||||||
openssl \
|
|
||||||
postgresql-libs \
|
|
||||||
xz \
|
|
||||||
zlib \
|
|
||||||
abseil-cpp \
|
|
||||||
protobuf \
|
|
||||||
automake \
|
|
||||||
bison \
|
|
||||||
flex \
|
|
||||||
gperf \
|
|
||||||
libtool \
|
|
||||||
m4 \
|
|
||||||
nasm \
|
|
||||||
python \
|
|
||||||
python-pip \
|
|
||||||
nodejs \
|
|
||||||
npm
|
|
||||||
}
|
|
||||||
|
|
||||||
install_alpine() {
|
|
||||||
apk add --no-cache \
|
|
||||||
build-base \
|
|
||||||
gcc \
|
|
||||||
g++ \
|
|
||||||
clang \
|
|
||||||
llvm-dev \
|
|
||||||
clang-dev \
|
|
||||||
cmake \
|
|
||||||
make \
|
|
||||||
git \
|
|
||||||
pkgconf-dev \
|
|
||||||
openssl-dev \
|
|
||||||
postgresql-dev \
|
|
||||||
xz-dev \
|
|
||||||
zlib-dev \
|
|
||||||
abseil-cpp-dev \
|
|
||||||
protobuf-dev \
|
|
||||||
protoc \
|
|
||||||
automake \
|
|
||||||
bison \
|
|
||||||
flex \
|
|
||||||
gperf \
|
|
||||||
libtool \
|
|
||||||
m4 \
|
|
||||||
nasm \
|
|
||||||
python3 \
|
|
||||||
py3-pip \
|
|
||||||
nodejs \
|
|
||||||
npm
|
|
||||||
}
|
|
||||||
|
|
||||||
case $OS in
|
|
||||||
ubuntu|debian|linuxmint|pop)
|
|
||||||
install_debian_ubuntu
|
|
||||||
;;
|
|
||||||
fedora|rhel|centos|rocky|almalinux)
|
|
||||||
install_fedora_rhel
|
|
||||||
;;
|
|
||||||
arch|manjaro)
|
|
||||||
install_arch
|
|
||||||
;;
|
|
||||||
alpine)
|
|
||||||
install_alpine
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo -e "${RED}Unsupported OS: $OS${NC}"
|
|
||||||
echo "Required development packages:"
|
|
||||||
echo " - build-essential/base-devel"
|
|
||||||
echo " - gcc, g++, clang"
|
|
||||||
echo " - cmake, make, git"
|
|
||||||
echo " - Development headers for:"
|
|
||||||
echo " - OpenSSL, PostgreSQL, XZ, zlib"
|
|
||||||
echo " - Abseil, Protobuf, LLVM"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
echo -e "${GREEN}Development dependencies installed!${NC}"
|
|
||||||
echo ""
|
|
||||||
echo "Install Rust if not already installed:"
|
|
||||||
echo " curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh"
|
|
||||||
echo ""
|
|
||||||
echo "Then build with:"
|
|
||||||
echo " cargo build --release"
|
|
||||||
77
DEPENDENCIES.ps1
Normal file
77
DEPENDENCIES.ps1
Normal file
|
|
@ -0,0 +1,77 @@
|
||||||
|
<#
|
||||||
|
.SYNOPSIS
|
||||||
|
Installs runtime dependencies for General Bots on Windows.
|
||||||
|
|
||||||
|
.DESCRIPTION
|
||||||
|
This script downloads and configures the system libraries required to build
|
||||||
|
and run BotServer on Windows. It downloads PostgreSQL binaries (for libpq)
|
||||||
|
and sets the PQ_LIB_DIR environment variable permanently.
|
||||||
|
|
||||||
|
.EXAMPLE
|
||||||
|
PS> .\DEPENDENCIES.ps1
|
||||||
|
#>
|
||||||
|
|
||||||
|
$ErrorActionPreference = 'Stop'
|
||||||
|
|
||||||
|
# ─── COLORS ───
|
||||||
|
function Write-Step { param($msg) Write-Host " * $msg" -ForegroundColor Green }
|
||||||
|
function Write-Warn { param($msg) Write-Host " ! $msg" -ForegroundColor Yellow }
|
||||||
|
function Write-Err { param($msg) Write-Host " x $msg" -ForegroundColor Red }
|
||||||
|
|
||||||
|
Write-Host "========================================" -ForegroundColor Green
|
||||||
|
Write-Host " General Bots Runtime Dependencies" -ForegroundColor Green
|
||||||
|
Write-Host " (Windows)" -ForegroundColor Green
|
||||||
|
Write-Host "========================================" -ForegroundColor Green
|
||||||
|
Write-Host ""
|
||||||
|
|
||||||
|
# ─── PostgreSQL binaries (libpq.lib for Diesel ORM) ───
|
||||||
|
$PgsqlDir = "C:\pgsql\pgsql"
|
||||||
|
$PgsqlLib = "$PgsqlDir\lib\libpq.lib"
|
||||||
|
$PgsqlZipUrl = "https://get.enterprisedb.com/postgresql/postgresql-17.4-1-windows-x64-binaries.zip"
|
||||||
|
$PgsqlZip = "$env:TEMP\pgsql.zip"
|
||||||
|
|
||||||
|
if (Test-Path $PgsqlLib) {
|
||||||
|
Write-Step "PostgreSQL binaries already present at $PgsqlDir"
|
||||||
|
} else {
|
||||||
|
Write-Host "`nDownloading PostgreSQL binaries..." -ForegroundColor Cyan
|
||||||
|
Write-Host " URL: $PgsqlZipUrl"
|
||||||
|
Write-Host " This may take a few minutes (~300MB)...`n"
|
||||||
|
|
||||||
|
Invoke-WebRequest -Uri $PgsqlZipUrl -OutFile $PgsqlZip -UseBasicParsing
|
||||||
|
|
||||||
|
Write-Host "Extracting to C:\pgsql ..."
|
||||||
|
if (Test-Path "C:\pgsql") { Remove-Item "C:\pgsql" -Recurse -Force }
|
||||||
|
Expand-Archive -Path $PgsqlZip -DestinationPath "C:\pgsql" -Force
|
||||||
|
Remove-Item $PgsqlZip -Force -ErrorAction SilentlyContinue
|
||||||
|
|
||||||
|
if (Test-Path $PgsqlLib) {
|
||||||
|
Write-Step "PostgreSQL binaries installed successfully."
|
||||||
|
} else {
|
||||||
|
Write-Err "Failed to find libpq.lib after extraction!"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Set PQ_LIB_DIR permanently for the current user
|
||||||
|
$CurrentPqDir = [System.Environment]::GetEnvironmentVariable("PQ_LIB_DIR", "User")
|
||||||
|
if ($CurrentPqDir -ne "$PgsqlDir\lib") {
|
||||||
|
[System.Environment]::SetEnvironmentVariable("PQ_LIB_DIR", "$PgsqlDir\lib", "User")
|
||||||
|
$env:PQ_LIB_DIR = "$PgsqlDir\lib"
|
||||||
|
Write-Step "PQ_LIB_DIR set to '$PgsqlDir\lib' (User environment variable)"
|
||||||
|
} else {
|
||||||
|
Write-Step "PQ_LIB_DIR already configured."
|
||||||
|
}
|
||||||
|
|
||||||
|
# ─── Summary ───
|
||||||
|
Write-Host ""
|
||||||
|
Write-Host "========================================" -ForegroundColor Green
|
||||||
|
Write-Host " Dependencies installed!" -ForegroundColor Green
|
||||||
|
Write-Host "========================================" -ForegroundColor Green
|
||||||
|
Write-Host ""
|
||||||
|
Write-Host "You can now build and run:" -ForegroundColor Cyan
|
||||||
|
Write-Host " cargo build -p botserver"
|
||||||
|
Write-Host " cargo build -p botui"
|
||||||
|
Write-Host " .\restart.ps1"
|
||||||
|
Write-Host ""
|
||||||
|
Write-Host "NOTE: If this is the first time, restart your terminal" -ForegroundColor Yellow
|
||||||
|
Write-Host " so PQ_LIB_DIR takes effect." -ForegroundColor Yellow
|
||||||
|
|
@ -1,44 +1,44 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
#
|
#
|
||||||
# DEPENDENCIES.sh - Runtime Dependencies for General Bots
|
# DEPENDENCIES.sh - Runtime Dependencies for General Bots
|
||||||
#
|
#
|
||||||
# This script installs all system packages required to RUN botserver binary.
|
# This script installs all system packages required to RUN botserver binary.
|
||||||
# These are the minimal dependencies needed for production deployment.
|
# These are the minimal dependencies needed for production deployment.
|
||||||
#
|
#
|
||||||
# Usage: sudo ./DEPENDENCIES.sh
|
# Usage: sudo ./DEPENDENCIES.sh
|
||||||
#
|
#
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
# Colors
|
# Colors
|
||||||
RED='\033[0;31m'
|
RED='\033[0;31m'
|
||||||
GREEN='\033[0;32m'
|
GREEN='\033[0;32m'
|
||||||
YELLOW='\033[1;33m'
|
YELLOW='\033[1;33m'
|
||||||
NC='\033[0m'
|
NC='\033[0m'
|
||||||
|
|
||||||
echo -e "${GREEN}========================================${NC}"
|
echo -e "${GREEN}========================================${NC}"
|
||||||
echo -e "${GREEN} General Bots Runtime Dependencies${NC}"
|
echo -e "${GREEN} General Bots Runtime Dependencies${NC}"
|
||||||
echo -e "${GREEN}========================================${NC}"
|
echo -e "${GREEN}========================================${NC}"
|
||||||
|
|
||||||
# Check root
|
# Check root
|
||||||
if [ "$EUID" -ne 0 ]; then
|
if [ "$EUID" -ne 0 ]; then
|
||||||
echo -e "${RED}Error: Run as root (use sudo)${NC}"
|
echo -e "${RED}Error: Run as root (use sudo)${NC}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Detect OS
|
# Detect OS
|
||||||
if [ -f /etc/os-release ]; then
|
if [ -f /etc/os-release ]; then
|
||||||
. /etc/os-release
|
. /etc/os-release
|
||||||
OS=$ID
|
OS=$ID
|
||||||
else
|
else
|
||||||
echo -e "${RED}Error: Cannot detect OS${NC}"
|
echo -e "${RED}Error: Cannot detect OS${NC}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo -e "${YELLOW}OS: $OS${NC}"
|
echo -e "${YELLOW}OS: $OS${NC}"
|
||||||
|
|
||||||
|
install_debian_ubuntu() {
|
||||||
|
|
||||||
install_debian_ubuntu() {
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y \
|
apt-get install -y \
|
||||||
libpq5 \
|
libpq5 \
|
||||||
libssl3 \
|
libssl3 \
|
||||||
|
|
@ -47,21 +47,14 @@ install_debian_ubuntu() {
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
curl \
|
curl \
|
||||||
wget \
|
wget \
|
||||||
libabseil20210324 \
|
|
||||||
libclang1 \
|
libclang1 \
|
||||||
pkg-config \
|
pkg-config \
|
||||||
snapd
|
snapd
|
||||||
|
|
||||||
# LXC for containers
|
|
||||||
snap install lxd || apt-get install -y lxd || true
|
|
||||||
|
|
||||||
# Initialize LXD
|
}
|
||||||
if command -v lxd &> /dev/null && ! lxc list &> /dev/null 2>&1; then
|
|
||||||
lxd init --auto || true
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
install_fedora_rhel() {
|
install_fedora_rhel() {
|
||||||
dnf install -y \
|
dnf install -y \
|
||||||
libpq \
|
libpq \
|
||||||
openssl-libs \
|
openssl-libs \
|
||||||
|
|
@ -75,9 +68,9 @@ install_fedora_rhel() {
|
||||||
pkgconf-pkg-config \
|
pkgconf-pkg-config \
|
||||||
lxc \
|
lxc \
|
||||||
lxc-templates
|
lxc-templates
|
||||||
}
|
}
|
||||||
|
|
||||||
install_arch() {
|
install_arch() {
|
||||||
pacman -Sy --noconfirm \
|
pacman -Sy --noconfirm \
|
||||||
postgresql-libs \
|
postgresql-libs \
|
||||||
openssl \
|
openssl \
|
||||||
|
|
@ -90,9 +83,9 @@ install_arch() {
|
||||||
clang \
|
clang \
|
||||||
pkgconf \
|
pkgconf \
|
||||||
lxc
|
lxc
|
||||||
}
|
}
|
||||||
|
|
||||||
install_alpine() {
|
install_alpine() {
|
||||||
apk add --no-cache \
|
apk add --no-cache \
|
||||||
libpq \
|
libpq \
|
||||||
openssl \
|
openssl \
|
||||||
|
|
@ -105,9 +98,9 @@ install_alpine() {
|
||||||
clang \
|
clang \
|
||||||
pkgconf \
|
pkgconf \
|
||||||
lxc
|
lxc
|
||||||
}
|
}
|
||||||
|
|
||||||
case $OS in
|
case $OS in
|
||||||
ubuntu|debian|linuxmint|pop)
|
ubuntu|debian|linuxmint|pop)
|
||||||
install_debian_ubuntu
|
install_debian_ubuntu
|
||||||
;;
|
;;
|
||||||
|
|
@ -132,9 +125,9 @@ case $OS in
|
||||||
echo " - LXC (containers)"
|
echo " - LXC (containers)"
|
||||||
exit 1
|
exit 1
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
echo -e "${GREEN}Runtime dependencies installed!${NC}"
|
echo -e "${GREEN}Runtime dependencies installed!${NC}"
|
||||||
echo ""
|
echo ""
|
||||||
echo "You can now run:"
|
echo "You can now run:"
|
||||||
echo " ./botserver"
|
echo " ./botserver"
|
||||||
|
|
|
||||||
127
PROD.md
Normal file
127
PROD.md
Normal file
|
|
@ -0,0 +1,127 @@
|
||||||
|
# Production Environment Guide (Compact)
|
||||||
|
|
||||||
|
## CRITICAL RULES — READ FIRST
|
||||||
|
|
||||||
|
NEVER INCLUDE HERE CREDENTIALS OR COMPANY INFORMATION, THIS IS COMPANY AGNOSTIC.
|
||||||
|
Always manage services with `systemctl` inside the `system` Incus container. Never run `/opt/gbo/bin/botserver` or `/opt/gbo/bin/botui` directly — they will fail because they won't load the `.env` file containing Vault credentials and paths. The correct commands are `sudo incus exec system -- systemctl start|stop|restart|status botserver` and the same for `ui`. Systemctl handles environment loading, auto-restart, logging, and dependencies.
|
||||||
|
|
||||||
|
Never push secrets (API keys, passwords, tokens) to git. Never commit `init.json` (it contains Vault unseal keys). All secrets must come from Vault — only `VAULT_*` variables are allowed in `.env`. Never deploy manually via scp or ssh; always use CI/CD. Always push all submodules (botserver, botui, botlib) before or alongside the main repo. Always ask before pushing to ALM.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Infrastructure Overview
|
||||||
|
|
||||||
|
The host machine is `PROD-GBO1`, accessed via `ssh user@<hostname>`, running Incus (an LXD fork) as hypervisor. All services run inside named Incus containers. You enter containers with `sudo incus exec <container> -- <command>` and list them with `sudo incus list`.
|
||||||
|
|
||||||
|
The containers and their roles are: `system` runs botserver on port 5858 and botui on port 5859; `alm-ci` runs the Forgejo Actions CI runner; `alm` hosts the Forgejo git server; `tables` runs PostgreSQL on port 5432; `cache` runs Valkey/Redis on port 6379; `drive` runs MinIO object storage on port 9100; `vault` runs HashiCorp Vault on port 8200; `vector` runs Qdrant on port 6333.
|
||||||
|
|
||||||
|
Externally, botserver is reachable at `https://system.example.com` and botui at `https://chat.example.com`. Internally, botui's `BOTSERVER_URL` must be `http://localhost:5858` — never the external HTTPS URL, because the Rust proxy runs server-side and needs direct localhost access.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Services Detail
|
||||||
|
|
||||||
|
Botserver runs as user `gbuser`, binary at `/opt/gbo/bin/botserver`, logs at `/opt/gbo/logs/out.log` and `/opt/gbo/logs/err.log`, systemd unit at `/etc/systemd/system/botserver.service`, env loaded from `/opt/gbo/bin/.env`. Bot BASIC scripts live under `/opt/gbo/data/<botname>.gbai/<botname>.gbdialog/*.bas`; compiled AST cache goes to `/opt/gbo/work/`.
|
||||||
|
|
||||||
|
The directory service runs Zitadel as user `root`, binary at `/opt/gbo/bin/zitadel`, logs at `/opt/gbo/logs/zitadel.log`, systemd unit at `/etc/systemd/system/directory.service`, and loads environment from the service configuration. Zitadel provides identity management and OAuth2 services for the platform.
|
||||||
|
|
||||||
|
Internally, Zitadel listens on port 8080 within the directory container. For external access:
|
||||||
|
- Via public domain (HTTPS): `https://login.example.com` (configured through proxy container)
|
||||||
|
- Via host IP (HTTP): `http://<host-ip>:9000` (direct container port forwarding)
|
||||||
|
- Via container IP (HTTP): `http://<directory-container-ip>:9000` (direct container access)
|
||||||
|
Access the Zitadel console at `https://login.example.com/ui/console` with admin credentials. Zitadel implements v1 Management API (deprecated) and v2 Organization/User services. Always use the v2 endpoints under `/v2/organizations` and `/v2/users` for all operations.
|
||||||
|
|
||||||
|
The botserver bootstrap also manages: Vault (secrets), PostgreSQL (database), Valkey (cache, password auth), MinIO (object storage), Zitadel (identity provider), and llama.cpp (LLM).
|
||||||
|
To obtain a PAT for Zitadel API access, check /opt/gbo/conf/directory/admin-pat.txt in the directory container. Use it with curl by setting the Authorization header: `Authorization: Bearer $(cat /opt/gbo/conf/directory/admin-pat.txt)` and include `-H \"Host: <IP> \"` for correct host resolution (replace with your directory container IP).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Common Operations
|
||||||
|
|
||||||
|
**Check status:** `sudo incus exec system -- systemctl status botserver --no-pager` (same for `ui`). To check process existence: `sudo incus exec system -- pgrep -f botserver`.
|
||||||
|
|
||||||
|
**View logs:** For systemd journal: `sudo incus exec system -- journalctl -u botserver --no-pager -n 50`. For application logs: `sudo incus exec system -- tail -50 /opt/gbo/logs/out.log` or `err.log`. For live tail: `sudo incus exec system -- tail -f /opt/gbo/logs/out.log`.
|
||||||
|
|
||||||
|
**Restart:** `sudo incus exec system -- systemctl restart botserver` and same for `ui`. Never run the binary directly.
|
||||||
|
|
||||||
|
**Emergency manual deploy:** Kill the old process with `sudo incus exec system -- killall botserver`, copy the new binary from `/opt/gbo/ci/botserver/target/debug/botserver` to `/opt/gbo/bin/botserver`, set permissions with `chmod +x` and `chown gbuser:gbuser`, then start with `systemctl start botserver`.
|
||||||
|
|
||||||
|
**Transfer bot files:** Archive locally with `tar czf /tmp/bots.tar.gz -C /opt/gbo/data <botname>.gbai`, copy to host with `scp`, then extract inside container with `sudo incus exec system -- bash -c 'tar xzf /tmp/bots.tar.gz -C /opt/gbo/data/'`. Clear compiled cache with `find /opt/gbo/data -name "*.ast" -delete` and same for `/opt/gbo/work`.
|
||||||
|
|
||||||
|
**Snapshots:** `sudo incus snapshot list system` to list, `sudo incus snapshot restore system <name>` to restore.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## CI/CD Pipeline
|
||||||
|
|
||||||
|
Repositories exist on both GitHub and the internal ALM (Forgejo). The four repos are `gb` (main workspace), `botserver`, `botui`, and `botlib`. Always push submodules first (`cd botserver && git push alm main && git push origin main`), then update submodule references in the root repo and push that too.
|
||||||
|
|
||||||
|
The CI runner container (`alm-ci`) runs Debian Trixie with glibc 2.41, but the `system` container runs Debian 12 Bookworm with glibc 2.36. Binaries compiled on the CI runner are incompatible with the system container. The CI workflow (`botserver/.forgejo/workflows/botserver.yaml`) solves this by transferring source to the system container via `tar | ssh` and building there. The workflow triggers on pushes to `main`, clones repos, transfers source, builds inside system container, deploys the binary, and verifies botserver is running.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## DriveMonitor & Bot Configuration
|
||||||
|
|
||||||
|
DriveMonitor is a background service inside botserver that watches MinIO buckets and syncs changes to the local filesystem and database every 10 seconds. It monitors three directory types per bot: the `.gbdialog/` folder for BASIC scripts (downloads and recompiles on change), the `.gbot/` folder for `config.csv` (syncs to the `bot_configuration` database table), and the `.gbkb/` folder for knowledge base documents (downloads and indexes for vector search).
|
||||||
|
|
||||||
|
Bot configuration is stored in two PostgreSQL tables inside the `botserver` database. The `bot_configuration` table holds key-value pairs with columns `bot_id`, `config_key`, `config_value`, `config_type`, `is_encrypted`, and `updated_at`. The `gbot_config_sync` table tracks sync state with columns `bot_id`, `config_file_path`, `last_sync_at`, `file_hash`, and `sync_count`.
|
||||||
|
|
||||||
|
The `config.csv` format is a plain CSV with no header: each line is `key,value`, for example `llm-provider,groq` or `theme-color1,#cc0000`. DriveMonitor syncs it when the file ETag changes in MinIO, on botserver startup, or after a restart.
|
||||||
|
|
||||||
|
**Check config status:** Query `bot_configuration` via `sudo incus exec tables -- psql -h localhost -U postgres -d botserver -c "SELECT config_key, config_value FROM bot_configuration WHERE bot_id = (SELECT id FROM bots WHERE name = 'salesianos') ORDER BY config_key;"`. Check sync state via the `gbot_config_sync` table. Inspect the bucket directly with `sudo incus exec drive -- /opt/gbo/bin/mc cat local/salesianos.gbai/salesianos.gbot/config.csv`.
|
||||||
|
|
||||||
|
**Debug DriveMonitor:** Monitor live logs with `sudo incus exec system -- tail -f /opt/gbo/logs/out.log | grep -E "(DRIVE_MONITOR|check_gbot|config)"`. An empty `gbot_config_sync` table means DriveMonitor has not synced yet. If no new log entries appear after 30 seconds, the loop may be stuck — restart botserver with systemctl to clear the state.
|
||||||
|
|
||||||
|
**Common config issues:** If config.csv is missing from the bucket, create and upload it with `mc cp`. If the database shows stale values, restart botserver to force a fresh sync, or as a temporary fix update the database directly with `UPDATE bot_configuration SET config_value = 'groq', updated_at = NOW() WHERE ...`. To force a re-sync without restarting, copy config.csv over itself with `mc cp local/... local/...` to change the ETag.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MinIO (Drive) Operations
|
||||||
|
|
||||||
|
All bot files live in MinIO buckets. Use the `mc` CLI at `/opt/gbo/bin/mc` from inside the `drive` container. The bucket structure per bot is: `{bot}.gbai/` as root, `{bot}.gbai/{bot}.gbdialog/` for BASIC scripts, `{bot}.gbai/{bot}.gbot/` for config.csv, and `{bot}.gbai/{bot}.gbkb/` for knowledge base folders.
|
||||||
|
|
||||||
|
Common mc commands: `mc ls local/` lists all buckets; `mc ls local/salesianos.gbai/` lists a bucket; `mc cat local/.../start.bas` prints a file; `mc cp local/.../file /tmp/file` downloads; `mc cp /tmp/file local/.../file` uploads (this triggers DriveMonitor recompile); `mc stat local/.../config.csv` shows ETag and metadata; `mc mb local/newbot.gbai` creates a bucket; `mc rb local/oldbot.gbai` removes an empty bucket.
|
||||||
|
|
||||||
|
If mc is not found, use the full path `/opt/gbo/bin/mc`. If alias `local` is not configured, check with `mc config host list`. If MinIO is not running, check with `sudo incus exec drive -- systemctl status minio`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Vault Security Architecture
|
||||||
|
|
||||||
|
HashiCorp Vault is the single source of truth for all secrets. Botserver reads `VAULT_ADDR` and `VAULT_TOKEN` from `/opt/gbo/bin/.env` at startup, initializes a TLS/mTLS client, then reads credentials from Vault paths. If Vault is unavailable, it falls back to defaults. The `.env` file must only contain `VAULT_*` variables plus `PORT`, `DATA_DIR`, `WORK_DIR`, and `LOAD_ONLY`.
|
||||||
|
|
||||||
|
**Global Vault paths:** `gbo/tables` holds PostgreSQL credentials; `gbo/drive` holds MinIO access key and secret; `gbo/cache` holds Valkey password; `gbo/llm` holds LLM URL and API keys; `gbo/directory` holds Zitadel config; `gbo/email` holds SMTP credentials; `gbo/vectordb` holds Qdrant config; `gbo/jwt` holds JWT signing secret; `gbo/encryption` holds the master encryption key. Organization-scoped secrets follow patterns like `gbo/orgs/{org_id}/bots/{bot_id}` and tenant infrastructure uses `gbo/tenants/{tenant_id}/infrastructure`.
|
||||||
|
|
||||||
|
**Credential resolution:** For any service, botserver checks the most specific Vault path first (org+bot level), falls back to a default bot path, then falls back to the global path, and only uses environment variables as a last resort in development.
|
||||||
|
|
||||||
|
**Verify Vault health:** `sudo incus exec vault -- curl -k -sf https://localhost:8200/v1/sys/health` should return JSON with `"sealed":false`. To read a secret: set `VAULT_ADDR`, `VAULT_TOKEN`, and `VAULT_CACERT` then run `vault kv get secret/gbo/tables`. To test from the system container, use curl with `--cacert /opt/gbo/conf/system/certificates/ca/ca.crt` and `-H "X-Vault-Token: <token>"`.
|
||||||
|
|
||||||
|
**init.json** is stored at `/opt/gbo/bin/botserver-stack/conf/vault/vault-conf/init.json` and contains the root token and 5 unseal keys (3 needed to unseal). Never commit this file to git. Store it encrypted in a secure location.
|
||||||
|
|
||||||
|
**Vault troubleshooting — cannot connect:** Check that the vault container's systemd unit is running, verify the token in `.env` is not expired with `vault token lookup`, confirm the CA cert path in `.env` matches the actual file location, and test network connectivity from system to vault container. To generate a new token: `vault token create -policy="botserver" -ttl="8760h" -format=json` then update `.env` and restart botserver.
|
||||||
|
|
||||||
|
**Vault troubleshooting — secrets missing:** Run `vault kv get secret/gbo/tables` (and other paths) to check if secrets exist. If a path returns NOT FOUND, add secrets with `vault kv put secret/gbo/tables host=<ip> port=5432 database=botserver username=gbuser password=<pw>` and similar for other paths.
|
||||||
|
|
||||||
|
**Vault sealed after restart:** Run `vault operator unseal <key1>`, repeat with key2 and key3 (3 of 5 keys from init.json), then verify with `vault status`.
|
||||||
|
|
||||||
|
**TLS certificate errors:** Confirm `/opt/gbo/conf/system/certificates/ca/ca.crt` exists in the system container. If missing, copy it from the vault container using `incus file pull vault/opt/gbo/conf/vault/ca.crt /tmp/ca.crt` then place it at the expected path.
|
||||||
|
|
||||||
|
**Vault snapshots:** Stop vault, run `sudo incus snapshot create vault backup-$(date +%Y%m%d-%H%M)`, start vault. Restore with `sudo incus snapshot restore vault <name>` while vault is stopped.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Troubleshooting Quick Reference
|
||||||
|
|
||||||
|
**GLIBC mismatch (`GLIBC_2.39 not found`):** The binary was compiled on the CI runner (glibc 2.41) not inside the system container (glibc 2.36). The CI workflow must SSH into the system container to build. Check `botserver.yaml` to confirm this.
|
||||||
|
|
||||||
|
**botserver won't start:** Run `sudo incus exec system -- ldd /opt/gbo/bin/botserver | grep "not found"` to check for missing libraries. Run `sudo incus exec system -- timeout 10 /opt/gbo/bin/botserver 2>&1` to see startup errors. Confirm `/opt/gbo/data/` exists and is accessible.
|
||||||
|
|
||||||
|
**botui can't reach botserver:** Check that the `ui.service` systemd file has `BOTSERVER_URL=http://localhost:5858` — not the external HTTPS URL. Fix with `sed -i 's|BOTSERVER_URL=.*|BOTSERVER_URL=http://localhost:5858|'` on the service file, then `systemctl daemon-reload` and `systemctl restart ui`.
|
||||||
|
|
||||||
|
**Suggestions not showing:** Confirm bot `.bas` files exist under `/opt/gbo/data/<bot>.gbai/<bot>.gbdialog/`. Check logs for compilation errors. Clear the AST cache in `/opt/gbo/work/` and restart botserver.
|
||||||
|
|
||||||
|
**IPv6 DNS timeouts on external APIs (Groq, Cloudflare):** The container's DNS may return AAAA records without IPv6 connectivity. The container should have `IPV6=no` in its network config and `gai.conf` set appropriately. Check for `RES_OPTIONS=inet4` in `botserver.service` if issues persist.
|
||||||
|
|
||||||
|
**Logs show development paths instead of `/opt/gbo/data/`:** Botserver is using hardcoded dev paths. Check `.env` has `DATA_DIR=/opt/gbo/data/` and `WORK_DIR=/opt/gbo/work/`, verify the systemd unit has `EnvironmentFile=/opt/gbo/bin/.env`, and confirm Vault is reachable so service discovery works. Expected startup log lines include `info watcher:Watching data directory /opt/gbo/data` and `info botserver:BotServer started successfully on port 5858`.
|
||||||
|
|
||||||
|
**Migrations not running after push:** If `stat /opt/gbo/bin/botserver` shows old timestamp and `__diesel_schema_migrations` table has no new entries, CI did not rebuild. Make a trivial code change (e.g., add a comment) in botserver and push again to force rebuild.
|
||||||
383
PROMPT.md
383
PROMPT.md
|
|
@ -1,383 +0,0 @@
|
||||||
# General Bots Workspace - Master Development Guide
|
|
||||||
|
|
||||||
**Version:** 6.2.0 - DO NOT CHANGE
|
|
||||||
**Project:** General Bots Workspace (Rust Monorepo)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 📁 WORKSPACE STRUCTURE
|
|
||||||
|
|
||||||
| Crate | Purpose | Port | Tech Stack |
|
|
||||||
|-------|---------|------|------------|
|
|
||||||
| **botserver** | Main API server, business logic | 8088 | Axum, Diesel, Rhai BASIC |
|
|
||||||
| **botui** | Web UI server (dev) + proxy | 3000 | Axum, HTML/HTMX/CSS |
|
|
||||||
| **botapp** | Desktop app wrapper | - | Tauri 2 |
|
|
||||||
| **botlib** | Shared library | - | Core types, errors |
|
|
||||||
| **botbook** | Documentation | - | mdBook |
|
|
||||||
| **bottest** | Integration tests | - | tokio-test |
|
|
||||||
| **botdevice** | IoT/Device support | - | Rust |
|
|
||||||
| **botmodels** | Data models visualization | - | - |
|
|
||||||
| **botplugin** | Browser extension | - | JS |
|
|
||||||
|
|
||||||
### Key Paths
|
|
||||||
- **Binary:** `target/debug/botserver`
|
|
||||||
- **Run from:** `botserver/` directory
|
|
||||||
- **Env file:** `botserver/.env`
|
|
||||||
- **Stack:** `botserver/botserver-stack/`
|
|
||||||
- **UI Files:** `botui/ui/suite/`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🔥 ERROR FIXING WORKFLOW
|
|
||||||
|
|
||||||
### Mode 1: OFFLINE Batch Fix (PREFERRED)
|
|
||||||
|
|
||||||
When given error output:
|
|
||||||
|
|
||||||
```
|
|
||||||
1. Read ENTIRE error list first
|
|
||||||
2. Group errors by file
|
|
||||||
3. For EACH file with errors:
|
|
||||||
a. View file → understand context
|
|
||||||
b. Fix ALL errors in that file
|
|
||||||
c. Write once with all fixes
|
|
||||||
4. Move to next file
|
|
||||||
5. REPEAT until ALL errors addressed
|
|
||||||
6. ONLY THEN → verify with build/diagnostics
|
|
||||||
```
|
|
||||||
|
|
||||||
**NEVER run cargo build/check/clippy DURING fixing**
|
|
||||||
**Fix ALL errors OFFLINE first, verify ONCE at the end**
|
|
||||||
|
|
||||||
### Mode 2: Interactive Loop
|
|
||||||
|
|
||||||
```
|
|
||||||
LOOP UNTIL (0 warnings AND 0 errors):
|
|
||||||
1. Run diagnostics → pick file with issues
|
|
||||||
2. Read entire file
|
|
||||||
3. Fix ALL issues in that file
|
|
||||||
4. Write file once with all fixes
|
|
||||||
5. Verify with diagnostics
|
|
||||||
6. CONTINUE LOOP
|
|
||||||
END LOOP
|
|
||||||
```
|
|
||||||
|
|
||||||
### Common Error Patterns
|
|
||||||
|
|
||||||
| Error | Fix |
|
|
||||||
|-------|-----|
|
|
||||||
| `expected i64, found u64` | `value as i64` |
|
|
||||||
| `expected Option<T>, found T` | `Some(value)` |
|
|
||||||
| `expected T, found Option<T>` | `value.unwrap_or(default)` |
|
|
||||||
| `cannot multiply f32 by f64` | `f64::from(f32_val) * f64_val` |
|
|
||||||
| `no field X on type Y` | Check struct definition |
|
|
||||||
| `no variant X found` | Check enum definition |
|
|
||||||
| `function takes N arguments` | Match function signature |
|
|
||||||
| `cannot find function` | Add missing function or fix import |
|
|
||||||
| `unused variable` | Delete or use with `..` in patterns |
|
|
||||||
| `unused import` | Delete the import line |
|
|
||||||
| `cannot move out of X because borrowed` | Use scoping `{ }` to limit borrow |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🧠 MEMORY MANAGEMENT
|
|
||||||
|
|
||||||
When compilation fails due to memory issues (process "Killed"):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pkill -9 cargo; pkill -9 rustc; pkill -9 botserver
|
|
||||||
CARGO_BUILD_JOBS=1 cargo check -p botserver 2>&1 | tail -200
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 📏 FILE SIZE LIMITS - MANDATORY
|
|
||||||
|
|
||||||
### Maximum 1000 Lines Per File
|
|
||||||
|
|
||||||
When a file grows beyond this limit:
|
|
||||||
|
|
||||||
1. **Identify logical groups** - Find related functions
|
|
||||||
2. **Create subdirectory module** - e.g., `handlers/`
|
|
||||||
3. **Split by responsibility:**
|
|
||||||
- `crud.rs` - Create, Read, Update, Delete
|
|
||||||
- `ai.rs` - AI/ML handlers
|
|
||||||
- `export.rs` - Export/import
|
|
||||||
- `validation.rs` - Validation
|
|
||||||
- `mod.rs` - Re-exports
|
|
||||||
4. **Keep files focused** - Single responsibility
|
|
||||||
5. **Update mod.rs** - Re-export all public items
|
|
||||||
|
|
||||||
**NEVER let a single file exceed 1000 lines - split proactively at 800 lines**
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🚀 PERFORMANCE & SIZE STANDARDS
|
|
||||||
|
|
||||||
### Binary Size Optimization
|
|
||||||
- **Release Profile**: Always maintain `opt-level = "z"`, `lto = true`, `codegen-units = 1`, `strip = true`, `panic = "abort"`.
|
|
||||||
- **Dependencies**:
|
|
||||||
- Run `cargo tree --duplicates` weekly to find and resolve duplicate versions.
|
|
||||||
- Run `cargo machete` to remove unused dependencies.
|
|
||||||
- Use `default-features = false` and explicitly opt-in to needed features.
|
|
||||||
|
|
||||||
### Memory Optimization
|
|
||||||
- **Strings**: Prefer `&str` over `String` where possible. Use `Cow<str>` for conditional ownership.
|
|
||||||
- **Collections**: Use `Vec::with_capacity` when size is known. Consider `SmallVec` for hot paths.
|
|
||||||
- **Allocations**: Minimize heap allocations in hot paths.
|
|
||||||
|
|
||||||
### Linting & Code Quality
|
|
||||||
- **Clippy**: Code MUST pass `cargo clippy --all-targets --all-features` with **0 warnings**.
|
|
||||||
- **No Allow**: Do not use `#[allow(clippy::...)]` unless absolutely necessary and documented. Fix the underlying issue.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🔐 SECURITY DIRECTIVES - MANDATORY
|
|
||||||
|
|
||||||
### Error Handling - NO PANICS IN PRODUCTION
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// ❌ FORBIDDEN
|
|
||||||
value.unwrap()
|
|
||||||
value.expect("message")
|
|
||||||
panic!("error")
|
|
||||||
todo!()
|
|
||||||
unimplemented!()
|
|
||||||
|
|
||||||
// ✅ REQUIRED
|
|
||||||
value?
|
|
||||||
value.ok_or_else(|| Error::NotFound)?
|
|
||||||
value.unwrap_or_default()
|
|
||||||
value.unwrap_or_else(|e| { log::error!("{}", e); default })
|
|
||||||
if let Some(v) = value { ... }
|
|
||||||
match value { Ok(v) => v, Err(e) => return Err(e.into()) }
|
|
||||||
```
|
|
||||||
|
|
||||||
### Command Execution - USE SafeCommand
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// ❌ FORBIDDEN
|
|
||||||
Command::new("some_command").arg(user_input).output()
|
|
||||||
|
|
||||||
// ✅ REQUIRED
|
|
||||||
use crate::security::command_guard::SafeCommand;
|
|
||||||
SafeCommand::new("allowed_command")?
|
|
||||||
.arg("safe_arg")?
|
|
||||||
.execute()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Error Responses - USE ErrorSanitizer
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// ❌ FORBIDDEN
|
|
||||||
Json(json!({ "error": e.to_string() }))
|
|
||||||
format!("Database error: {}", e)
|
|
||||||
|
|
||||||
// ✅ REQUIRED
|
|
||||||
use crate::security::error_sanitizer::log_and_sanitize;
|
|
||||||
let sanitized = log_and_sanitize(&e, "context", None);
|
|
||||||
(StatusCode::INTERNAL_SERVER_ERROR, sanitized)
|
|
||||||
```
|
|
||||||
|
|
||||||
### SQL - USE sql_guard
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// ❌ FORBIDDEN
|
|
||||||
format!("SELECT * FROM {}", user_table)
|
|
||||||
|
|
||||||
// ✅ REQUIRED
|
|
||||||
use crate::security::sql_guard::{sanitize_identifier, validate_table_name};
|
|
||||||
let safe_table = sanitize_identifier(&user_table);
|
|
||||||
validate_table_name(&safe_table)?;
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## ❌ ABSOLUTE PROHIBITIONS
|
|
||||||
|
|
||||||
```
|
|
||||||
❌ NEVER use .unwrap() or .expect() in production code (tests OK)
|
|
||||||
❌ NEVER use panic!(), todo!(), unimplemented!()
|
|
||||||
❌ NEVER use Command::new() directly - use SafeCommand
|
|
||||||
❌ NEVER return raw error strings to HTTP clients
|
|
||||||
❌ NEVER use #[allow()] in source code - FIX the code instead
|
|
||||||
❌ NEVER add lint exceptions to Cargo.toml - FIX the code instead
|
|
||||||
❌ NEVER use _ prefix for unused variables - DELETE or USE them
|
|
||||||
❌ NEVER leave unused imports or dead code
|
|
||||||
❌ NEVER add comments - code must be self-documenting
|
|
||||||
❌ NEVER modify Cargo.toml lints section!
|
|
||||||
❌ NEVER use CDN links - all assets must be local
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## ✅ MANDATORY CODE PATTERNS
|
|
||||||
|
|
||||||
### Use Self in Impl Blocks
|
|
||||||
```rust
|
|
||||||
impl MyStruct {
|
|
||||||
fn new() -> Self { Self { } } // ✅ Not MyStruct
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Derive Eq with PartialEq
|
|
||||||
```rust
|
|
||||||
#[derive(PartialEq, Eq)] // ✅ Always both
|
|
||||||
struct MyStruct { }
|
|
||||||
```
|
|
||||||
|
|
||||||
### Inline Format Args
|
|
||||||
```rust
|
|
||||||
format!("Hello {name}") // ✅ Not format!("{}", name)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Combine Match Arms
|
|
||||||
```rust
|
|
||||||
match x {
|
|
||||||
A | B => do_thing(), // ✅ Combine identical arms
|
|
||||||
C => other(),
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🖥️ UI Architecture (botui + botserver)
|
|
||||||
|
|
||||||
### Two Servers During Development
|
|
||||||
|
|
||||||
| Server | Port | Purpose |
|
|
||||||
|--------|------|---------|
|
|
||||||
| **botui** | 3000 | Serves UI files + proxies API to botserver |
|
|
||||||
| **botserver** | 8088 | Backend API + embedded UI fallback |
|
|
||||||
|
|
||||||
### How It Works
|
|
||||||
|
|
||||||
```
|
|
||||||
Browser → localhost:3000 → botui (serves HTML/CSS/JS)
|
|
||||||
→ /api/* proxied to botserver:8088
|
|
||||||
→ /suite/* served from botui/ui/suite/
|
|
||||||
```
|
|
||||||
|
|
||||||
### Adding New Suite Apps
|
|
||||||
|
|
||||||
1. Create folder: `botui/ui/suite/<appname>/`
|
|
||||||
2. Add to `SUITE_DIRS` in `botui/src/ui_server/mod.rs`
|
|
||||||
3. Rebuild botui: `cargo build -p botui`
|
|
||||||
4. Add menu entry in `botui/ui/suite/index.html`
|
|
||||||
|
|
||||||
### Hot Reload
|
|
||||||
|
|
||||||
- **UI files (HTML/CSS/JS)**: Edit & refresh browser (no restart)
|
|
||||||
- **botui Rust code**: Rebuild + restart botui
|
|
||||||
- **botserver Rust code**: Rebuild + restart botserver
|
|
||||||
|
|
||||||
### Production (Single Binary)
|
|
||||||
|
|
||||||
When the `botui/ui/suite/` folder is not found, botserver uses the **embedded UI** compiled into the binary via `rust-embed`.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🎨 FRONTEND STANDARDS
|
|
||||||
|
|
||||||
### HTMX-First Approach
|
|
||||||
- Use HTMX to minimize JavaScript
|
|
||||||
- Server returns HTML fragments, not JSON
|
|
||||||
- Use `hx-get`, `hx-post`, `hx-target`, `hx-swap`
|
|
||||||
- WebSocket via htmx-ws extension
|
|
||||||
|
|
||||||
### Local Assets Only - NO CDN
|
|
||||||
```html
|
|
||||||
<!-- ✅ CORRECT -->
|
|
||||||
<script src="js/vendor/htmx.min.js"></script>
|
|
||||||
|
|
||||||
<!-- ❌ WRONG -->
|
|
||||||
<script src="https://unpkg.com/htmx.org@1.9.10"></script>
|
|
||||||
```
|
|
||||||
|
|
||||||
### Vendor Libraries Location
|
|
||||||
```
|
|
||||||
ui/suite/js/vendor/
|
|
||||||
├── htmx.min.js
|
|
||||||
├── htmx-ws.js
|
|
||||||
├── marked.min.js
|
|
||||||
└── gsap.min.js
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 📋 PROJECT-SPECIFIC PROMPTS
|
|
||||||
|
|
||||||
Each crate has its own PROMPT.md with specific guidelines:
|
|
||||||
|
|
||||||
| Crate | PROMPT.md Location | Focus |
|
|
||||||
|-------|-------------------|-------|
|
|
||||||
| botserver | `botserver/PROMPT.md` | API, security, Rhai BASIC |
|
|
||||||
| botui | `botui/PROMPT.md` | UI, HTMX, CSS design system |
|
|
||||||
| botapp | `botapp/PROMPT.md` | Tauri, desktop features |
|
|
||||||
| botlib | `botlib/PROMPT.md` | Shared types, errors |
|
|
||||||
| botbook | `botbook/PROMPT.md` | Documentation, mdBook |
|
|
||||||
| bottest | `bottest/PROMPT.md` | Test infrastructure |
|
|
||||||
|
|
||||||
### Special Prompts
|
|
||||||
| File | Purpose |
|
|
||||||
|------|---------|
|
|
||||||
| `botserver/src/tasks/PROMPT.md` | AutoTask LLM executor |
|
|
||||||
| `botserver/src/auto_task/APP_GENERATOR_PROMPT.md` | App generation |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🚀 STARTING DEVELOPMENT
|
|
||||||
|
|
||||||
### Start Both Servers
|
|
||||||
```bash
|
|
||||||
# Terminal 1: botserver
|
|
||||||
cd botserver && cargo run -- --noconsole
|
|
||||||
|
|
||||||
# Terminal 2: botui
|
|
||||||
cd botui && BOTSERVER_URL="http://localhost:8088" cargo run
|
|
||||||
```
|
|
||||||
|
|
||||||
### Build Commands
|
|
||||||
```bash
|
|
||||||
# Check single crate
|
|
||||||
cargo check -p botserver
|
|
||||||
|
|
||||||
# Build workspace
|
|
||||||
cargo build
|
|
||||||
|
|
||||||
# Run tests
|
|
||||||
cargo test -p bottest
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 📋 CONTINUATION PROMPT
|
|
||||||
|
|
||||||
When starting a new session or continuing work:
|
|
||||||
|
|
||||||
```
|
|
||||||
Continue on gb/ workspace. Follow PROMPT.md strictly:
|
|
||||||
|
|
||||||
1. Check current state with build/diagnostics
|
|
||||||
2. Fix ALL warnings and errors - NO #[allow()] attributes
|
|
||||||
3. Delete unused code, don't suppress warnings
|
|
||||||
4. Remove unused parameters, don't prefix with _
|
|
||||||
5. Verify after each fix batch
|
|
||||||
6. Loop until 0 warnings, 0 errors
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🔑 REMEMBER
|
|
||||||
|
|
||||||
- **OFFLINE FIRST** - Fix all errors from list before compiling
|
|
||||||
- **ZERO WARNINGS, ZERO ERRORS** - The only acceptable state
|
|
||||||
- **FIX, DON'T SUPPRESS** - No #[allow()], no Cargo.toml lint exceptions
|
|
||||||
- **SECURITY FIRST** - No unwrap, no raw errors, no direct commands
|
|
||||||
- **READ BEFORE FIX** - Always understand context first
|
|
||||||
- **BATCH BY FILE** - Fix ALL errors in a file at once
|
|
||||||
- **WRITE ONCE** - Single edit per file with all fixes
|
|
||||||
- **VERIFY LAST** - Only compile/diagnostics after ALL fixes
|
|
||||||
- **DELETE DEAD CODE** - Don't keep unused code around
|
|
||||||
- **Version 6.2.0** - Do not change without approval
|
|
||||||
- **GIT WORKFLOW** - ALWAYS push to ALL repositories (github, pragmatismo)
|
|
||||||
717
README.md
717
README.md
|
|
@ -1,85 +1,644 @@
|
||||||
# General Bots Workspace
|
# General Bots Workspace
|
||||||
|
|
||||||
**Version:** 6.1.0
|
## ⚠️ CRITICAL SECURITY WARNING
|
||||||
|
|
||||||
|
**NEVER CREATE FILES WITH SECRETS IN THE REPOSITORY ROOT**
|
||||||
|
|
||||||
|
Secret files MUST be placed in `/tmp/` only:
|
||||||
|
- ✅ `/tmp/vault-token-gb` - Vault root token
|
||||||
|
- ✅ `/tmp/vault-unseal-key-gb` - Vault unseal key
|
||||||
|
- ❌ `vault-unseal-keys` - FORBIDDEN (tracked by git)
|
||||||
|
- ❌ `start-and-unseal.sh` - FORBIDDEN (contains secrets)
|
||||||
|
|
||||||
|
**Files added to .gitignore:** `vault-unseal-keys`, `start-and-unseal.sh`, `vault-token-*`
|
||||||
|
|
||||||
|
**Why `/tmp/`?**
|
||||||
|
- Cleared on reboot (ephemeral)
|
||||||
|
- Not tracked by git
|
||||||
|
- Standard Unix security practice
|
||||||
|
- Prevents accidental commits
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
**Version:** 6.3.0
|
||||||
**Type:** Rust Workspace (Monorepo with Independent Subproject Repos)
|
**Type:** Rust Workspace (Monorepo with Independent Subproject Repos)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Structure
|
## Overview
|
||||||
|
|
||||||
This workspace contains multiple General Bots projects:
|
General Bots is a comprehensive automation platform built with Rust, providing a unified workspace for building AI-powered bots, web interfaces, desktop applications, and integration tools. The workspace follows a modular architecture with independent subprojects that can be developed and deployed separately while sharing common libraries and standards.
|
||||||
|
|
||||||
|
For comprehensive documentation, see **[docs.pragmatismo.com.br](https://docs.pragmatismo.com.br)** or the **[BotBook](./botbook)** for detailed guides, API references, and tutorials.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📁 Workspace Structure
|
||||||
|
|
||||||
|
| Crate | Purpose | Port | Tech Stack |
|
||||||
|
|-------|---------|------|------------|
|
||||||
|
| **botserver** | Main API server, business logic | 9000 | Axum, Diesel, Rhai BASIC |
|
||||||
|
| **botui** | Web UI server (dev) + proxy | 3000 | Axum, HTML/HTMX/CSS |
|
||||||
|
| **botapp** | Desktop app wrapper | - | Tauri 2 |
|
||||||
|
| **botlib** | Shared library | - | Core types, errors |
|
||||||
|
| **botbook** | Documentation | - | mdBook |
|
||||||
|
| **bottest** | Integration tests | - | tokio-test |
|
||||||
|
| **botdevice** | IoT/Device support | - | Rust |
|
||||||
|
| **botmodels** | Data models visualization | - | - |
|
||||||
|
| **botplugin** | Browser extension | - | JS |
|
||||||
|
|
||||||
|
### Key Paths
|
||||||
|
- **Binary:** `target/debug/botserver`
|
||||||
|
- **Run from:** `botserver/` directory
|
||||||
|
- **Env file:** `botserver/.env`
|
||||||
|
- **Stack:** `botserver-stack/`
|
||||||
|
- **UI Files:** `botui/ui/suite/`
|
||||||
|
- **Local Bot Data:** `/opt/gbo/data/` (place `.gbai` packages here)
|
||||||
|
|
||||||
|
### Local Bot Data Directory
|
||||||
|
|
||||||
|
Place local bot packages in `/opt/gbo/data/` for automatic loading and monitoring:
|
||||||
|
|
||||||
|
**Directory Structure:**
|
||||||
```
|
```
|
||||||
gb/
|
/opt/gbo/data/
|
||||||
├── PROMPT.md ← Workspace-level development guide (READ THIS FIRST)
|
└── mybot.gbai/
|
||||||
├── Cargo.toml ← Workspace configuration
|
├── mybot.gbdialog/
|
||||||
├── README.md ← This file
|
│ ├── start.bas
|
||||||
│
|
│ └── main.bas
|
||||||
├── botapp/ ← Desktop application (Tauri)
|
└── mybot.gbot/
|
||||||
├── botserver/ ← Main server (API + business logic)
|
└── config.csv
|
||||||
├── botlib/ ← Shared library (types, utilities)
|
```
|
||||||
├── botui/ ← Web UI (HTML/CSS/JS)
|
|
||||||
├── botbook/ ← Documentation
|
**Features:**
|
||||||
├── bottest/ ← Integration tests
|
- **Auto-loading:** Bots automatically mounted on server startup
|
||||||
├── botdevice/ ← Device integration
|
- **Auto-compilation:** `.bas` files compiled to `.ast` on change
|
||||||
├── botmodels/ ← AI models
|
- **Auto-creation:** New bots automatically added to database
|
||||||
├── botplugin/ ← Plugin system
|
- **Hot-reload:** Changes trigger immediate recompilation
|
||||||
├── bottemplates/ ← Templates
|
- **Monitored by:** LocalFileMonitor and ConfigWatcher services
|
||||||
└── target/ ← Build artifacts
|
|
||||||
|
**Usage:**
|
||||||
|
1. Create bot directory structure in `/opt/gbo/data/`
|
||||||
|
2. Add `.bas` files to `<bot_name>.gbai/<bot_name>.gbdialog/`
|
||||||
|
3. Server automatically detects and loads the bot
|
||||||
|
4. Optional: Add `config.csv` for bot configuration
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🏗️ BotServer Component Architecture
|
||||||
|
|
||||||
|
### 🔧 Infrastructure Components (Auto-Managed)
|
||||||
|
|
||||||
|
BotServer automatically installs, configures, and manages all infrastructure components on first run. **DO NOT manually start these services** - BotServer handles everything.
|
||||||
|
|
||||||
|
**Automatic Service Lifecycle:**
|
||||||
|
1. **Start**: When botserver starts, it automatically launches all infrastructure components (PostgreSQL, Vault, MinIO, Valkey, Qdrant, etc.)
|
||||||
|
2. **Credentials**: BotServer retrieves all service credentials (passwords, tokens, API keys) from Vault
|
||||||
|
3. **Connection**: BotServer uses these credentials to establish secure connections to each service
|
||||||
|
4. **Query**: All database queries, cache operations, and storage requests are authenticated using Vault-managed credentials
|
||||||
|
|
||||||
|
**Credential Flow:**
|
||||||
|
```
|
||||||
|
botserver starts
|
||||||
|
↓
|
||||||
|
Launch PostgreSQL, MinIO, Valkey, Qdrant
|
||||||
|
↓
|
||||||
|
Connect to Vault
|
||||||
|
↓
|
||||||
|
Retrieve service credentials (from database)
|
||||||
|
↓
|
||||||
|
Authenticate with each service using retrieved credentials
|
||||||
|
↓
|
||||||
|
Ready to handle requests
|
||||||
|
```
|
||||||
|
|
||||||
|
| Component | Purpose | Port | Binary Location | Credentials From |
|
||||||
|
|-----------|---------|------|-----------------|------------------|
|
||||||
|
| **Vault** | Secrets management | 8200 | `botserver-stack/bin/vault/vault` | Auto-unsealed |
|
||||||
|
| **PostgreSQL** | Primary database | 5432 | `botserver-stack/bin/tables/bin/postgres` | Vault → database |
|
||||||
|
| **MinIO** | Object storage (S3-compatible) | 9000/9001 | `botserver-stack/bin/drive/minio` | Vault → database |
|
||||||
|
| **Zitadel** | Identity/Authentication | 8300 | `botserver-stack/bin/directory/zitadel` | Vault → database |
|
||||||
|
| **Qdrant** | Vector database (embeddings) | 6333 | `botserver-stack/bin/vector_db/qdrant` | Vault → database |
|
||||||
|
| **Valkey** | Cache/Queue (Redis-compatible) | 6379 | `botserver-stack/bin/cache/valkey-server` | Vault → database |
|
||||||
|
| **Llama.cpp** | Local LLM server | 8081 | `botserver-stack/bin/llm/build/bin/llama-server` | Vault → database |
|
||||||
|
|
||||||
|
### 📦 Component Installation System
|
||||||
|
|
||||||
|
Components are defined in `botserver/3rdparty.toml` and managed by the `PackageManager` (`botserver/src/core/package_manager/`):
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[components.cache]
|
||||||
|
name = "Valkey Cache (Redis-compatible)"
|
||||||
|
url = "https://github.com/valkey-io/valkey/archive/refs/tags/8.0.2.tar.gz"
|
||||||
|
filename = "valkey-8.0.2.tar.gz"
|
||||||
|
|
||||||
|
[components.llm]
|
||||||
|
name = "Llama.cpp Server"
|
||||||
|
url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-ubuntu-x64.zip"
|
||||||
|
filename = "llama-b7345-bin-ubuntu-x64.zip"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Installation Flow:**
|
||||||
|
1. **Download:** Components downloaded to `botserver-installers/` (cached)
|
||||||
|
2. **Extract/Build:** Binaries placed in `botserver-stack/bin/<component>/`
|
||||||
|
3. **Configure:** Config files generated in `botserver-stack/conf/<component>/`
|
||||||
|
4. **Start:** Components started with proper TLS certificates
|
||||||
|
5. **Monitor:** Components monitored and auto-restarted if needed
|
||||||
|
|
||||||
|
**Bootstrap Process:**
|
||||||
|
- First run: Full bootstrap (downloads, installs, configures all components)
|
||||||
|
- Subsequent runs: Only starts existing components (uses cached binaries)
|
||||||
|
- Config stored in: `botserver-stack/conf/system/bootstrap.json`
|
||||||
|
|
||||||
|
### 🚀 PROPER STARTUP PROCEDURES
|
||||||
|
|
||||||
|
**❌ FORBIDDEN:**
|
||||||
|
- NEVER manually start infrastructure components (Vault, PostgreSQL, MinIO, etc.)
|
||||||
|
- NEVER run `cargo run` or `cargo build` for botserver directly — always go through ./restart.sh
|
||||||
|
- NEVER modify botserver-stack/ files manually (use botserver API)
|
||||||
|
|
||||||
|
**✅ REQUIRED:**
|
||||||
|
|
||||||
|
**Option 1: Development (Recommended)**
|
||||||
|
```bash
|
||||||
|
./restart.sh
|
||||||
|
```
|
||||||
|
This script:
|
||||||
|
1. Kills existing processes cleanly
|
||||||
|
2. Builds botserver and botui sequentially (no race conditions)
|
||||||
|
3. Starts botserver in background with logging to `botserver.log`
|
||||||
|
4. Starts botui in background with logging to `botui.log`
|
||||||
|
5. Shows process IDs and access URLs
|
||||||
|
|
||||||
|
**Option 2: Production/Release**
|
||||||
|
```bash
|
||||||
|
# Build release binary first
|
||||||
|
cargo build --release -p botserver
|
||||||
|
|
||||||
|
# Start with release binary
|
||||||
|
RUST_LOG=info ./target/release/botserver --noconsole 2>&1 | tee botserver.log &
|
||||||
|
```
|
||||||
|
|
||||||
|
**Option 3: Using Exec (Systemd/Supervisord)**
|
||||||
|
```bash
|
||||||
|
# In systemd service or similar
|
||||||
|
ExecStart=/home/rodriguez/src/gb/target/release/botserver --noconsole
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🔒 Component Communication
|
||||||
|
|
||||||
|
All components communicate through internal networks with mTLS:
|
||||||
|
- **Vault**: mTLS for secrets access
|
||||||
|
- **PostgreSQL**: TLS encrypted connections
|
||||||
|
- **MinIO**: TLS with client certificates
|
||||||
|
- **Zitadel**: mTLS for user authentication
|
||||||
|
|
||||||
|
Certificates auto-generated in: `botserver-stack/conf/system/certificates/`
|
||||||
|
|
||||||
|
### 📊 Component Status
|
||||||
|
|
||||||
|
Check component status anytime:
|
||||||
|
```bash
|
||||||
|
# Check if all components are running
|
||||||
|
ps aux | grep -E "vault|postgres|minio|zitadel|qdrant|valkey" | grep -v grep
|
||||||
|
|
||||||
|
# View component logs
|
||||||
|
tail -f botserver-stack/logs/vault/vault.log
|
||||||
|
tail -f botserver-stack/logs/tables/postgres.log
|
||||||
|
tail -f botserver-stack/logs/drive/minio.log
|
||||||
|
|
||||||
|
# Test component connectivity
|
||||||
|
cd botserver-stack/bin/vault && ./vault status
|
||||||
|
cd botserver-stack/bin/cache && ./valkey-cli ping
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## CRITICAL: PROMPT.md Files
|
## 🏗️ Component Dependency Graph
|
||||||
|
|
||||||
**Each project has a PROMPT.md that defines its development rules.**
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Client Layer │
|
||||||
|
├─────────────────────────────────────────────────────────────────┤
|
||||||
|
│ botui (Web UI) │ botapp (Desktop) │ botplugin (Ext) │
|
||||||
|
│ HTMX + Axum │ Tauri 2 Wrapper │ Browser Extension │
|
||||||
|
└─────────┬───────────────────┬──────────────────┬─────────────────┘
|
||||||
|
│ │ │
|
||||||
|
└───────────────────┼──────────────────┘
|
||||||
|
│
|
||||||
|
┌─────────▼─────────┐
|
||||||
|
│ botlib │
|
||||||
|
│ (Shared Types) │
|
||||||
|
└─────────┬─────────┘
|
||||||
|
│
|
||||||
|
┌───────────────────┼───────────────────┐
|
||||||
|
│ │ │
|
||||||
|
┌─────▼─────┐ ┌─────▼─────┐ ┌─────▼─────┐
|
||||||
|
│ botserver │ │ bottest │ │ botdevice │
|
||||||
|
│ API Core │ │ Tests │ │ IoT/Device │
|
||||||
|
└───────────┘ └───────────┘ └───────────┘
|
||||||
|
```
|
||||||
|
|
||||||
The diagnostics tool reads and respects these PROMPT.md files.
|
### Dependency Rules
|
||||||
|
|
||||||
### Hierarchy
|
| Crate | Depends On | Why |
|
||||||
|
|-------|-----------|-----|
|
||||||
|
| **botserver** | botlib | Shared types, error handling, models |
|
||||||
|
| **botui** | botlib | Common data structures, API client |
|
||||||
|
| **botapp** | botlib | Shared types, desktop-specific utilities |
|
||||||
|
| **bottest** | botserver, botlib | Integration testing with real components |
|
||||||
|
| **botdevice** | botlib | Device types, communication protocols |
|
||||||
|
| **botplugin** | - | Standalone browser extension (JS) |
|
||||||
|
|
||||||
1. **`PROMPT.md`** (this directory) - Workspace-wide rules
|
**Key Principle:** `botlib` contains ONLY shared types and utilities. No business logic. All business logic lives in botserver or specialized crates.
|
||||||
2. **`botapp/PROMPT.md`** - Desktop app specifics
|
|
||||||
3. **`botserver/PROMPT.md`** - Server specifics
|
|
||||||
4. **`botlib/PROMPT.md`** - Library specifics
|
|
||||||
5. **`botui/PROMPT.md`** - UI specifics
|
|
||||||
6. **`botbook/PROMPT.md`** - Documentation specifics
|
|
||||||
7. **`bottest/PROMPT.md`** - Test specifics
|
|
||||||
|
|
||||||
**ALWAYS read the relevant PROMPT.md before working on a project.**
|
## 📦 Module Responsibility Matrix
|
||||||
|
|
||||||
---
|
### botserver/src/ Module Structure
|
||||||
|
|
||||||
## Main Directive
|
| Module | Responsibility | Key Types | Dependencies |
|
||||||
|
|--------|---------------|-----------|--------------|
|
||||||
|
| **core/bot/** | WebSocket handling, bot orchestration | BotOrchestrator, UserMessage | basic, shared |
|
||||||
|
| **core/session/** | Session management, conversation history | SessionManager, UserSession | shared, database |
|
||||||
|
| **basic/** | Rhai BASIC scripting engine | ScriptService, Engine | rhai, keywords |
|
||||||
|
| **basic/keywords/** | BASIC keyword implementations (TALK, HEAR, etc.) | Keyword functions | basic, state |
|
||||||
|
| **llm/** | Multi-vendor LLM API integration | LLMClient, ModelConfig | reqwest, shared |
|
||||||
|
| **drive/** | S3 file storage and monitoring | DriveMonitor, compile_tool | s3, basic |
|
||||||
|
| **security/** | Security guards (command, SQL, error) | SafeCommand, ErrorSanitizer | state |
|
||||||
|
| **shared/** | Database models, schema definitions | Bot, Session, Message | diesel |
|
||||||
|
| **tasks/** | AutoTask execution system | TaskRunner, TaskScheduler | core/basic |
|
||||||
|
| **auto_task/** | LLM-powered app generation | AppGenerator, template engine | llm, tasks |
|
||||||
|
| **learn/** | Knowledge base management | KBManager, vector storage | database, drive |
|
||||||
|
| **attendance/** | LLM-assisted customer service | AttendantManager, queue | core/bot |
|
||||||
|
|
||||||
**LOOP AND COMPACT UNTIL 0 WARNINGS - MAXIMUM YOLO**
|
### Data Flow Patterns
|
||||||
|
|
||||||
- 0 warnings
|
```
|
||||||
- 0 errors
|
1. User Request Flow:
|
||||||
- Trust project diagnostics
|
Client → WebSocket → botserver/src/core/bot/mod.rs
|
||||||
- Respect all rules
|
↓
|
||||||
- No `#[allow()]` in source code
|
BotOrchestrator::stream_response()
|
||||||
- Real code fixes only
|
↓
|
||||||
|
┌───────────┴───────────┐
|
||||||
|
│ │
|
||||||
|
LLM API Call Script Execution
|
||||||
|
(llm/mod.rs) (basic/mod.rs)
|
||||||
|
│ │
|
||||||
|
└───────────┬───────────┘
|
||||||
|
↓
|
||||||
|
Response → WebSocket → Client
|
||||||
|
|
||||||
|
2. File Sync Flow:
|
||||||
|
S3 Drive → drive_monitor/src/drive_monitor/mod.rs
|
||||||
|
↓
|
||||||
|
Download .bas files
|
||||||
|
↓
|
||||||
|
compile_file() → Generate .ast
|
||||||
|
↓
|
||||||
|
Store in ./work/{bot_name}.gbai/
|
||||||
|
|
||||||
|
3. Script Execution Flow:
|
||||||
|
.bas file → ScriptService::compile()
|
||||||
|
↓
|
||||||
|
preprocess_basic_script()
|
||||||
|
↓
|
||||||
|
engine.compile() → AST
|
||||||
|
↓
|
||||||
|
ScriptService::run() → Execute
|
||||||
|
↓
|
||||||
|
TALK commands → WebSocket messages
|
||||||
|
```
|
||||||
|
|
||||||
|
### Common Architectural Patterns
|
||||||
|
|
||||||
|
| Pattern | Where Used | Purpose |
|
||||||
|
|---------|-----------|---------|
|
||||||
|
| **State via Arc<AppState>** | All handlers | Shared async state (DB, cache, config) |
|
||||||
|
| **Extension(state) extractor** | Axum handlers | Inject Arc<AppState> into route handlers |
|
||||||
|
| **tokio::spawn_blocking** | CPU-intensive tasks | Offload blocking work from async runtime |
|
||||||
|
| **WebSocket with split()** | Real-time comms | Separate sender/receiver for WS streams |
|
||||||
|
| **ErrorSanitizer for responses** | All HTTP errors | Prevent leaking sensitive info in errors |
|
||||||
|
| **SafeCommand for execution** | Command running | Whitelist-based command validation |
|
||||||
|
| **Rhai for scripting** | BASIC interpreter | Embeddable scripting language |
|
||||||
|
| **Diesel ORM** | Database access | Type-safe SQL queries |
|
||||||
|
| **Redis for cache** | Session data | Fast key-value storage |
|
||||||
|
| **S3 for storage** | File system | Scalable object storage |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Quick Start
|
## Quick Start
|
||||||
|
|
||||||
|
### 🚀 Simple Startup (ALWAYS USE restart.sh)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
./restart.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
**⚠️ CRITICAL: ALWAYS use restart.sh - NEVER start servers individually!**
|
||||||
|
|
||||||
|
The script handles BOTH servers properly:
|
||||||
|
1. Stop existing processes cleanly
|
||||||
|
2. Build botserver and botui sequentially (no race conditions)
|
||||||
|
3. Start botserver in background → **automatically starts all infrastructure services (PostgreSQL, Vault, MinIO, Valkey, Qdrant)**
|
||||||
|
4. BotServer retrieves credentials from Vault and authenticates with all services
|
||||||
|
5. Start botui in background → proxy to botserver
|
||||||
|
6. Show process IDs and monitoring commands
|
||||||
|
|
||||||
|
**Infrastructure services are fully automated - no manual configuration required!**
|
||||||
|
|
||||||
|
**Monitor startup:**
|
||||||
|
```bash
|
||||||
|
tail -f botserver.log botui.log
|
||||||
|
```
|
||||||
|
|
||||||
|
**Access:**
|
||||||
|
- Web UI: http://localhost:3000
|
||||||
|
- API: http://localhost:9000
|
||||||
|
|
||||||
|
### 📊 Monitor & Debug
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tail -f botserver.log botui.log
|
||||||
|
```
|
||||||
|
|
||||||
|
**Quick status check:**
|
||||||
|
```bash
|
||||||
|
ps aux | grep -E "botserver|botui" | grep -v grep
|
||||||
|
```
|
||||||
|
|
||||||
|
**Quick error scan:**
|
||||||
|
```bash
|
||||||
|
grep -E " E |W |CLIENT:" botserver.log | tail -20
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🔧 Manual Startup (If needed)
|
||||||
|
|
||||||
|
**⚠️ WARNING: Only use if restart.sh fails. Always prefer restart.sh!**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd botserver && cargo run -- --noconsole > ../botserver.log 2>&1 &
|
||||||
|
cd botui && BOTSERVER_URL="http://localhost:9000" cargo run > ../botui.log 2>&1 &
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🛑 Stop Servers
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pkill -f botserver; pkill -f botui
|
||||||
|
```
|
||||||
|
|
||||||
|
### ⚠️ Common Issues
|
||||||
|
|
||||||
|
**Vault init error?** Delete stale state:
|
||||||
|
```bash
|
||||||
|
rm -rf botserver-stack/data/vault botserver-stack/conf/vault/init.json && ./restart.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
**Port in use?** Find and kill:
|
||||||
|
```bash
|
||||||
|
lsof -ti:9000 | xargs kill -9
|
||||||
|
lsof -ti:3000 | xargs kill -9
|
||||||
|
```
|
||||||
|
|
||||||
|
**⚠️ IMPORTANT: Stack Services Management**
|
||||||
|
All infrastructure services (PostgreSQL, Vault, Redis, Qdrant, MinIO, etc.) are **automatically started by botserver** and managed through the `botserver-stack/` directory, NOT global system installations. The system uses:
|
||||||
|
|
||||||
|
- **Local binaries:** `botserver-stack/bin/` (PostgreSQL, Vault, Redis, etc.)
|
||||||
|
- **Configurations:** `botserver-stack/conf/`
|
||||||
|
- **Data storage:** `botserver-stack/data/`
|
||||||
|
- **Service logs:** `botserver-stack/logs/` (check here for troubleshooting)
|
||||||
|
- **Credentials:** Stored in Vault, retrieved by botserver at startup
|
||||||
|
|
||||||
|
**Do NOT install or reference global PostgreSQL, Redis, or other services.** When botserver starts, it automatically:
|
||||||
|
1. Launches all required stack services
|
||||||
|
2. Connects to Vault
|
||||||
|
3. Retrieves credentials from the `bot_configuration` database table
|
||||||
|
4. Authenticates with each service using retrieved credentials
|
||||||
|
5. Begins handling requests with authenticated connections
|
||||||
|
|
||||||
|
If you encounter service errors, check the individual service logs in `./botserver-stack/logs/[service]/` directories.
|
||||||
|
|
||||||
|
### UI File Deployment - Production Options
|
||||||
|
|
||||||
|
**Option 1: Embedded UI (Recommended for Production)**
|
||||||
|
|
||||||
|
The `embed-ui` feature compiles UI files directly into the botui binary, eliminating the need for separate file deployment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build with embedded UI files
|
||||||
|
cargo build --release -p botui --features embed-ui
|
||||||
|
|
||||||
|
# The binary now contains all UI files - no additional deployment needed!
|
||||||
|
# The botui binary is self-contained and production-ready
|
||||||
|
```
|
||||||
|
|
||||||
|
**Benefits of embed-ui:**
|
||||||
|
- ✅ Single binary deployment (no separate UI files)
|
||||||
|
- ✅ Faster startup (no filesystem access)
|
||||||
|
- ✅ Smaller attack surface
|
||||||
|
- ✅ Simpler deployment process
|
||||||
|
|
||||||
|
**Option 2: Filesystem Deployment (Development Only)**
|
||||||
|
|
||||||
|
For development, UI files are served from the filesystem:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# UI files must exist at botui/ui/suite/
|
||||||
|
# This is automatically available in development builds
|
||||||
|
```
|
||||||
|
|
||||||
|
**Option 3: Manual File Deployment (Legacy)**
|
||||||
|
|
||||||
|
If you need to deploy UI files separately (not recommended):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Deploy UI files to production location
|
||||||
|
./botserver/deploy/deploy-ui.sh /opt/gbo
|
||||||
|
|
||||||
|
# Verify deployment
|
||||||
|
ls -la /opt/gbo/bin/ui/suite/index.html
|
||||||
|
```
|
||||||
|
|
||||||
|
See `botserver/deploy/README.md` for deployment scripts.
|
||||||
|
|
||||||
|
### Start Both Servers (Automated)
|
||||||
|
```bash
|
||||||
|
# Use restart script (RECOMMENDED)
|
||||||
|
./restart.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start Both Servers (Manual)
|
||||||
|
```bash
|
||||||
|
# Terminal 1: botserver
|
||||||
|
cd botserver && cargo run -- --noconsole
|
||||||
|
|
||||||
|
# Terminal 2: botui
|
||||||
|
cd botui && BOTSERVER_URL="http://localhost:9000" cargo run
|
||||||
|
```
|
||||||
|
|
||||||
|
### Build Commands
|
||||||
|
```bash
|
||||||
|
# Check single crate
|
||||||
|
cargo check -p botserver
|
||||||
|
|
||||||
|
# Build workspace
|
||||||
cargo build
|
cargo build
|
||||||
cargo test
|
|
||||||
|
# Run tests
|
||||||
|
cargo test -p bottest
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Development Workflow
|
## 🤖 AI Agent Guidelines
|
||||||
|
|
||||||
1. Read `PROMPT.md` (workspace-level rules)
|
> **For LLM instructions, coding rules, security directives, testing workflows, and error handling patterns, see [AGENTS.md](./AGENTS.md).**
|
||||||
2. Read `<project>/PROMPT.md` (project-specific rules)
|
|
||||||
3. Use diagnostics tool to check warnings
|
---
|
||||||
4. Fix all warnings with full file rewrites
|
|
||||||
5. Verify with diagnostics after each file
|
## 📖 Glossary
|
||||||
6. Never suppress warnings with `#[allow()]`
|
|
||||||
|
| Term | Definition | Usage |
|
||||||
|
|------|-----------|-------|
|
||||||
|
| **Bot** | AI agent with configuration, scripts, and knowledge bases | Primary entity in system |
|
||||||
|
| **Session** | Single conversation instance between user and bot | Stored in `sessions` table |
|
||||||
|
| **Dialog** | Collection of BASIC scripts (.bas files) for bot logic | Stored in `{bot_name}.gbdialog/` |
|
||||||
|
| **Tool** | Reusable function callable by LLM | Defined in .bas files, compiled to .ast |
|
||||||
|
| **Knowledge Base (KB)** | Vector database of documents for semantic search | Managed in `learn/` module |
|
||||||
|
| **Scheduler** | Time-triggered task execution | Cron-like scheduling in BASIC scripts |
|
||||||
|
| **Drive** | S3-compatible storage for files | Abstracted in `drive/` module |
|
||||||
|
| **Rhai** | Embedded scripting language for BASIC dialect | Rhai engine in `basic/` module |
|
||||||
|
| **WebSocket Adapter** | Component that sends messages to connected clients | `web_adapter` in state |
|
||||||
|
| **AutoTask** | LLM-generated task automation system | In `auto_task/` and `tasks/` modules |
|
||||||
|
| **Orchestrator** | Coordinates LLM, tools, KBs, and user input | `BotOrchestrator` in `core/bot/` |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## 🖥️ UI Architecture (botui + botserver)
|
||||||
|
|
||||||
|
### Two Servers During Development
|
||||||
|
|
||||||
|
| Server | Port | Purpose |
|
||||||
|
|--------|------|---------|
|
||||||
|
| **botui** | 3000 | Serves UI files + proxies API to botserver |
|
||||||
|
| **botserver** | 9000 | Backend API + embedded UI fallback |
|
||||||
|
|
||||||
|
### How It Works
|
||||||
|
|
||||||
|
```
|
||||||
|
Browser → localhost:3000 → botui (serves HTML/CSS/JS)
|
||||||
|
→ /api/* proxied to botserver:9000
|
||||||
|
→ /suite/* served from botui/ui/suite/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Adding New Suite Apps
|
||||||
|
|
||||||
|
1. Create folder: `botui/ui/suite/<appname>/`
|
||||||
|
2. Add to `SUITE_DIRS` in `botui/src/ui_server/mod.rs`
|
||||||
|
3. Rebuild botui: `cargo build -p botui`
|
||||||
|
4. Add menu entry in `botui/ui/suite/index.html`
|
||||||
|
|
||||||
|
### Hot Reload
|
||||||
|
|
||||||
|
- **UI files (HTML/CSS/JS)**: Edit & refresh browser (no restart)
|
||||||
|
- **botui Rust code**: Rebuild + restart botui
|
||||||
|
- **botserver Rust code**: Rebuild + restart botserver
|
||||||
|
|
||||||
|
### Production (Single Binary)
|
||||||
|
|
||||||
|
When the `botui/ui/suite/` folder is not found, botserver uses the **embedded UI** compiled into the binary via `rust-embed`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🎨 Frontend Standards
|
||||||
|
|
||||||
|
### HTMX-First Approach
|
||||||
|
- Use HTMX to minimize JavaScript
|
||||||
|
- Server returns HTML fragments, not JSON
|
||||||
|
- Use `hx-get`, `hx-post`, `hx-target`, `hx-swap`
|
||||||
|
- WebSocket via htmx-ws extension
|
||||||
|
|
||||||
|
### Local Assets Only - NO CDN
|
||||||
|
```html
|
||||||
|
<!-- ✅ CORRECT -->
|
||||||
|
<script src="js/vendor/htmx.min.js"></script>
|
||||||
|
|
||||||
|
<!-- ❌ WRONG -->
|
||||||
|
<script src="https://unpkg.com/htmx.org@1.9.10"></script>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Vendor Libraries Location
|
||||||
|
```
|
||||||
|
ui/suite/js/vendor/
|
||||||
|
├── htmx.min.js
|
||||||
|
├── htmx-ws.js
|
||||||
|
├── marked.min.js
|
||||||
|
└── gsap.min.js
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📋 Project-Specific Guidelines
|
||||||
|
|
||||||
|
Each crate has its own README.md with specific guidelines:
|
||||||
|
|
||||||
|
| Crate | README.md Location | Focus |
|
||||||
|
|-------|-------------------|-------|
|
||||||
|
| botserver | `botserver/README.md` | API, security, Rhai BASIC |
|
||||||
|
| botui | `botui/README.md` | UI, HTMX, CSS design system |
|
||||||
|
| botapp | `botapp/README.md` | Tauri, desktop features |
|
||||||
|
| botlib | `botlib/README.md` | Shared types, errors |
|
||||||
|
| botbook | `botbook/README.md` | Documentation, mdBook |
|
||||||
|
| bottest | `bottest/README.md` | Test infrastructure |
|
||||||
|
|
||||||
|
### Special Prompts
|
||||||
|
| File | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `botserver/src/tasks/README.md` | AutoTask LLM executor |
|
||||||
|
| `botserver/src/auto_task/APP_GENERATOR_PROMPT.md` | App generation |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📚 Documentation
|
||||||
|
|
||||||
|
For complete documentation, guides, and API references:
|
||||||
|
|
||||||
|
- **[docs.pragmatismo.com.br](https://docs.pragmatismo.com.br)** - Full online documentation
|
||||||
|
- **[BotBook](./botbook)** - Local comprehensive guide with tutorials and examples
|
||||||
|
- **[General Bots Repository](https://github.com/GeneralBots/BotServer)** - Main project repository
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🔧 Immediate Technical Debt
|
||||||
|
|
||||||
|
### Critical Issues to Address
|
||||||
|
|
||||||
|
1. **Error Handling Debt**: 955 instances of `unwrap()`/`expect()` in production code
|
||||||
|
2. **Performance Debt**: 12,973 excessive `clone()`/`to_string()` calls
|
||||||
|
3. **File Size Debt**: 7 files exceed 450 lines (largest: 3220 lines)
|
||||||
|
4. **Test Coverage**: Missing integration tests for critical paths
|
||||||
|
5. **Documentation**: Missing inline documentation for complex algorithms
|
||||||
|
|
||||||
|
### Weekly Maintenance Tasks
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check for duplicate dependencies
|
||||||
|
cargo tree --duplicates
|
||||||
|
|
||||||
|
# Remove unused dependencies
|
||||||
|
cargo machete
|
||||||
|
|
||||||
|
# Check binary size
|
||||||
|
cargo build --release && ls -lh target/release/botserver
|
||||||
|
|
||||||
|
# Performance profiling
|
||||||
|
cargo bench
|
||||||
|
|
||||||
|
# Security audit
|
||||||
|
cargo audit
|
||||||
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
@ -87,42 +646,56 @@ cargo test
|
||||||
|
|
||||||
**Note:** Each subproject has its own git repository. This root repository only tracks workspace-level files:
|
**Note:** Each subproject has its own git repository. This root repository only tracks workspace-level files:
|
||||||
|
|
||||||
- `PROMPT.md` - Development guide
|
|
||||||
- `Cargo.toml` - Workspace configuration
|
- `Cargo.toml` - Workspace configuration
|
||||||
- `README.md` - This file
|
- `README.md` - This file
|
||||||
- `.gitignore` - Ignore patterns
|
- `.gitignore` - Ignore patterns
|
||||||
|
- `ADDITIONAL-SUGGESTIONS.md` - Enhancement ideas
|
||||||
|
- `TODO-*.md` - Task tracking files
|
||||||
|
|
||||||
Subprojects (botapp, botserver, botui, etc.) are **independent repositories referenced as git submodules**.
|
Subprojects (botapp, botserver, botui, etc.) are **independent repositories referenced as git submodules**.
|
||||||
|
|
||||||
|
### ⚠️ CRITICAL: Submodule Push Workflow
|
||||||
|
|
||||||
|
When making changes to any submodule (botserver, botui, botlib, etc.):
|
||||||
|
|
||||||
|
1. **Commit and push changes within the submodule directory:**
|
||||||
|
```bash
|
||||||
|
cd botserver
|
||||||
|
git add .
|
||||||
|
git commit -m "Your changes"
|
||||||
|
git push pragmatismo main
|
||||||
|
git push github main
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Update the global gb repository submodule reference:**
|
||||||
|
```bash
|
||||||
|
cd .. # Back to gb root
|
||||||
|
git add botserver
|
||||||
|
git commit -m "Update botserver submodule to latest commit"
|
||||||
|
git push pragmatismo main
|
||||||
|
git push github main
|
||||||
|
```
|
||||||
|
|
||||||
|
**Failure to push the global gb repository will cause submodule changes to not trigger CI/CD pipelines.**
|
||||||
|
|
||||||
|
Both repositories must be pushed for changes to take effect in production.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Rules Summary
|
## Development Workflow
|
||||||
|
|
||||||
```
|
1. Read this README.md (workspace structure)
|
||||||
✅ FULL FILE REWRITES ONLY
|
2. Read **[AGENTS.md](./AGENTS.md)** (coding rules & workflows)
|
||||||
✅ BATCH ALL FIXES BEFORE WRITING
|
3. **BEFORE creating any .md file, search botbook/ for existing documentation**
|
||||||
✅ VERIFY WITH DIAGNOSTICS AFTER EACH FILE
|
4. Read `<project>/README.md` (project-specific rules)
|
||||||
✅ TRUST PROJECT DIAGNOSTICS
|
5. Use diagnostics tool to check warnings
|
||||||
✅ RESPECT ALL RULES
|
6. Fix all warnings with full file rewrites
|
||||||
|
7. Verify with diagnostics after each file
|
||||||
❌ NEVER use #[allow()] in source code
|
8. Never suppress warnings with `#[allow()]`
|
||||||
❌ NEVER use partial edits
|
|
||||||
❌ NEVER run cargo check/clippy manually
|
|
||||||
❌ NEVER leave unused code
|
|
||||||
❌ NEVER use .unwrap()/.expect()
|
|
||||||
❌ NEVER use panic!/todo!/unimplemented!()
|
|
||||||
❌ NEVER add comments
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Links
|
|
||||||
|
|
||||||
- Main Server (botserver API): http://localhost:9000
|
|
||||||
- Desktop App: Uses Tauri to wrap botui
|
|
||||||
- Documentation: See botbook/
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
|
|
|
||||||
2
botapp
2
botapp
|
|
@ -1 +1 @@
|
||||||
Subproject commit 1a1e17fa1012e4db10a0f716c9b63a03b4863c9f
|
Subproject commit 0b556948f970832e8606f886853793e2bc8dc35c
|
||||||
2
botbook
2
botbook
|
|
@ -1 +1 @@
|
||||||
Subproject commit 827e011ac05084396aaf2c3098409bf5e02b5cf9
|
Subproject commit 82a236f369e58fe0eda4df704b9ee74a725874e8
|
||||||
|
|
@ -1 +1 @@
|
||||||
Subproject commit 97778e06dd804be55ff761c7fe2788af0ef50626
|
Subproject commit 35411f4f9e64e54b1039360ab654d537cd2958c9
|
||||||
2
botlib
2
botlib
|
|
@ -1 +1 @@
|
||||||
Subproject commit bfaa68dc35e96ced2915d43ffe6fca8267a9a598
|
Subproject commit e7caed45a44ab319c64d90f84281dbdbcba905b7
|
||||||
|
|
@ -1 +1 @@
|
||||||
Subproject commit 462a6dfa51b12f22e87712e613a559f66f9013cb
|
Subproject commit e088a8e69eb8fe064bf1510a720d42abe159ab00
|
||||||
|
|
@ -1 +1 @@
|
||||||
Subproject commit 17a3caebabddbe843c2b7fd93f624b0ccd9c44fb
|
Subproject commit 1727e48307fdb7b54c726af8cd6b12669764e908
|
||||||
|
|
@ -1 +1 @@
|
||||||
Subproject commit 84458b2a6905af7db72b15f5e833bb7942ccdaa9
|
Subproject commit 666acb9360328f1d7353481b79f5809db91e5c76
|
||||||
|
|
@ -1 +1 @@
|
||||||
Subproject commit dd3d8c74dd58a1cc6d6b18d22108819519aaf9c3
|
Subproject commit 3110dd587290047f283300d674ad325f4f9b3046
|
||||||
2
bottest
2
bottest
|
|
@ -1 +1 @@
|
||||||
Subproject commit 706391b272e0fb7c5b2646cc4cc72180195e07f4
|
Subproject commit 346120cb0b916f72abd2fdad577ae1c606aba1a2
|
||||||
2
botui
2
botui
|
|
@ -1 +1 @@
|
||||||
Subproject commit 661edc09fa1063673e84b63d2dcb5cfbe0f91232
|
Subproject commit aeb30b1a33980630ccdad5804b8af76c1ec9073f
|
||||||
1113
prompts/automate-incus.md
Normal file
1113
prompts/automate-incus.md
Normal file
File diff suppressed because it is too large
Load diff
146
prompts/crmex.md
Normal file
146
prompts/crmex.md
Normal file
|
|
@ -0,0 +1,146 @@
|
||||||
|
# Email Campaigns — Feature Plan
|
||||||
|
|
||||||
|
## Existing Foundation (botserver/src/marketing/)
|
||||||
|
- `campaigns.rs` — CrmCampaign model, CRUD handlers
|
||||||
|
- `metrics.rs` — CampaignMetrics, ChannelBreakdown, open/click/conversion rates
|
||||||
|
- `lists.rs` — recipient lists
|
||||||
|
- `templates.rs` — content templates
|
||||||
|
- `triggers.rs` — event-based sending
|
||||||
|
- `email/tracking.rs` — open/click tracking pixels
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Features to Build
|
||||||
|
|
||||||
|
### 1. Insights Dashboard
|
||||||
|
**What:** Time series views of delivery + engagement metrics per campaign.
|
||||||
|
|
||||||
|
**Data points per time bucket (hourly/daily):**
|
||||||
|
- Sent, delivered, bounced, failed
|
||||||
|
- Opens (unique + total), clicks, replies, unsubscribes
|
||||||
|
- Delivery rate, open rate, click-to-open rate (CTOR)
|
||||||
|
|
||||||
|
**Filters/pivots:**
|
||||||
|
- By mailbox provider (Gmail, Outlook, Yahoo, etc. — parsed from MX/SMTP response)
|
||||||
|
- By sender identity (from address / domain)
|
||||||
|
- By campaign or list
|
||||||
|
- Message search → show exact SMTP response from provider
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
- Add `email_delivery_events` table: `(id, campaign_id, recipient_id, event_type, provider, smtp_response, ts)`
|
||||||
|
- API: `GET /api/campaigns/:id/insights?from=&to=&group_by=provider|identity|day`
|
||||||
|
- UI: HTMX + chart.js time series (local vendor)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. Advisor Recommendations
|
||||||
|
**What:** Analyze sending config + results and surface actionable fixes.
|
||||||
|
|
||||||
|
**Checks to run:**
|
||||||
|
| Check | Signal | Recommendation |
|
||||||
|
|---|---|---|
|
||||||
|
| SPF/DKIM/DMARC | DNS lookup | "Add missing record" |
|
||||||
|
| Bounce rate > 5% | delivery_events | "Clean list — remove hard bounces" |
|
||||||
|
| Open rate < 15% | metrics | "Improve subject line / send time" |
|
||||||
|
| Spam complaints > 0.1% | FBL data | "Remove complainers immediately" |
|
||||||
|
| Sending from new IP | warmup_schedule | "Follow warmup plan" |
|
||||||
|
| List age > 6 months | list.last_sent | "Re-engagement campaign before bulk send" |
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
- `marketing/advisor.rs` — `AdvisorEngine::analyze(campaign_id) -> Vec<Recommendation>`
|
||||||
|
- API: `GET /api/campaigns/:id/advisor`
|
||||||
|
- Runs automatically after each campaign completes
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. IP Warmup (like OneSignal / Mailchimp)
|
||||||
|
**What:** Gradually increase daily send volume over 4–6 weeks to build sender reputation.
|
||||||
|
|
||||||
|
**Warmup schedule (standard):**
|
||||||
|
| Day | Max emails/day |
|
||||||
|
|---|---|
|
||||||
|
| 1–2 | 50 |
|
||||||
|
| 3–4 | 100 |
|
||||||
|
| 5–7 | 500 |
|
||||||
|
| 8–10 | 1,000 |
|
||||||
|
| 11–14 | 5,000 |
|
||||||
|
| 15–21 | 10,000 |
|
||||||
|
| 22–28 | 50,000 |
|
||||||
|
| 29+ | unlimited |
|
||||||
|
|
||||||
|
**Rules:**
|
||||||
|
- Only send to most engaged subscribers first (opened in last 90 days)
|
||||||
|
- Stop warmup if bounce rate > 3% or complaint rate > 0.1%
|
||||||
|
- Resume next day at same volume if paused
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
- `marketing/warmup.rs` — `WarmupSchedule`, `WarmupEngine::get_daily_limit(ip, day) -> u32`
|
||||||
|
- `warmup_schedules` table: `(id, ip, started_at, current_day, status, paused_reason)`
|
||||||
|
- Scheduler checks warmup limit before each send batch
|
||||||
|
- API: `GET /api/warmup/status`, `POST /api/warmup/start`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4. Optimized Shared Delivery
|
||||||
|
**What:** Auto-select best sending IP based on real-time reputation signals.
|
||||||
|
|
||||||
|
**Logic:**
|
||||||
|
- Track per-IP: bounce rate, complaint rate, delivery rate (last 24h)
|
||||||
|
- Score each IP: `score = delivery_rate - (bounce_rate * 10) - (complaint_rate * 100)`
|
||||||
|
- Route each send to highest-scored IP for that destination provider
|
||||||
|
- Rotate IPs to spread load and preserve reputation
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
- `marketing/ip_router.rs` — `IpRouter::select(destination_domain) -> IpAddr`
|
||||||
|
- `ip_reputation` table: `(ip, provider, bounces, complaints, delivered, window_start)`
|
||||||
|
- Plugs into Stalwart send path via botserver API
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 5. Modern Email Marketing Features
|
||||||
|
|
||||||
|
| Feature | Description |
|
||||||
|
|---|---|
|
||||||
|
| **Send time optimization** | ML-based per-contact best send time (based on past open history) |
|
||||||
|
| **A/B testing** | Split subject/content, auto-pick winner after N hours |
|
||||||
|
| **Suppression list** | Global unsubscribe/bounce/complaint list, auto-applied to all sends |
|
||||||
|
| **Re-engagement flows** | Auto-trigger "we miss you" to contacts inactive > 90 days |
|
||||||
|
| **Transactional + marketing separation** | Separate IPs/domains for transactional vs bulk |
|
||||||
|
| **One-click unsubscribe** | RFC 8058 `List-Unsubscribe-Post` header on all bulk sends |
|
||||||
|
| **Preview & spam score** | Pre-send SpamAssassin score check |
|
||||||
|
| **Link tracking** | Redirect all links through tracker, record clicks per contact |
|
||||||
|
| **Webhook events** | Push delivery events to external URLs (Stalwart webhook → botserver) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## DB Tables to Add
|
||||||
|
|
||||||
|
```sql
|
||||||
|
email_delivery_events (id, campaign_id, recipient_id, event_type, provider, smtp_code, smtp_response, ts)
|
||||||
|
warmup_schedules (id, ip, started_at, current_day, daily_limit, status, paused_reason)
|
||||||
|
ip_reputation (id, ip, provider, delivered, bounced, complained, window_start)
|
||||||
|
advisor_recommendations (id, campaign_id, check_name, severity, message, created_at, dismissed)
|
||||||
|
ab_tests (id, campaign_id, variant_a, variant_b, split_pct, winner, decided_at)
|
||||||
|
suppression_list (id, org_id, email, reason, added_at)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Files to Create
|
||||||
|
```
|
||||||
|
botserver/src/marketing/
|
||||||
|
├── warmup.rs — IP warmup engine + schedule
|
||||||
|
├── advisor.rs — recommendation engine
|
||||||
|
├── ip_router.rs — optimized IP selection
|
||||||
|
├── ab_test.rs — A/B test logic
|
||||||
|
├── suppression.rs — global suppression list
|
||||||
|
└── send_time.rs — send time optimization
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Existing Code to Extend
|
||||||
|
- `marketing/metrics.rs` → add time-series queries + provider breakdown
|
||||||
|
- `marketing/campaigns.rs` → add warmup_enabled, ab_test_id fields
|
||||||
|
- `email/tracking.rs` → already has open/click tracking, extend with provider parsing
|
||||||
|
- `core/shared/schema/` → add new tables above
|
||||||
59
prompts/folha.md
Normal file
59
prompts/folha.md
Normal file
|
|
@ -0,0 +1,59 @@
|
||||||
|
# detector - Detecção de Desvios na Folha
|
||||||
|
|
||||||
|
## Objetivo
|
||||||
|
- Bot detector deve usar start.bas para inserir dados via init_folha.bas
|
||||||
|
- detecta.bas deve detectar anomalias nos dados inseridos
|
||||||
|
|
||||||
|
## ✅ Status Atual
|
||||||
|
|
||||||
|
### Correção REM em mod.rs (FEITA)
|
||||||
|
**Arquivo:** `botserver/src/basic/mod.rs` linha ~588-594
|
||||||
|
|
||||||
|
Filtro adicionado para `REM ` e `REM\t` no `compile_tool_script`:
|
||||||
|
```rust
|
||||||
|
!(trimmed.starts_with("PARAM ") ||
|
||||||
|
trimmed.starts_with("PARAM\t") ||
|
||||||
|
trimmed.starts_with("DESCRIPTION ") ||
|
||||||
|
trimmed.starts_with("DESCRIPTION\t") ||
|
||||||
|
trimmed.starts_with("REM ") || // <-- ADICIONADO
|
||||||
|
trimmed.starts_with("REM\t") || // <-- ADICIONADO
|
||||||
|
trimmed.starts_with('\'') ||
|
||||||
|
trimmed.starts_with('#') ||
|
||||||
|
trimmed.is_empty())
|
||||||
|
```
|
||||||
|
|
||||||
|
### Arquivos Envolvidos (VERIFICADOS)
|
||||||
|
- `/opt/gbo/data/detector.gbai/detector.gbdialog/start.bas` ✅ OK
|
||||||
|
- Contém botões de sugestão: detecta e init_folha
|
||||||
|
- `/opt/gbo/data/detector.gbai/detector.gbdialog/init_folha.bas` ✅ OK
|
||||||
|
- 4 INSERT statements para dados de exemplo
|
||||||
|
- `/opt/gbo/data/detector.gbai/detector.gbdialog/detecta.bas` ✅ OK
|
||||||
|
- Usa DETECT keyword
|
||||||
|
- `/opt/gbo/data/detector.gbai/detector.gbdialog/tables.bas` ✅ OK
|
||||||
|
- TABLE folha_salarios definida
|
||||||
|
|
||||||
|
### Botserver (RODANDO)
|
||||||
|
- ✅ Botserver compilado com sucesso
|
||||||
|
- ✅ Botserver rodando em http://localhost:8080
|
||||||
|
- ✅ Health check OK
|
||||||
|
|
||||||
|
## Próximos Passos (Pendentes)
|
||||||
|
|
||||||
|
1. **Testar via navegador** - Necessário instalar Playwright browsers
|
||||||
|
- Navegar para http://localhost:3000/detector
|
||||||
|
- Clicar em "⚙️ Inicializar Dados de Teste"
|
||||||
|
- Verificar se INSERT funciona
|
||||||
|
- Clicar em "🔍 Detectar Desvios na Folha"
|
||||||
|
- Verificar se DETECT funciona
|
||||||
|
|
||||||
|
2. **Verificar se há warnings relevantes**
|
||||||
|
- Alguns warnings de código podem precisar ser corrigidos
|
||||||
|
|
||||||
|
## Cache
|
||||||
|
- AST limpo: `rm ./botserver-stack/data/system/work/detector.gbai/detector.gbdialog/*.ast`
|
||||||
|
- Reiniciado: `./restart.sh`
|
||||||
|
- Botserver: ✅ Rodando
|
||||||
|
|
||||||
|
## Arquivos de Trabalho
|
||||||
|
- Work directory: `./botserver-stack/data/system/work/detector.gbai/detector.gbdialog/`
|
||||||
|
- Todos os arquivos BASIC estão presentes e parecem válidos
|
||||||
272
prompts/integratedsuite.md
Normal file
272
prompts/integratedsuite.md
Normal file
|
|
@ -0,0 +1,272 @@
|
||||||
|
# Integrated Suite — Conversational Interface Plan
|
||||||
|
|
||||||
|
> **Pattern:** Every suite app exposes its own `PROMPT.md` + internal tools.
|
||||||
|
> The shared chat bar activates app-specific context when the user is inside that app.
|
||||||
|
> WhatsApp campaigns is the first full example.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
User (WhatsApp / Suite chat bar)
|
||||||
|
↓
|
||||||
|
BotOrchestrator (core/bot/mod.rs)
|
||||||
|
↓
|
||||||
|
detect active app context
|
||||||
|
↓
|
||||||
|
load app PROMPT.md + app InternalTools
|
||||||
|
↓
|
||||||
|
LLM with tools → tool_executor.rs
|
||||||
|
↓
|
||||||
|
app data / actions
|
||||||
|
```
|
||||||
|
|
||||||
|
### Key existing pieces
|
||||||
|
| File | Role |
|
||||||
|
|---|---|
|
||||||
|
| `core/bot/mod.rs` | `get_session_tools()` + `ToolExecutor::execute_tool_call()` |
|
||||||
|
| `tasks/PROMPT.md` | Pattern for app-level LLM prompt |
|
||||||
|
| `marketing/whatsapp.rs` | WhatsApp campaign send/metrics |
|
||||||
|
| `marketing/campaigns.rs` | Campaign CRUD |
|
||||||
|
| `marketing/lists.rs` | Recipient lists |
|
||||||
|
| `botui/ui/suite/campaigns/` | Campaigns UI |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Standard: Every Suite App
|
||||||
|
|
||||||
|
### 1. `PROMPT.md` per app folder
|
||||||
|
Location: `botserver/src/<app>/PROMPT.md`
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
# <App> — Internal Tools Guide
|
||||||
|
|
||||||
|
You are the <App> assistant. When the user is in <App>, you have access to:
|
||||||
|
- tool: list_<entities>
|
||||||
|
- tool: create_<entity>
|
||||||
|
- tool: search_<entity>
|
||||||
|
- tool: <app_specific_action>
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- Always confirm destructive actions before executing
|
||||||
|
- Show results as structured summaries, not raw JSON
|
||||||
|
- If user uploads a file, parse it and confirm before acting
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. `tools.rs` per app
|
||||||
|
Location: `botserver/src/<app>/tools.rs`
|
||||||
|
|
||||||
|
Registers `Vec<Tool>` (LLM function-calling schema) + handler mapping.
|
||||||
|
Loaded by `get_session_tools()` when session's active app = this app.
|
||||||
|
|
||||||
|
### 3. App context detection
|
||||||
|
`core/bot/mod.rs` reads `session.active_app` (set by UI via `POST /api/session/context`).
|
||||||
|
Loads `<app>/PROMPT.md` as system prompt prefix + `<app>/tools.rs` tools.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## WhatsApp Campaigns — Full Conversational Flow
|
||||||
|
|
||||||
|
### Meta Rules (enforced in tools)
|
||||||
|
- Only approved Message Templates for marketing (non-session-initiated)
|
||||||
|
- 24h session window for free-form after user replies
|
||||||
|
- Media: image/video/document via Media Upload API before send
|
||||||
|
- Opt-out: always honor STOP, add to suppression list immediately
|
||||||
|
- Rate: respect per-phone-number rate limits (1000 msg/s business tier)
|
||||||
|
- Template category: MARKETING requires explicit opt-in from recipient
|
||||||
|
|
||||||
|
### Conversation Flow (WhatsApp → campaign creation)
|
||||||
|
|
||||||
|
```
|
||||||
|
User sends to bot number:
|
||||||
|
"I want to send a campaign"
|
||||||
|
↓
|
||||||
|
Bot: "Great! Send me:
|
||||||
|
1. Your contact list (.xlsx or .csv)
|
||||||
|
2. The message text
|
||||||
|
3. An image (optional)
|
||||||
|
4. When to send (or 'now')"
|
||||||
|
↓
|
||||||
|
User uploads contacts.xlsx
|
||||||
|
↓
|
||||||
|
[tool: parse_contact_file]
|
||||||
|
→ extract phone numbers, names
|
||||||
|
→ validate E.164 format
|
||||||
|
→ show preview: "Found 342 contacts. First 3: +55..."
|
||||||
|
↓
|
||||||
|
User sends message text
|
||||||
|
↓
|
||||||
|
[tool: check_template_compliance]
|
||||||
|
→ check if free-form or needs approved template
|
||||||
|
→ if template needed: list available approved templates
|
||||||
|
→ suggest closest match
|
||||||
|
↓
|
||||||
|
User sends image (optional)
|
||||||
|
↓
|
||||||
|
[tool: upload_media]
|
||||||
|
→ upload to Meta Media API
|
||||||
|
→ return media_id
|
||||||
|
↓
|
||||||
|
Bot: "Ready to send to 342 contacts at 14:00 today.
|
||||||
|
Preview: [image] Hello {name}, ...
|
||||||
|
Estimated cost: $X
|
||||||
|
Confirm? (yes/no)"
|
||||||
|
↓
|
||||||
|
User: "yes"
|
||||||
|
↓
|
||||||
|
[tool: create_and_schedule_campaign]
|
||||||
|
→ create campaign record
|
||||||
|
→ apply warmup limit if IP warming
|
||||||
|
→ schedule via TaskScheduler
|
||||||
|
```
|
||||||
|
|
||||||
|
### WhatsApp Campaign Tools (`marketing/whatsapp_tools.rs`)
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Tool definitions for LLM function calling
|
||||||
|
pub fn whatsapp_campaign_tools() -> Vec<Tool> {
|
||||||
|
vec![
|
||||||
|
Tool::new("parse_contact_file", "Parse uploaded xlsx/csv into contact list"),
|
||||||
|
Tool::new("list_templates", "List approved WhatsApp message templates"),
|
||||||
|
Tool::new("check_template_compliance", "Check if message needs approved template"),
|
||||||
|
Tool::new("upload_media", "Upload image/video to Meta Media API"),
|
||||||
|
Tool::new("preview_campaign", "Show campaign preview with cost estimate"),
|
||||||
|
Tool::new("create_and_schedule_campaign", "Create campaign and schedule send"),
|
||||||
|
Tool::new("get_campaign_status", "Get delivery/read metrics for a campaign"),
|
||||||
|
Tool::new("pause_campaign", "Pause an in-progress campaign"),
|
||||||
|
Tool::new("list_campaigns", "List recent campaigns with metrics"),
|
||||||
|
Tool::new("add_to_suppression", "Add number to opt-out list"),
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### WhatsApp PROMPT.md (`marketing/WHATSAPP_PROMPT.md`)
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
# WhatsApp Campaign Assistant
|
||||||
|
|
||||||
|
You help users create and manage WhatsApp marketing campaigns.
|
||||||
|
|
||||||
|
## Meta Platform Rules (MANDATORY)
|
||||||
|
- Marketing messages MUST use pre-approved templates outside 24h session window
|
||||||
|
- Always check opt-in status before adding to campaign
|
||||||
|
- Honor STOP/unsubscribe immediately via add_to_suppression tool
|
||||||
|
- Never send more than warmup daily limit if IP is warming up
|
||||||
|
- Image must be uploaded via upload_media before referencing in campaign
|
||||||
|
|
||||||
|
## Conversation Style
|
||||||
|
- Guide step by step: contacts → message → media → schedule → confirm
|
||||||
|
- Show cost estimate before confirming
|
||||||
|
- After send: proactively share open/read rates when available
|
||||||
|
|
||||||
|
## File Handling
|
||||||
|
- .xlsx/.csv → use parse_contact_file tool
|
||||||
|
- Images → use upload_media tool
|
||||||
|
- Always confirm parsed data before proceeding
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Integrated Suite Chat Bar — Standard
|
||||||
|
|
||||||
|
### How it works
|
||||||
|
1. User opens any suite app (CRM, Campaigns, Drive, etc.)
|
||||||
|
2. Chat bar at bottom activates with app context
|
||||||
|
3. `POST /api/session/context { app: "campaigns" }` sets `session.active_app`
|
||||||
|
4. BotOrchestrator loads `campaigns/PROMPT.md` + `campaigns/tools.rs`
|
||||||
|
5. User can ask natural language questions or trigger actions
|
||||||
|
|
||||||
|
### Examples per app
|
||||||
|
|
||||||
|
| App | Example query | Tool activated |
|
||||||
|
|---|---|---|
|
||||||
|
| **Campaigns** | "How did last week's campaign perform?" | `get_campaign_metrics` |
|
||||||
|
| **CRM** | "Show deals closing this month" | `list_deals` with filter |
|
||||||
|
| **Drive** | "Find the Q1 report" | `search_files` |
|
||||||
|
| **Tasks** | "Create a task to follow up with Acme" | `create_task` |
|
||||||
|
| **People** | "Who hasn't been contacted in 30 days?" | `list_contacts` with filter |
|
||||||
|
| **Mail** | "Summarize unread emails from clients" | `list_emails` + LLM summary |
|
||||||
|
| **Sheet** | "What's the total revenue in column D?" | `query_sheet` |
|
||||||
|
| **Learn** | "What does our refund policy say?" | `search_kb` |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
### Phase 1 — Infrastructure (1 sprint)
|
||||||
|
- [ ] `core/bot/mod.rs` — read `session.active_app`, load app PROMPT + tools
|
||||||
|
- [ ] `core/tool_registry.rs` — app tool registry: `register_app_tools(app_name) -> Vec<Tool>`
|
||||||
|
- [ ] `POST /api/session/context` — set active app from UI
|
||||||
|
- [ ] Suite chat bar UI component (`botui/ui/suite/partials/chatbar.html`)
|
||||||
|
|
||||||
|
### Phase 2 — WhatsApp Campaigns (1 sprint)
|
||||||
|
- [ ] `marketing/whatsapp_tools.rs` — 10 tools above
|
||||||
|
- [ ] `marketing/WHATSAPP_PROMPT.md`
|
||||||
|
- [ ] `marketing/file_parser.rs` — xlsx/csv → contact list
|
||||||
|
- [ ] Meta warmup enforcement in send path
|
||||||
|
- [ ] Conversational campaign creation flow (state machine in session)
|
||||||
|
|
||||||
|
### Phase 3 — App-by-app rollout (1 app/sprint)
|
||||||
|
Priority order based on value:
|
||||||
|
1. CRM (deals, contacts, pipeline queries)
|
||||||
|
2. Campaigns (email + WhatsApp)
|
||||||
|
3. Tasks (create, assign, status)
|
||||||
|
4. Drive (search, summarize docs)
|
||||||
|
5. Mail (summarize, draft reply)
|
||||||
|
6. People (segment, find contacts)
|
||||||
|
7. Sheet (query, calculate)
|
||||||
|
8. Learn (KB search)
|
||||||
|
|
||||||
|
### Phase 4 — Cross-app intelligence
|
||||||
|
- [ ] Global search across all apps via single query
|
||||||
|
- [ ] "What happened today?" — aggregates activity across CRM + Mail + Tasks
|
||||||
|
- [ ] Proactive suggestions: "You have 3 deals closing this week and no follow-up tasks"
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## File Structure to Create
|
||||||
|
|
||||||
|
```
|
||||||
|
botserver/src/
|
||||||
|
├── marketing/
|
||||||
|
│ ├── whatsapp_tools.rs ← NEW: LLM tool definitions + handlers
|
||||||
|
│ ├── WHATSAPP_PROMPT.md ← NEW: WhatsApp assistant system prompt
|
||||||
|
│ ├── file_parser.rs ← NEW: xlsx/csv → contacts
|
||||||
|
│ └── warmup.rs ← NEW: (from campaigns.md plan)
|
||||||
|
├── core/
|
||||||
|
│ ├── tool_registry.rs ← NEW: app → tools mapping
|
||||||
|
│ └── bot/
|
||||||
|
│ └── app_context.rs ← NEW: load app prompt + tools per session
|
||||||
|
├── crm/
|
||||||
|
│ ├── tools.rs ← NEW
|
||||||
|
│ └── PROMPT.md ← NEW
|
||||||
|
├── tasks/
|
||||||
|
│ └── tools.rs ← NEW (PROMPT.md exists)
|
||||||
|
└── <each app>/
|
||||||
|
├── tools.rs ← NEW per app
|
||||||
|
└── PROMPT.md ← NEW per app
|
||||||
|
|
||||||
|
botui/ui/suite/
|
||||||
|
└── partials/
|
||||||
|
└── chatbar.html ← NEW: shared chat bar component
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Chat Bar UI (`partials/chatbar.html`)
|
||||||
|
|
||||||
|
```html
|
||||||
|
<div id="suite-chatbar" class="chatbar">
|
||||||
|
<div id="chatbar-messages" hx-ext="ws" ws-connect="/ws/suite-chat"></div>
|
||||||
|
<form ws-send>
|
||||||
|
<input type="hidden" name="app_context" value="{{ active_app }}">
|
||||||
|
<input type="file" id="chatbar-file" name="file" accept=".xlsx,.csv,.png,.jpg,.pdf" style="display:none">
|
||||||
|
<button type="button" onclick="document.getElementById('chatbar-file').click()">📎</button>
|
||||||
|
<input type="text" name="message" placeholder="Ask anything about {{ active_app }}...">
|
||||||
|
<button type="submit">→</button>
|
||||||
|
</form>
|
||||||
|
</div>
|
||||||
|
```
|
||||||
|
|
||||||
|
File uploads go to `POST /api/suite/upload` → stored in Drive → media_id passed to tool.
|
||||||
434
prompts/switcher.md
Normal file
434
prompts/switcher.md
Normal file
|
|
@ -0,0 +1,434 @@
|
||||||
|
# SWITCHER Feature - Response Format Modifiers
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
Add a switcher interface that allows users to toggle response modifiers that influence how the AI generates responses. Unlike suggestions (which are one-time actions), switchers are persistent toggles that remain active until deactivated.
|
||||||
|
|
||||||
|
## Location
|
||||||
|
`botui/ui/suite/chat/` - alongside existing suggestion buttons
|
||||||
|
|
||||||
|
## Syntax
|
||||||
|
|
||||||
|
### Standard Switcher (predefined prompt)
|
||||||
|
```
|
||||||
|
ADD SWITCHER "tables" AS "Tabelas"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Custom Switcher (with custom prompt)
|
||||||
|
```
|
||||||
|
ADD SWITCHER "sempre mostrar 10 perguntas" AS "Mostrar Perguntas"
|
||||||
|
```
|
||||||
|
|
||||||
|
## What Switcher Does
|
||||||
|
|
||||||
|
The switcher:
|
||||||
|
1. **Injects the prompt** into every LLM request
|
||||||
|
2. **The prompt** can be:
|
||||||
|
- **Standard**: References a predefined prompt by ID (`"tables"`, `"cards"`, etc.)
|
||||||
|
- **Custom**: Any custom instruction string (`"sempre mostrar 10 perguntas"`)
|
||||||
|
3. **Influences** the AI response format
|
||||||
|
4. **Persists** until toggled OFF
|
||||||
|
|
||||||
|
## Available Standard Switchers
|
||||||
|
|
||||||
|
| ID | Label | Color | Description |
|
||||||
|
|----|--------|--------|-------------|
|
||||||
|
| tables | Tabelas | #4CAF50 | Format responses as tables |
|
||||||
|
| infographic | Infográfico | #2196F3 | Visual, graphical representations |
|
||||||
|
| cards | Cards | #FF9800 | Card-based layout |
|
||||||
|
| list | Lista | #9C27B0 | Bulleted lists |
|
||||||
|
| comparison | Comparação | #E91E63 | Side-by-side comparisons |
|
||||||
|
| timeline | Timeline | #00BCD4 | Chronological ordering |
|
||||||
|
| markdown | Markdown | #607D8B | Standard markdown |
|
||||||
|
| chart | Gráfico | #F44336 | Charts and diagrams |
|
||||||
|
|
||||||
|
## Predefined Prompts (Backend)
|
||||||
|
|
||||||
|
Each standard ID maps to a predefined prompt in the backend:
|
||||||
|
|
||||||
|
```
|
||||||
|
ID: tables
|
||||||
|
Prompt: "REGRAS DE FORMATO: SEMPRE retorne suas respostas em formato de tabela HTML usando <table>, <thead>, <tbody>, <tr>, <th>, <td>. Cada dado deve ser uma célula. Use cabeçalhos claros na primeira linha. Se houver dados numéricos, alinhe à direita. Se houver texto, alinhe à esquerda. Use cores sutis em linhas alternadas (nth-child). NÃO use markdown tables, use HTML puro."
|
||||||
|
|
||||||
|
ID: infographic
|
||||||
|
Prompt: "REGRAS DE FORMATO: Crie representações visuais HTML usando SVG, progress bars, stat cards, e elementos gráficos. Use elementos como: <svg> para gráficos, <div style="width:X%;background:color"> para barras de progresso, ícones emoji, badges coloridos. Organize informações visualmente com grids, flexbox, e espaçamento. Inclua legendas e rótulos visuais claros."
|
||||||
|
|
||||||
|
ID: cards
|
||||||
|
Prompt: "REGRAS DE FORMATO: Retorne informações em formato de cards HTML. Cada card deve ter: <div class="card" style="border:1px solid #ddd;border-radius:8px;padding:16px;margin:8px;box-shadow:0 2px 4px rgba(0,0,0,0.1)">. Dentro do card use: título em <h3> ou <strong>, subtítulo em <p> style="color:#666", ícone emoji ou ícone SVG no topo, badges de status. Organize cards em grid usando display:grid ou flex-wrap."
|
||||||
|
|
||||||
|
ID: list
|
||||||
|
Prompt: "REGRAS DE FORMATO: Use apenas listas HTML: <ul> para bullets e <ol> para números numerados. Cada item em <li>. Use sublistas aninhadas quando apropriado. NÃO use parágrafos de texto, converta tudo em itens de lista. Adicione ícones emoji no início de cada <li> quando possível. Use classes CSS para estilização: .list-item, .sub-list."
|
||||||
|
|
||||||
|
ID: comparison
|
||||||
|
Prompt: "REGRAS DE FORMATO: Crie comparações lado a lado em HTML. Use grid de 2 colunas: <div style="display:grid;grid-template-columns:1fr 1fr;gap:20px">. Cada lado em uma <div class="comparison-side"> com borda colorida distinta. Use headers claros para cada lado. Adicione seção de "Diferenças Chave" com bullet points. Use cores contrastantes para cada lado (ex: azul vs laranja). Inclua tabela de comparação resumida no final."
|
||||||
|
|
||||||
|
ID: timeline
|
||||||
|
Prompt: "REGRAS DE FORMATO: Organize eventos cronologicamente em formato de timeline HTML. Use <div class="timeline"> com border-left vertical. Cada evento em <div class="timeline-item"> com: data em <span class="timeline-date" style="font-weight:bold;color:#666">, título em <h3>, descrição em <p>. Adicione círculo indicador na timeline line. Ordene do mais antigo para o mais recente. Use espaçamento claro entre eventos."
|
||||||
|
|
||||||
|
ID: markdown
|
||||||
|
Prompt: "REGRAS DE FORMATO: Use exclusivamente formato Markdown padrão. Sintaxe permitida: **negrito**, *itálico*, `inline code`, ```bloco de código```, # cabeçalhos, - bullets, 1. números, [links](url), , | tabela | markdown |. NÃO use HTML tags exceto para blocos de código. Siga estritamente a sintaxe CommonMark."
|
||||||
|
|
||||||
|
ID: chart
|
||||||
|
Prompt: "REGRAS DE FORMATO: Crie gráficos e diagramas em HTML SVG. Use elementos SVG: <svg width="X" height="Y">, <line> para gráficos de linha, <rect> para gráficos de barra, <circle> para gráficos de pizza, <path> para gráficos de área. Inclua eixos com labels, grid lines, legendas. Use cores distintas para cada série de dados (ex: vermelho, azul, verde). Adicione tooltips com valores ao hover. Se o usuário pedir gráfico de pizza com "pizza vermelha", use fill="#FF0000" no SVG."
|
||||||
|
```
|
||||||
|
|
||||||
|
## UI Design
|
||||||
|
|
||||||
|
### HTML Structure
|
||||||
|
```html
|
||||||
|
<div class="switchers-container" id="switchers">
|
||||||
|
<div class="switchers-label">Formato:</div>
|
||||||
|
<div class="switchers-chips" id="switchersChips">
|
||||||
|
<!-- Switcher chips will be rendered here -->
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Placement
|
||||||
|
Position the switchers container **above** the suggestions container:
|
||||||
|
```html
|
||||||
|
<footer>
|
||||||
|
<div class="switchers-container" id="switchers"></div>
|
||||||
|
<div class="suggestions-container" id="suggestions"></div>
|
||||||
|
<!-- ... existing form ... -->
|
||||||
|
</footer>
|
||||||
|
```
|
||||||
|
|
||||||
|
### CSS Styling
|
||||||
|
|
||||||
|
#### Container
|
||||||
|
```css
|
||||||
|
.switchers-container {
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 12px;
|
||||||
|
padding: 8px 16px;
|
||||||
|
flex-wrap: wrap;
|
||||||
|
background: rgba(0, 0, 0, 0.02);
|
||||||
|
border-top: 1px solid rgba(0, 0, 0, 0.05);
|
||||||
|
}
|
||||||
|
|
||||||
|
.switchers-label {
|
||||||
|
font-size: 13px;
|
||||||
|
font-weight: 600;
|
||||||
|
color: #666;
|
||||||
|
text-transform: uppercase;
|
||||||
|
letter-spacing: 0.5px;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Switcher Chips (Toggle Buttons)
|
||||||
|
```css
|
||||||
|
.switchers-chips {
|
||||||
|
display: flex;
|
||||||
|
gap: 8px;
|
||||||
|
flex-wrap: wrap;
|
||||||
|
}
|
||||||
|
|
||||||
|
.switcher-chip {
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 6px;
|
||||||
|
padding: 6px 12px;
|
||||||
|
border-radius: 20px;
|
||||||
|
border: 2px solid transparent;
|
||||||
|
font-size: 13px;
|
||||||
|
font-weight: 500;
|
||||||
|
cursor: pointer;
|
||||||
|
transition: all 0.2s ease;
|
||||||
|
background: rgba(0, 0, 0, 0.05);
|
||||||
|
color: #666;
|
||||||
|
user-select: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
.switcher-chip:hover {
|
||||||
|
background: rgba(0, 0, 0, 0.08);
|
||||||
|
transform: translateY(-1px);
|
||||||
|
}
|
||||||
|
|
||||||
|
.switcher-chip.active {
|
||||||
|
border-color: currentColor;
|
||||||
|
background: currentColor;
|
||||||
|
color: white;
|
||||||
|
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15);
|
||||||
|
}
|
||||||
|
|
||||||
|
.switcher-chip-icon {
|
||||||
|
font-size: 14px;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## JavaScript Implementation
|
||||||
|
|
||||||
|
### State Management
|
||||||
|
```javascript
|
||||||
|
// Track active switchers
|
||||||
|
var activeSwitchers = new Set();
|
||||||
|
|
||||||
|
// Switcher definitions (from ADD SWITCHER commands in start.bas)
|
||||||
|
var switcherDefinitions = [
|
||||||
|
{
|
||||||
|
id: 'tables',
|
||||||
|
label: 'Tabelas',
|
||||||
|
icon: '📊',
|
||||||
|
color: '#4CAF50'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'infographic',
|
||||||
|
label: 'Infográfico',
|
||||||
|
icon: '📈',
|
||||||
|
color: '#2196F3'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'cards',
|
||||||
|
label: 'Cards',
|
||||||
|
icon: '🃏',
|
||||||
|
color: '#FF9800'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'list',
|
||||||
|
label: 'Lista',
|
||||||
|
icon: '📋',
|
||||||
|
color: '#9C27B0'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'comparison',
|
||||||
|
label: 'Comparação',
|
||||||
|
icon: '⚖️',
|
||||||
|
color: '#E91E63'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'timeline',
|
||||||
|
label: 'Timeline',
|
||||||
|
icon: '📅',
|
||||||
|
color: '#00BCD4'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'markdown',
|
||||||
|
label: 'Markdown',
|
||||||
|
icon: '📝',
|
||||||
|
color: '#607D8B'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'chart',
|
||||||
|
label: 'Gráfico',
|
||||||
|
icon: '📉',
|
||||||
|
color: '#F44336'
|
||||||
|
}
|
||||||
|
];
|
||||||
|
```
|
||||||
|
|
||||||
|
### Render Switchers
|
||||||
|
```javascript
|
||||||
|
function renderSwitchers() {
|
||||||
|
  var container = document.getElementById("switchersChips");
|
||||||
|
if (!container) return;
|
||||||
|
|
||||||
|
container.innerHTML = switcherDefinitions.map(function(sw) {
|
||||||
|
var isActive = activeSwitchers.has(sw.id);
|
||||||
|
return (
|
||||||
|
'<div class="switcher-chip' + (isActive ? ' active' : '') + '" ' +
|
||||||
|
'data-switch-id="' + sw.id + '" ' +
|
||||||
|
'style="--switcher-color: ' + sw.color + '; ' +
|
||||||
|
      (isActive ? 'color: white; background: ' + sw.color + '; ' : '') +
|
||||||
|
'">' +
|
||||||
|
'<span class="switcher-chip-icon">' + sw.icon + '</span>' +
|
||||||
|
'<span>' + sw.label + '</span>' +
|
||||||
|
'</div>'
|
||||||
|
);
|
||||||
|
}).join('');
|
||||||
|
|
||||||
|
// Add click handlers
|
||||||
|
container.querySelectorAll('.switcher-chip').forEach(function(chip) {
|
||||||
|
chip.addEventListener('click', function() {
|
||||||
|
toggleSwitcher(this.getAttribute('data-switch-id'));
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Toggle Handler
|
||||||
|
```javascript
|
||||||
|
function toggleSwitcher(switcherId) {
|
||||||
|
if (activeSwitchers.has(switcherId)) {
|
||||||
|
activeSwitchers.delete(switcherId);
|
||||||
|
} else {
|
||||||
|
activeSwitchers.add(switcherId);
|
||||||
|
}
|
||||||
|
renderSwitchers();
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Message Enhancement
|
||||||
|
When sending a user message, prepend active switcher prompts:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function sendMessage(messageContent) {
|
||||||
|
// ... existing code ...
|
||||||
|
|
||||||
|
var content = messageContent || input.value.trim();
|
||||||
|
if (!content) return;
|
||||||
|
|
||||||
|
// Prepend active switcher prompts
|
||||||
|
var enhancedContent = content;
|
||||||
|
if (activeSwitchers.size > 0) {
|
||||||
|
// Get prompts for active switchers from backend
|
||||||
|
var activePrompts = [];
|
||||||
|
activeSwitchers.forEach(function(id) {
|
||||||
|
// Backend has predefined prompts for each ID
|
||||||
|
activePrompts.push(getSwitcherPrompt(id));
|
||||||
|
});
|
||||||
|
|
||||||
|
// Inject prompts before user message
|
||||||
|
if (activePrompts.length > 0) {
|
||||||
|
enhancedContent = activePrompts.join('\n\n') + '\n\n---\n\n' + content;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send enhanced content
|
||||||
|
addMessage("user", content);
|
||||||
|
|
||||||
|
if (ws && ws.readyState === WebSocket.OPEN) {
|
||||||
|
ws.send(JSON.stringify({
|
||||||
|
bot_id: currentBotId,
|
||||||
|
user_id: currentUserId,
|
||||||
|
session_id: currentSessionId,
|
||||||
|
channel: "web",
|
||||||
|
content: enhancedContent,
|
||||||
|
message_type: MessageType.USER,
|
||||||
|
timestamp: new Date().toISOString(),
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function getSwitcherPrompt(switcherId) {
|
||||||
|
// Get predefined prompt from backend or API
|
||||||
|
// For example, tables ID maps to:
|
||||||
|
// "REGRAS DE FORMATO: SEMPRE retorne suas respostas em formato de tabela HTML..."
|
||||||
|
var switcher = switcherDefinitions.find(function(s) { return s.id === switcherId; });
|
||||||
|
if (!switcher) return "";
|
||||||
|
|
||||||
|
// This could be fetched from backend or stored locally
|
||||||
|
return SWITCHER_PROMPTS[switcherId] || "";
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Bot Integration (start.bas)
|
||||||
|
|
||||||
|
The bot receives the switcher prompt injected into the user message and simply passes it to the LLM.
|
||||||
|
|
||||||
|
### Example in start.bas
|
||||||
|
|
||||||
|
```basic
|
||||||
|
REM Switcher prompts are automatically injected by frontend
|
||||||
|
REM Just pass user_input to LLM - no parsing needed!
|
||||||
|
|
||||||
|
REM If user types: "mostra os cursos"
|
||||||
|
REM And "Tabelas" switcher is active
|
||||||
|
REM Frontend sends: "REGRAS DE FORMATO: SEMPRE retorne suas respostas em formato de tabela HTML... --- mostra os cursos"
|
||||||
|
|
||||||
|
REM Bot passes directly to LLM:
|
||||||
|
response$ = CALL_LLM(user_input)
|
||||||
|
|
||||||
|
REM The LLM will follow the REGRAS DE FORMATO instructions
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multiple Active Switchers
|
||||||
|
|
||||||
|
When multiple switchers are active, all prompts are injected:
|
||||||
|
|
||||||
|
```basic
|
||||||
|
REM Frontend injects multiple REGRAS DE FORMATO blocks
|
||||||
|
REM Example with "Tabelas" and "Gráfico" active:
|
||||||
|
REM
|
||||||
|
REM "REGRAS DE FORMATO: SEMPRE retorne suas respostas em formato de tabela HTML...
|
||||||
|
REM REGRAS DE FORMATO: Crie gráficos e diagramas em HTML SVG...
|
||||||
|
REM ---
|
||||||
|
REM mostra os dados de vendas"
|
||||||
|
|
||||||
|
REM Bot passes to LLM:
|
||||||
|
response$ = CALL_LLM(user_input)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Implementation Steps
|
||||||
|
|
||||||
|
1. ✅ Create prompts/switcher.md (this file)
|
||||||
|
2. ⬜ Define predefined prompts in backend (map IDs to prompt strings)
|
||||||
|
3. ⬜ Add HTML structure to chat.html (switchers container)
|
||||||
|
4. ⬜ Add CSS styles to chat.css (switcher chip styles)
|
||||||
|
5. ⬜ Add switcher definitions to chat.js
|
||||||
|
6. ⬜ Implement renderSwitchers() function
|
||||||
|
7. ⬜ Implement toggleSwitcher() function
|
||||||
|
8. ⬜ Modify sendMessage() to prepend switcher prompts
|
||||||
|
9. ⬜ Update salesianos bot start.bas to use ADD SWITCHER commands
|
||||||
|
10. ⬜ Test locally with all switcher options
|
||||||
|
11. ⬜ Verify multiple switchers can be active simultaneously
|
||||||
|
12. ⬜ Test persistence across page refreshes (optional - localStorage)
|
||||||
|
|
||||||
|
## Testing Checklist
|
||||||
|
|
||||||
|
- [ ] Switchers appear above suggestions
|
||||||
|
- [ ] Switchers are colorful and match their defined colors
|
||||||
|
- [ ] Clicking a switcher toggles it on/off
|
||||||
|
- [ ] Multiple switchers can be active simultaneously
|
||||||
|
- [ ] Active switchers have distinct visual state (border, background, shadow)
|
||||||
|
- [ ] Formatted responses match the selected format
|
||||||
|
- [ ] Toggling off removes the format modifier
|
||||||
|
- [ ] Works with empty active switchers (normal response)
|
||||||
|
- [ ] Works in combination with suggestions
|
||||||
|
- [ ] Responsive design on mobile devices
|
||||||
|
|
||||||
|
## Files to Modify
|
||||||
|
|
||||||
|
1. `botui/ui/suite/chat/chat.html` - Add switcher container HTML
|
||||||
|
2. `botui/ui/suite/chat/chat.css` - Add switcher styles
|
||||||
|
3. `botui/ui/suite/chat/chat.js` - Add switcher logic
|
||||||
|
4. `botserver/bots/salesianos/start.bas` - Add ADD SWITCHER commands
|
||||||
|
|
||||||
|
## Example start.bas
|
||||||
|
|
||||||
|
```basic
|
||||||
|
USE_WEBSITE("https://salesianos.br", "30d")
|
||||||
|
|
||||||
|
USE KB "carta"
|
||||||
|
USE KB "proc"
|
||||||
|
|
||||||
|
USE TOOL "inscricao"
|
||||||
|
USE TOOL "consultar_inscricao"
|
||||||
|
USE TOOL "agendamento_visita"
|
||||||
|
USE TOOL "informacoes_curso"
|
||||||
|
USE TOOL "documentos_necessarios"
|
||||||
|
USE TOOL "contato_secretaria"
|
||||||
|
USE TOOL "calendario_letivo"
|
||||||
|
|
||||||
|
ADD_SUGGESTION_TOOL "inscricao" AS "Fazer Inscrição"
|
||||||
|
ADD_SUGGESTION_TOOL "consultar_inscricao" AS "Consultar Inscrição"
|
||||||
|
ADD_SUGGESTION_TOOL "agendamento_visita" AS "Agendar Visita"
|
||||||
|
ADD_SUGGESTION_TOOL "informacoes_curso" AS "Informações de Cursos"
|
||||||
|
ADD_SUGGESTION_TOOL "documentos_necessarios" AS "Documentos Necessários"
|
||||||
|
ADD_SUGGESTION_TOOL "contato_secretaria" AS "Falar com Secretaria"
|
||||||
|
ADD_SUGGESTION_TOOL "segunda_via" AS "Segunda Via de Boleto"
|
||||||
|
ADD_SUGGESTION_TOOL "calendario_letivo" AS "Calendário Letivo"
|
||||||
|
ADD_SUGGESTION_TOOL "outros" AS "Outros"
|
||||||
|
|
||||||
|
ADD SWITCHER "tables" AS "Tabelas"
|
||||||
|
ADD SWITCHER "infographic" AS "Infográfico"
|
||||||
|
ADD SWITCHER "cards" AS "Cards"
|
||||||
|
ADD SWITCHER "list" AS "Lista"
|
||||||
|
ADD SWITCHER "comparison" AS "Comparação"
|
||||||
|
ADD SWITCHER "timeline" AS "Timeline"
|
||||||
|
ADD SWITCHER "markdown" AS "Markdown"
|
||||||
|
ADD SWITCHER "chart" AS "Gráfico"
|
||||||
|
|
||||||
|
TALK "Olá! Sou o assistente virtual da Escola Salesiana. Como posso ajudá-lo hoje com inscrições, visitas, informações sobre cursos, documentos ou calendário letivo? Você pode também escolher formatos de resposta acima da caixa de mensagem."
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Switchers are **persistent** until deactivated
|
||||||
|
- Multiple switchers can be active at once
|
||||||
|
- Switcher prompts are prepended to user messages with "---" separator
|
||||||
|
- The backend (LLM) should follow these format instructions
|
||||||
|
- UI should provide clear visual feedback for active switchers
|
||||||
|
- Color coding helps users quickly identify active formats
|
||||||
|
- Standard switchers use predefined prompts in backend
|
||||||
|
- Custom switchers allow any prompt string to be injected
|
||||||
154
prompts/usekb2.md
Normal file
154
prompts/usekb2.md
Normal file
|
|
@ -0,0 +1,154 @@
|
||||||
|
# USE KB 2.0: Group-Based Knowledge Base Access
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
Modify the USE KB keyword to respect user group permissions, ensuring that THINK KB queries only return answers from knowledge base folders that belong to groups the logged-in user is a member of.
|
||||||
|
|
||||||
|
## Current Architecture
|
||||||
|
|
||||||
|
### USE KB Flow
|
||||||
|
1. User executes `USE KB "kb_name"` in BASIC script
|
||||||
|
2. `use_kb.rs:add_kb_to_session()` checks if KB exists in `kb_collections`
|
||||||
|
3. Creates default KB entry if not found
|
||||||
|
4. Adds association to `session_kb_associations` table
|
||||||
|
5. KB becomes active for the session
|
||||||
|
|
||||||
|
### THINK KB Flow
|
||||||
|
1. User executes `THINK KB "query"`
|
||||||
|
2. `think_kb.rs:think_kb_search()` gets all active KBs from `session_kb_associations`
|
||||||
|
3. For each active KB, calls `KnowledgeBaseManager.search()` on its Qdrant collection
|
||||||
|
4. Returns combined results from all active KBs
|
||||||
|
|
||||||
|
### Group System
|
||||||
|
- Groups stored in `rbac_groups` table
|
||||||
|
- User membership in `rbac_user_groups` table
|
||||||
|
- Group permissions via `rbac_group_roles` table
|
||||||
|
|
||||||
|
## Proposed Changes
|
||||||
|
|
||||||
|
### 1. Database Schema Changes
|
||||||
|
|
||||||
|
Add new table `kb_group_associations`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE kb_group_associations (
|
||||||
|
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||||
|
kb_id UUID NOT NULL REFERENCES kb_collections(id) ON DELETE CASCADE,
|
||||||
|
group_id UUID NOT NULL REFERENCES rbac_groups(id) ON DELETE CASCADE,
|
||||||
|
granted_by UUID REFERENCES users(id) ON DELETE SET NULL,
|
||||||
|
granted_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||||
|
UNIQUE(kb_id, group_id)
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
Migration file: `botserver/migrations/6.2.0-01-kb-groups/up.sql`
|
||||||
|
|
||||||
|
### 2. Backend Logic Changes
|
||||||
|
|
||||||
|
#### Modify `think_kb_search()` in `think_kb.rs`
|
||||||
|
- Add user group lookup before searching
|
||||||
|
- Filter active KBs to only those accessible by user's groups
|
||||||
|
- Allow access if KB has no group associations (public KBs) OR user is in associated groups
|
||||||
|
|
||||||
|
```rust
|
||||||
|
async fn think_kb_search(
|
||||||
|
kb_manager: Arc<KnowledgeBaseManager>,
|
||||||
|
db_pool: DbPool,
|
||||||
|
session_id: Uuid,
|
||||||
|
bot_id: Uuid,
|
||||||
|
user_id: Uuid, // Add user_id parameter
|
||||||
|
query: &str,
|
||||||
|
) -> Result<serde_json::Value, String> {
|
||||||
|
// Get user's groups
|
||||||
|
let user_groups = get_user_groups(&db_pool, user_id)?;
|
||||||
|
|
||||||
|
// Get active KBs filtered by groups
|
||||||
|
let accessible_kbs = get_accessible_kbs_for_session(&db_pool, session_id, &user_groups)?;
|
||||||
|
|
||||||
|
// Search only accessible KBs
|
||||||
|
// ... rest of search logic
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Add `get_accessible_kbs_for_session()` function
|
||||||
|
```rust
|
||||||
|
fn get_accessible_kbs_for_session(
|
||||||
|
conn_pool: &DbPool,
|
||||||
|
session_id: Uuid,
|
||||||
|
user_groups: &[String],
|
||||||
|
) -> Result<Vec<(String, String, String)>, String> {
|
||||||
|
// Query that joins session_kb_associations with kb_group_associations
|
||||||
|
// Returns KBs where group_id IS NULL (public) OR group_id IN user_groups
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Modify `add_kb_to_session()` in `use_kb.rs`
|
||||||
|
- Add optional group access check
|
||||||
|
- Allow USE KB if user has access to the KB's groups
|
||||||
|
|
||||||
|
### 3. API Changes
|
||||||
|
|
||||||
|
Add new endpoints in `rbac.rs` for KB-group management:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Assign KB to group
|
||||||
|
POST /api/rbac/kbs/{kb_id}/groups/{group_id}
|
||||||
|
|
||||||
|
// Remove KB from group
|
||||||
|
DELETE /api/rbac/kbs/{kb_id}/groups/{group_id}
|
||||||
|
|
||||||
|
// Get groups for KB
|
||||||
|
GET /api/rbac/kbs/{kb_id}/groups
|
||||||
|
|
||||||
|
// Get KBs accessible by user
|
||||||
|
GET /api/rbac/users/{user_id}/accessible-kbs
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Frontend Changes
|
||||||
|
|
||||||
|
#### Update `botui/ui/suite/admin/groups.html`
|
||||||
|
- Add "Knowledge Bases" tab to group detail panel
|
||||||
|
- Show list of KBs assigned to the group
|
||||||
|
- Allow adding/removing KB assignments
|
||||||
|
|
||||||
|
#### Update `botui/ui/suite/drive/drive.html`
|
||||||
|
- Add group visibility indicators for KB folders
|
||||||
|
- Show which groups have access to each KB
|
||||||
|
|
||||||
|
### 5. Migration Strategy
|
||||||
|
|
||||||
|
1. Create new migration for `kb_group_associations` table
|
||||||
|
2. Run migration to create table
|
||||||
|
3. Assign existing KBs to default groups (e.g., "all_users" group)
|
||||||
|
4. Update application code
|
||||||
|
5. Deploy and test
|
||||||
|
|
||||||
|
### 6. Backward Compatibility
|
||||||
|
|
||||||
|
- Existing KBs without group associations remain public
|
||||||
|
- Existing USE KB calls continue to work
|
||||||
|
- THINK KB will filter results based on new permissions
|
||||||
|
|
||||||
|
## Implementation Steps
|
||||||
|
|
||||||
|
1. ✅ Database migration for kb_group_associations
|
||||||
|
2. ✅ Modify think_kb_search to accept user_id and filter by groups
|
||||||
|
3. ✅ Update THINK KB keyword registration to pass user_id
|
||||||
|
4. ✅ Add group access check to USE KB
|
||||||
|
5. ✅ Add API endpoints for KB-group management
|
||||||
|
6. ✅ Update admin UI for group-KB assignment
|
||||||
|
7. ✅ Update drive UI to show group access
|
||||||
|
8. ✅ Add tests for group-based access control
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
- All KB access checks must happen at the database level
|
||||||
|
- No client-side filtering of search results
|
||||||
|
- Group membership verified on each request
|
||||||
|
- Audit logging for KB access attempts
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
- Unit tests for group access functions
|
||||||
|
- Integration tests for THINK KB with group filtering
|
||||||
|
- UI tests for admin group-KB management
|
||||||
|
- End-to-end tests with different user group scenarios
|
||||||
44
prompts/v6.2.md
Normal file
44
prompts/v6.2.md
Normal file
|
|
@@ -0,0 +1,44 @@
|
||||||
|
# v6.2 — Make KB "cartas" work end-to-end
|
||||||
|
|
||||||
|
## What we want
|
||||||
|
|
||||||
|
User clicks "Cartas" → `cartas.bas` runs → `USE KB "cartas"` → searches Qdrant → bot answers with KB content. No restarts.
|
||||||
|
|
||||||
|
## 3 Bugs we found
|
||||||
|
|
||||||
|
### Bug 1: KB files re-indexed every 10s (wasteful) ✅ FIXED
|
||||||
|
|
||||||
|
Every cycle, `check_gbkb_changes` replaces file_states with `indexed: false`, so DriveMonitor re-downloads and re-indexes all PDFs every 10 seconds.
|
||||||
|
|
||||||
|
**Fix:** Preserve `indexed: true` when etag hasn't changed.
|
||||||
|
**File:** `botserver/src/drive/drive_monitor/mod.rs:1376`
|
||||||
|
|
||||||
|
### Bug 2: USE KB looks for wrong collection name ✅ FIXED
|
||||||
|
|
||||||
|
When `kb_collections` has no entry for "cartas", `USE KB` creates a collection name using a random UUID (`salesianos_<random>_cartas`). But Qdrant has `salesianos_6deedba8_cartas`. They never match → search returns nothing.
|
||||||
|
|
||||||
|
**Fix:** Use `bot_id_short` (first 8 chars of bot UUID) consistently. Also changed `ON CONFLICT DO NOTHING` to `DO UPDATE` so stale entries get corrected.
|
||||||
|
**File:** `botserver/src/basic/keywords/use_kb.rs:221-244`
|
||||||
|
|
||||||
|
### Bug 3: KB indexing never writes to kb_collections table ✅ FIXED
|
||||||
|
|
||||||
|
`index_kb_folder` creates a Qdrant collection and indexes documents, but never writes a row to `kb_collections`. So when `USE KB "cartas"` runs, it queries `kb_collections` → empty → hits Bug 2's fallback path.
|
||||||
|
|
||||||
|
**Fix:** After indexing, upsert into `kb_collections` with correct collection name.
|
||||||
|
**File:** `botserver/src/core/kb/mod.rs:167-220`
|
||||||
|
|
||||||
|
Also changed `process_gbkb_folder` return type from `Result<()>` to `Result<IndexingResult>` so `handle_gbkb_change` can use `collection_name` and `documents_processed`.
|
||||||
|
|
||||||
|
## Checklist
|
||||||
|
|
||||||
|
- [x] Bug 1 code fix (file_states indexed flag)
|
||||||
|
- [x] Bug 2 code fix (USE KB collection name)
|
||||||
|
- [x] Bug 3 code fix (kb_collections upsert after indexing)
|
||||||
|
- [x] `cargo check -p botserver` passes
|
||||||
|
- [ ] Push botserver → origin + ALM
|
||||||
|
- [ ] Push main repo → origin + ALM
|
||||||
|
- [ ] Deploy to production (ask user first)
|
||||||
|
- [ ] Restart botserver (one-time for new binary)
|
||||||
|
- [ ] Test: click "Cartas" → verify KB search works
|
||||||
|
- [ ] Test: click "Procedimentos" → verify KB search works
|
||||||
|
- [ ] Verify PROMPT.md injection
|
||||||
10
reset.sh
Executable file
10
reset.sh
Executable file
|
|
@@ -0,0 +1,10 @@
|
||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
echo "Cleaning up..."
|
||||||
|
rm -rf botserver-stack/ ./work/ .env
|
||||||
|
|
||||||
|
echo "Starting services..."
|
||||||
|
./restart.sh
|
||||||
|
|
||||||
|
echo "Reset complete!"
|
||||||
32
restart.ps1
Normal file
32
restart.ps1
Normal file
|
|
@@ -0,0 +1,32 @@
|
||||||
|
$ErrorActionPreference = "Continue"
|
||||||
|
|
||||||
|
Write-Host "Stopping..."
|
||||||
|
Stop-Process -Name "botserver" -Force -ErrorAction SilentlyContinue
|
||||||
|
Stop-Process -Name "botui" -Force -ErrorAction SilentlyContinue
|
||||||
|
Stop-Process -Name "rustc" -Force -ErrorAction SilentlyContinue
|
||||||
|
|
||||||
|
Write-Host "Cleaning..."
|
||||||
|
Remove-Item -Path "botserver.log", "botui.log" -Force -ErrorAction SilentlyContinue
|
||||||
|
|
||||||
|
Write-Host "Building..."
|
||||||
|
cargo build -p botserver
|
||||||
|
if ($LASTEXITCODE -ne 0) { Write-Host "Failed to build botserver"; exit 1 }
|
||||||
|
|
||||||
|
cargo build -p botui
|
||||||
|
if ($LASTEXITCODE -ne 0) { Write-Host "Failed to build botui"; exit 1 }
|
||||||
|
|
||||||
|
Write-Host "Starting botserver..."
|
||||||
|
$env:PORT = "8080"
|
||||||
|
$env:RUST_LOG = "debug"
|
||||||
|
$env:PATH += ";C:\pgsql\pgsql\bin;C:\pgsql\pgsql\lib"
|
||||||
|
$botserverProcess = Start-Process -PassThru -NoNewWindow -FilePath ".\target\debug\botserver.exe" -ArgumentList "--noconsole" -RedirectStandardOutput "botserver.log" -RedirectStandardError "botserver.log"
|
||||||
|
Write-Host " PID: $($botserverProcess.Id)"
|
||||||
|
|
||||||
|
Write-Host "Starting botui..."
|
||||||
|
$env:BOTSERVER_URL = "http://localhost:8080"
|
||||||
|
$env:PORT = "3000"
|
||||||
|
$botuiProcess = Start-Process -PassThru -NoNewWindow -FilePath ".\target\debug\botui.exe" -RedirectStandardOutput "botui.log" -RedirectStandardError "botui.log"
|
||||||
|
Write-Host " PID: $($botuiProcess.Id)"
|
||||||
|
|
||||||
|
Write-Host "Done. Logs are being written to botserver.log and botui.log"
|
||||||
|
Write-Host "To view logs, you can use: Get-Content botserver.log -Wait"
|
||||||
49
restart.sh
Executable file
49
restart.sh
Executable file
|
|
@@ -0,0 +1,49 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
echo "=== Fast Restart: botserver + botmodels only ==="
|
||||||
|
|
||||||
|
# Kill only the app services, keep infra running
|
||||||
|
pkill -f "botserver --noconsole" || true
|
||||||
|
pkill -f "botmodels" || true
|
||||||
|
|
||||||
|
# Clean logs
|
||||||
|
rm -f botserver.log botmodels.log
|
||||||
|
|
||||||
|
# Build only botserver (botui likely already built)
|
||||||
|
cargo build -p botserver
|
||||||
|
|
||||||
|
# Start botmodels
|
||||||
|
cd botmodels
|
||||||
|
source venv/bin/activate
|
||||||
|
uvicorn src.main:app --host 0.0.0.0 --port 8085 > ../botmodels.log 2>&1 &
|
||||||
|
echo " botmodels PID: $!"
|
||||||
|
cd ..
|
||||||
|
|
||||||
|
# Wait for botmodels
|
||||||
|
for i in $(seq 1 20); do
|
||||||
|
if curl -s http://localhost:8085/api/health > /dev/null 2>&1; then
|
||||||
|
echo " botmodels ready"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
# Start botserver (keep botui running if already up)
|
||||||
|
if ! pgrep -f "botui" > /dev/null; then
|
||||||
|
echo "Starting botui..."
|
||||||
|
cargo build -p botui
|
||||||
|
cd botui
|
||||||
|
BOTSERVER_URL="http://localhost:8080" ./target/debug/botui > ../botui.log 2>&1 &
|
||||||
|
echo " botui PID: $!"
|
||||||
|
cd ..
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Start botserver
|
||||||
|
BOTMODELS_HOST="http://localhost:8085" BOTMODELS_API_KEY="starter" RUST_LOG=info ./target/debug/botserver --noconsole > botserver.log 2>&1 &
|
||||||
|
echo " botserver PID: $!"
|
||||||
|
|
||||||
|
# Quick health check
|
||||||
|
sleep 2
|
||||||
|
curl -s http://localhost:8080/health > /dev/null 2>&1 && echo "✅ botserver ready" || echo "❌ botserver failed"
|
||||||
|
|
||||||
|
echo "Done. botserver $(pgrep -f 'botserver --noconsole') botui $(pgrep -f botui) botmodels $(pgrep -f botmodels)"
|
||||||
Loading…
Add table
Reference in a new issue