mirror of
https://github.com/donaldzou/WGDashboard.git
synced 2025-12-14 07:26:17 +00:00
Compare commits
913 Commits
v4.1.1
...
fix-system
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
57652d9f8e | ||
|
|
bfc3e285db | ||
|
|
58dc5a70dc | ||
|
|
c94b8a17bc | ||
|
|
b18ceca439 | ||
|
|
b794c342b1 | ||
|
|
230fa166b8 | ||
|
|
0ed5640499 | ||
|
|
931d547046 | ||
|
|
30dd9f3e54 | ||
|
|
c98a83e5b1 | ||
|
|
10c9b5635f | ||
|
|
9dd5341fd9 | ||
|
|
d763867566 | ||
|
|
18531b71c7 | ||
|
|
bbeae93c05 | ||
|
|
3c297cbb96 | ||
|
|
954f99b354 | ||
|
|
3f75c0aa3b | ||
|
|
33d8a1f991 | ||
|
|
616238735a | ||
|
|
649c1ad292 | ||
|
|
145b18c28c | ||
|
|
34e43de1a1 | ||
|
|
b7ed934331 | ||
|
|
d5a7632bb6 | ||
|
|
6621c5f910 | ||
|
|
09a3fddd87 | ||
|
|
45510165a2 | ||
|
|
508dbf8dc2 | ||
|
|
eb1d52ffba | ||
|
|
5db7351f8c | ||
|
|
5ae3a56337 | ||
|
|
e7068b472e | ||
|
|
3dc94a35a1 | ||
|
|
f82abd71a3 | ||
|
|
602238d794 | ||
|
|
4d4a15740b | ||
|
|
524d50ee07 | ||
|
|
fc591b7fe8 | ||
|
|
c2f06193d0 | ||
|
|
f2ead12315 | ||
|
|
ca8700ac2a | ||
|
|
10a8d22efd | ||
|
|
fc3ec61373 | ||
|
|
094d1c0718 | ||
|
|
0d814ec03c | ||
|
|
5ccfe07e12 | ||
|
|
101ac5e985 | ||
|
|
113a780eec | ||
|
|
cf77610a56 | ||
|
|
84675fe521 | ||
|
|
5db5b35311 | ||
|
|
ff345c9609 | ||
|
|
6cccfec923 | ||
|
|
8231dd1463 | ||
|
|
d8ff020d8c | ||
|
|
238fb91360 | ||
|
|
9ecc16fcc1 | ||
|
|
7d9f60cf9b | ||
|
|
8ee16b4173 | ||
|
|
74861fa96b | ||
|
|
442a658487 | ||
|
|
3706b91b26 | ||
|
|
48cb54156b | ||
|
|
f3104c29ea | ||
|
|
689aee34ec | ||
|
|
3862ea4d28 | ||
|
|
a7e0eb52c2 | ||
|
|
537a88f618 | ||
|
|
93a5624294 | ||
|
|
37539990dd | ||
|
|
5cd8a80118 | ||
|
|
c74ecc3d75 | ||
|
|
404bccfb64 | ||
|
|
335c69f517 | ||
|
|
0d118f4d32 | ||
|
|
8f58a06731 | ||
|
|
4bfcc1abde | ||
|
|
25f39db690 | ||
|
|
da5ce7da9e | ||
|
|
e8126f3630 | ||
|
|
f842a7cc62 | ||
|
|
7250422aaa | ||
|
|
9661ff0b78 | ||
|
|
562201a342 | ||
|
|
1ee9f7bc68 | ||
|
|
18daa74ecd | ||
|
|
eadae9373f | ||
|
|
fb645dd84c | ||
|
|
0cffed3037 | ||
|
|
3bd5e02118 | ||
|
|
c4fe81fcbf | ||
|
|
40976870ee | ||
|
|
d526deb826 | ||
|
|
4ea3aa0a58 | ||
|
|
569ee8ac58 | ||
|
|
c42776a6d7 | ||
|
|
22af38579b | ||
|
|
a9ecf6c850 | ||
|
|
77112675ae | ||
|
|
0b054ae668 | ||
|
|
51ba9a86fa | ||
|
|
83eeaa0d73 | ||
|
|
a2316c8641 | ||
|
|
f231e9214c | ||
|
|
3673813e6a | ||
|
|
feb3c38113 | ||
|
|
73a969a6bf | ||
|
|
7ac6d6c498 | ||
|
|
b2f306789c | ||
|
|
1a26f757a8 | ||
|
|
1d66cda277 | ||
|
|
b52dad961b | ||
|
|
5a84136d87 | ||
|
|
73d701eb08 | ||
|
|
636ba5ebc8 | ||
|
|
627065e793 | ||
|
|
c9f395acbd | ||
|
|
919a5e9077 | ||
|
|
b5986fde82 | ||
|
|
cf95aded77 | ||
|
|
694a06ddb6 | ||
|
|
2eb3a17775 | ||
|
|
628464d2e1 | ||
|
|
ccaaa4bd21 | ||
|
|
1b285537ad | ||
|
|
eab31ba5d0 | ||
|
|
acc1233b11 | ||
|
|
91a3b52a4a | ||
|
|
b2532e305e | ||
|
|
06f7e7f74b | ||
|
|
a517867cdf | ||
|
|
e1d3ad11cc | ||
|
|
41dbdc8ccd | ||
|
|
df98ee8738 | ||
|
|
3be1cb6702 | ||
|
|
4644840009 | ||
|
|
4e75a95a73 | ||
|
|
92a2e26755 | ||
|
|
8ebd65cc0b | ||
|
|
15d51735d2 | ||
|
|
ee8afbd357 | ||
|
|
b3889bb1e3 | ||
|
|
8bbb4a48f7 | ||
|
|
2dce79bb85 | ||
|
|
1319c28f90 | ||
|
|
f95c6beeba | ||
|
|
6cf1eb6140 | ||
|
|
4b713ab66e | ||
|
|
43d055a8b4 | ||
|
|
28ce4bb1d6 | ||
|
|
604d53d2f0 | ||
|
|
62d3332522 | ||
|
|
9caed31185 | ||
|
|
030c1bdcba | ||
|
|
44af7eba11 | ||
|
|
41975973dc | ||
|
|
572c223854 | ||
|
|
8191668d60 | ||
|
|
632c4f3dc7 | ||
|
|
caacbfae03 | ||
|
|
3b3c071402 | ||
|
|
85fa427134 | ||
|
|
c3c7e50f08 | ||
|
|
45524eaee5 | ||
|
|
48ec4c7f6f | ||
|
|
593417a1fd | ||
|
|
e2c9941651 | ||
|
|
f865317600 | ||
|
|
c83a075886 | ||
|
|
56d894a8d1 | ||
|
|
bba7817c9b | ||
|
|
f360ef5d2f | ||
|
|
f6e625c5f8 | ||
|
|
e26639cdc4 | ||
|
|
38e0a939c2 | ||
|
|
2ec190ecfd | ||
|
|
549982db7f | ||
|
|
eab2c9d358 | ||
|
|
c5b72cb6d8 | ||
|
|
282a83829f | ||
|
|
20edd7cbcd | ||
|
|
a6311b4f63 | ||
|
|
b8792200c6 | ||
|
|
ba85879151 | ||
|
|
86bb847374 | ||
|
|
e61afba608 | ||
|
|
f418520f66 | ||
|
|
af3aebe34c | ||
|
|
8a568d787a | ||
|
|
0d943cb06f | ||
|
|
f9e9d32c52 | ||
|
|
6bf38d4346 | ||
|
|
2cd59fc310 | ||
|
|
1dfa1d62e1 | ||
|
|
854b9d252f | ||
|
|
0c0cf3a378 | ||
|
|
b4f29e63b4 | ||
|
|
afbb3df571 | ||
|
|
01caca707b | ||
|
|
8ed5826e6c | ||
|
|
2c1e36e54d | ||
|
|
1ed8b9f2d5 | ||
|
|
0ff8e9ed86 | ||
|
|
029081c610 | ||
|
|
63b9d15d34 | ||
|
|
7dcc3e7589 | ||
|
|
81e394a436 | ||
|
|
288068bf70 | ||
|
|
37546515be | ||
|
|
c32787ccd3 | ||
|
|
c9c7084db5 | ||
|
|
7b99441602 | ||
|
|
24940886f6 | ||
|
|
f777ac5f75 | ||
|
|
a511eb21fc | ||
|
|
f11a3c8c3b | ||
|
|
430a6053ef | ||
|
|
1867db5c99 | ||
|
|
f661cf0f83 | ||
|
|
b4814e281f | ||
|
|
9d4e5d8cb5 | ||
|
|
88f40b244a | ||
|
|
3e3047f23e | ||
|
|
ab4876e066 | ||
|
|
00d11880e8 | ||
|
|
c757cee988 | ||
|
|
ebbb681dd8 | ||
|
|
feff6ce027 | ||
|
|
39d01015e5 | ||
|
|
2aa2b15234 | ||
|
|
21c6d0b8f9 | ||
|
|
ffc9176225 | ||
|
|
d54609cc29 | ||
|
|
0a87453961 | ||
|
|
12d9058f1e | ||
|
|
f55c961e91 | ||
|
|
fbe4e7dc4c | ||
|
|
53079497a1 | ||
|
|
2d08171e7c | ||
|
|
e879ceb1bc | ||
|
|
37e2985b9a | ||
|
|
aaca74874d | ||
|
|
41c5b4bd64 | ||
|
|
71e43eb503 | ||
|
|
b52bb83c67 | ||
|
|
7322b7cbf0 | ||
|
|
62ffd97808 | ||
|
|
a4ee56648e | ||
|
|
f62e481fa0 | ||
|
|
fa26fce0cc | ||
|
|
2fbee4aacc | ||
|
|
530a7ef393 | ||
|
|
99cb546b59 | ||
|
|
084bec0f07 | ||
|
|
c199413d49 | ||
|
|
24eada4432 | ||
|
|
4df4aa07f4 | ||
|
|
91fd0f0e9a | ||
|
|
12f6244930 | ||
|
|
327ecbe34c | ||
|
|
2ca62293a9 | ||
|
|
f3cae0b005 | ||
|
|
8f15d5dcdd | ||
|
|
c8348f7be8 | ||
|
|
1839645360 | ||
|
|
2c73dc1df8 | ||
|
|
fefabe073f | ||
|
|
77b156c7f5 | ||
|
|
5ac84e109d | ||
|
|
83d105facd | ||
|
|
b7af06d59d | ||
|
|
4da32690a9 | ||
|
|
a49c2a1cc0 | ||
|
|
f633a9654a | ||
|
|
0d58a172a9 | ||
|
|
651784b1d1 | ||
|
|
68abc7ec1b | ||
|
|
9745e8b034 | ||
|
|
d946c108a3 | ||
|
|
2b66f9a5c4 | ||
|
|
4d321cf3f6 | ||
|
|
299d84b16a | ||
|
|
3d75f6bbbd | ||
|
|
6b194bba15 | ||
|
|
e63bccf274 | ||
|
|
4ca79ac1c9 | ||
|
|
aafef538f1 | ||
|
|
68d8546383 | ||
|
|
c43b3926b8 | ||
|
|
7e9cfc2872 | ||
|
|
e88936c05a | ||
|
|
9c1b4222d0 | ||
|
|
69ec55b638 | ||
|
|
67a455c403 | ||
|
|
5bf4df2d27 | ||
|
|
7797cc06d0 | ||
|
|
541d89e170 | ||
|
|
d775fb69e3 | ||
|
|
2a1a885056 | ||
|
|
a334ce1527 | ||
|
|
447cb5ccdc | ||
|
|
bca20e5b02 | ||
|
|
90675dcc2e | ||
|
|
e8deadaaff | ||
|
|
ecc4cc7670 | ||
|
|
4a5de5efd4 | ||
|
|
bdf557fde3 | ||
|
|
29600cb54c | ||
|
|
9f43fd7c92 | ||
|
|
6a6c1aa527 | ||
|
|
df7f9f2b14 | ||
|
|
568da8cc64 | ||
|
|
e16435f4fc | ||
|
|
76e9f3fd29 | ||
|
|
d0e46a517b | ||
|
|
c94345cb2f | ||
|
|
e2882acec1 | ||
|
|
3c2362177f | ||
|
|
5e92931108 | ||
|
|
d54e388b58 | ||
|
|
390cfa0cdf | ||
|
|
c6fc741aa8 | ||
|
|
a0e15e1671 | ||
|
|
8367cba259 | ||
|
|
f7bf709295 | ||
|
|
ab802ea5cf | ||
|
|
922d8eab58 | ||
|
|
409acc9f1a | ||
|
|
196dc78b4f | ||
|
|
61404d9c12 | ||
|
|
ceab5ead8c | ||
|
|
9b60acf3db | ||
|
|
90bb321a07 | ||
|
|
e56fa24a38 | ||
|
|
574aff605f | ||
|
|
9c6d0b56c3 | ||
|
|
e0761396b8 | ||
|
|
4b44eb5c80 | ||
|
|
eb66a44edf | ||
|
|
f8708b84e6 | ||
|
|
cc29091116 | ||
|
|
2f860772d2 | ||
|
|
2f5d1c0966 | ||
|
|
39c6817e65 | ||
|
|
2d63f56d64 | ||
|
|
a4a158a9e9 | ||
|
|
be78cb5321 | ||
|
|
1e483dc34d | ||
|
|
8ddf77973d | ||
|
|
d9a4858c4f | ||
|
|
c3e5406218 | ||
|
|
b92c345b3a | ||
|
|
ac9fd8f2ca | ||
|
|
20aae4769d | ||
|
|
1052c72863 | ||
|
|
4beb61c3af | ||
|
|
4b6c5db904 | ||
|
|
18493bb9b0 | ||
|
|
13a4bee725 | ||
|
|
7db0f7ec35 | ||
|
|
9936038603 | ||
|
|
ae9fb91c72 | ||
|
|
dcf7126f51 | ||
|
|
d3a512bf9e | ||
|
|
6809d97dd6 | ||
|
|
b89919546c | ||
|
|
cfa1c23506 | ||
|
|
e61b5d2a3f | ||
|
|
9089fd37e0 | ||
|
|
4eab083a30 | ||
|
|
11a07758aa | ||
|
|
ae712c1c98 | ||
|
|
9b2415f0f1 | ||
|
|
f130098937 | ||
|
|
e280a2e4a9 | ||
|
|
60bd4bc91b | ||
|
|
145c3d8f96 | ||
|
|
663c134e60 | ||
|
|
1e264ca4a1 | ||
|
|
207e9f7afd | ||
|
|
6e4c144af6 | ||
|
|
325c97cfe6 | ||
|
|
4d07845c7f | ||
|
|
93baa505c7 | ||
|
|
b81d4667b2 | ||
|
|
3dd065dd7b | ||
|
|
0a8692dcc0 | ||
|
|
26cc295167 | ||
|
|
48d9800b71 | ||
|
|
9424ad1f13 | ||
|
|
5c58f548c0 | ||
|
|
f59111025b | ||
|
|
e313776982 | ||
|
|
faa0bc952f | ||
|
|
87f8c60e2f | ||
|
|
e551c499db | ||
|
|
9aaa1edad6 | ||
|
|
d96b178a9c | ||
|
|
1c857c0781 | ||
|
|
ae160aef23 | ||
|
|
2ccce69180 | ||
|
|
a9f618891b | ||
|
|
85d1cc8be4 | ||
|
|
6315112b3b | ||
|
|
8cff8d85f6 | ||
|
|
d7a1007f41 | ||
|
|
0cd2c6864e | ||
|
|
0cb46e1444 | ||
|
|
cb9dfa1321 | ||
|
|
674fea7063 | ||
|
|
722cbb6054 | ||
|
|
042160e6bd | ||
|
|
06c44fe91f | ||
|
|
d92c636b69 | ||
|
|
0599503779 | ||
|
|
2cf337a606 | ||
|
|
b0bb320fb6 | ||
|
|
7a2a2846e1 | ||
|
|
90c35b67bd | ||
|
|
65287ba800 | ||
|
|
14af465aa3 | ||
|
|
95f0b60cac | ||
|
|
d69044231b | ||
|
|
72fde9860b | ||
|
|
481ada43d6 | ||
|
|
a9d74e834d | ||
|
|
43fd2fff2b | ||
|
|
b37c64f5a5 | ||
|
|
f1aa064b2d | ||
|
|
8abadd1070 | ||
|
|
3f9d9732a0 | ||
|
|
b9dc3c44a8 | ||
|
|
68e757aafc | ||
|
|
af045447e6 | ||
|
|
db6976a06a | ||
|
|
aa66a5ffb2 | ||
|
|
bf74150f62 | ||
|
|
2987216169 | ||
|
|
08a41f8f68 | ||
|
|
714a824823 | ||
|
|
2242dca27d | ||
|
|
681558126d | ||
|
|
927e637d88 | ||
|
|
3b97cb420d | ||
|
|
241fbd6be5 | ||
|
|
a619e7f571 | ||
|
|
491119d676 | ||
|
|
29a8c15d62 | ||
|
|
26741512ea | ||
|
|
a987d91ae1 | ||
|
|
380b9a73ab | ||
|
|
66bd1da571 | ||
|
|
79ad3c0a84 | ||
|
|
e69e7ff3c1 | ||
|
|
1483ef83d9 | ||
|
|
dbed799e20 | ||
|
|
4602b68425 | ||
|
|
2d3eaedaa7 | ||
|
|
4d60b21a5f | ||
|
|
50ee8374ee | ||
|
|
65eb23e8ce | ||
|
|
5c76b18ddd | ||
|
|
fc6f5d2535 | ||
|
|
6a0348e9dc | ||
|
|
40d3548c82 | ||
|
|
fc7bbf89c6 | ||
|
|
6f848e3df8 | ||
|
|
d80eb03707 | ||
|
|
85d4b8c487 | ||
|
|
90e6409b1e | ||
|
|
96b28a8e9b | ||
|
|
a818e87e96 | ||
|
|
dc715758a6 | ||
|
|
87069329d8 | ||
|
|
8a380a4545 | ||
|
|
a5e18cb761 | ||
|
|
e9da3e7b6a | ||
|
|
289fa23728 | ||
|
|
c6a44bfe09 | ||
|
|
c6af129960 | ||
|
|
6cb30bcd7f | ||
|
|
55027fd3cd | ||
|
|
41bf9b8baa | ||
|
|
c117ee61d5 | ||
|
|
7385932e52 | ||
|
|
cb90b69b3f | ||
|
|
8d0e31872a | ||
|
|
881925fd43 | ||
|
|
3a2f744f0a | ||
|
|
9dc9e668c5 | ||
|
|
39477c8de8 | ||
|
|
1b0bb95e81 | ||
|
|
cd5a4bec52 | ||
|
|
43070ab809 | ||
|
|
532fedbb62 | ||
|
|
585bf37783 | ||
|
|
cad364e407 | ||
|
|
45457c5b38 | ||
|
|
4e9142b5be | ||
|
|
207b365d40 | ||
|
|
1ec95a0d86 | ||
|
|
db4b9ccc7a | ||
|
|
a86d0c74d3 | ||
|
|
354f4e47df | ||
|
|
84167650b8 | ||
|
|
15c12a81f1 | ||
|
|
249ae584c3 | ||
|
|
b04f7b2d2c | ||
|
|
630ce459cb | ||
|
|
68fae3b23c | ||
|
|
3525cd1083 | ||
|
|
a7a30fb282 | ||
|
|
e9730f24a0 | ||
|
|
c35d22a82f | ||
|
|
b76d92bfeb | ||
|
|
afa578aa34 | ||
|
|
519ccda5ed | ||
|
|
832513a7fc | ||
|
|
0300c26952 | ||
|
|
243071d4cc | ||
|
|
c95937d08b | ||
|
|
58f944c72e | ||
|
|
173cc57490 | ||
|
|
4c8ba6b0a8 | ||
|
|
ab5abe9bcf | ||
|
|
be2ea8c6d5 | ||
|
|
7834fff541 | ||
|
|
16ec9d2938 | ||
|
|
ef8849e8a9 | ||
|
|
93a23671e4 | ||
|
|
8a77fbfefd | ||
|
|
78bedf9ad6 | ||
|
|
6ec757ab66 | ||
|
|
9ffb7f54c7 | ||
|
|
7d71299c51 | ||
|
|
bea37aee7f | ||
|
|
5323687ea5 | ||
|
|
d1372a4c43 | ||
|
|
c9249a164a | ||
|
|
6f105f2626 | ||
|
|
e4c08896f4 | ||
|
|
e85a0df9b7 | ||
|
|
a5b7eabd97 | ||
|
|
f3688431a3 | ||
|
|
44e714352d | ||
|
|
60da68c994 | ||
|
|
11288fac20 | ||
|
|
be10a644a0 | ||
|
|
a5a64eadc7 | ||
|
|
2cee252b14 | ||
|
|
050b4a5c9d | ||
|
|
6d4b5d4484 | ||
|
|
964a6c2e3e | ||
|
|
fe9d373444 | ||
|
|
cce31f9b0b | ||
|
|
ca779ed5ad | ||
|
|
14336529d9 | ||
|
|
2e57285120 | ||
|
|
2784059a0f | ||
|
|
04e78f4de7 | ||
|
|
c051ab56b4 | ||
|
|
eb0eaaae2e | ||
|
|
9631b97694 | ||
|
|
6f036876c8 | ||
|
|
bd47179ea4 | ||
|
|
17004f704c | ||
|
|
418c6bd88b | ||
|
|
ada4c4f816 | ||
|
|
fc34c1fc35 | ||
|
|
6e6cd9a7e5 | ||
|
|
16051981d7 | ||
|
|
91b499fb14 | ||
|
|
1f73adffd6 | ||
|
|
82bd313e7a | ||
|
|
14cbfe47b9 | ||
|
|
f2d4ff6dc4 | ||
|
|
bf33a70727 | ||
|
|
e1bdcbd581 | ||
|
|
051c1e7622 | ||
|
|
3b176474ff | ||
|
|
15f1b33ea6 | ||
|
|
0603d4076a | ||
|
|
ac94f10dc3 | ||
|
|
bbb92490e9 | ||
|
|
2cb63092c0 | ||
|
|
b9bcb59592 | ||
|
|
e083adc022 | ||
|
|
c3cd38fe9f | ||
|
|
a7c2db5e99 | ||
|
|
4926ee5117 | ||
|
|
e7723ac3db | ||
|
|
8dbfb93e4e | ||
|
|
6096366756 | ||
|
|
8e4cf12512 | ||
|
|
b61fa1f870 | ||
|
|
ac3cf9e4b1 | ||
|
|
e18463f059 | ||
|
|
ee54e08d18 | ||
|
|
3c07df6496 | ||
|
|
2117b828c8 | ||
|
|
28d9694432 | ||
|
|
7d977700e6 | ||
|
|
33942945d0 | ||
|
|
8d7d78db46 | ||
|
|
3268cc30ea | ||
|
|
8830ebe34f | ||
|
|
1d9adba6dd | ||
|
|
71be73777e | ||
|
|
7709f70ef1 | ||
|
|
9b528b84e1 | ||
|
|
c53a4d4861 | ||
|
|
766173df3d | ||
|
|
7f65cae891 | ||
|
|
bc56ecb85c | ||
|
|
50c3151301 | ||
|
|
d49ec0a81e | ||
|
|
6692028762 | ||
|
|
b71c357958 | ||
|
|
03b7621f3e | ||
|
|
2bcf24bd84 | ||
|
|
cc5aa05b12 | ||
|
|
3232c5c4ce | ||
|
|
72a52f5cd6 | ||
|
|
bda48a56e0 | ||
|
|
09cdcf8e53 | ||
|
|
a4d5b41ca7 | ||
|
|
9fa0d91d06 | ||
|
|
510f60bdeb | ||
|
|
30fe827253 | ||
|
|
8f0f4b168b | ||
|
|
cd11c4beb6 | ||
|
|
4d49cc413a | ||
|
|
3d50a58a31 | ||
|
|
d5701230fa | ||
|
|
ab945d6afe | ||
|
|
b4f8a36d43 | ||
|
|
608c1b4eb6 | ||
|
|
edf3c42157 | ||
|
|
c523cec113 | ||
|
|
6f8b987d42 | ||
|
|
d0d0642bdf | ||
|
|
924d760e3b | ||
|
|
f8c207ca2b | ||
|
|
ada1edd0b7 | ||
|
|
c79333db61 | ||
|
|
13778bed87 | ||
|
|
83b4d96f42 | ||
|
|
6cf96de0b4 | ||
|
|
481fadc7fc | ||
|
|
44f2c59e56 | ||
|
|
83589a912f | ||
|
|
fab9f03e7d | ||
|
|
928c83b13c | ||
|
|
72f20bc69b | ||
|
|
8293d5379d | ||
|
|
1d29445a8b | ||
|
|
5c308d757f | ||
|
|
5bf16004c9 | ||
|
|
417fa437b7 | ||
|
|
43f893c921 | ||
|
|
8ad9af4bc2 | ||
|
|
b3931bdc9d | ||
|
|
7ac153848b | ||
|
|
7689ebcac5 | ||
|
|
bf7adf9ca9 | ||
|
|
51ab8c556a | ||
|
|
149a8e910d | ||
|
|
310746b8cd | ||
|
|
6681303450 | ||
|
|
3228f37f09 | ||
|
|
421785bf6a | ||
|
|
308e8ca8c7 | ||
|
|
cb068ef70e | ||
|
|
acd3ec782f | ||
|
|
12d1e5b8d0 | ||
|
|
b165cdbd79 | ||
|
|
66f4507dee | ||
|
|
a724e1cb58 | ||
|
|
f942809de0 | ||
|
|
8043f77e02 | ||
|
|
1db9ba90d8 | ||
|
|
464fa59cb6 | ||
|
|
a8e2cbf55b | ||
|
|
fecd8dab38 | ||
|
|
13a833e3ed | ||
|
|
8d4784052a | ||
|
|
d9e9b41861 | ||
|
|
f0c3ef0aa1 | ||
|
|
3888831679 | ||
|
|
1a32fad324 | ||
|
|
d842ae9540 | ||
|
|
d0177b7504 | ||
|
|
e3842b25f3 | ||
|
|
a29b59c9cd | ||
|
|
f94eb97aa4 | ||
|
|
86017b79eb | ||
|
|
bc22fa5fad | ||
|
|
d6b70028ff | ||
|
|
0f7f9acd58 | ||
|
|
20633a6d1a | ||
|
|
a23856270d | ||
|
|
55543e370e | ||
|
|
8137a46c68 | ||
|
|
aeb9597c71 | ||
|
|
5067485e94 | ||
|
|
dfd456c7dc | ||
|
|
0ccb07e683 | ||
|
|
ca67a6897f | ||
|
|
0390227641 | ||
|
|
395b0982db | ||
|
|
f096ab4da7 | ||
|
|
d5ec9f7640 | ||
|
|
5732867407 | ||
|
|
31842f4c12 | ||
|
|
d5168d2da6 | ||
|
|
cac5ec836b | ||
|
|
ee9569e7d4 | ||
|
|
95df7de026 | ||
|
|
d3a5bd374d | ||
|
|
43ac3dddf1 | ||
|
|
1174328de3 | ||
|
|
dda54fb907 | ||
|
|
92ea808a5d | ||
|
|
2bc3a75c94 | ||
|
|
07ef97ce7c | ||
|
|
5fe3539331 | ||
|
|
4bc3bd5f13 | ||
|
|
4abce854d7 | ||
|
|
3b5c73992e | ||
|
|
c5abea2944 | ||
|
|
8e5cf14ebc | ||
|
|
7c8410ab86 | ||
|
|
369b4b92cc | ||
|
|
c50bb70383 | ||
|
|
d84b2060f0 | ||
|
|
afcf6024e6 | ||
|
|
3542bd6668 | ||
|
|
69b9116dd5 | ||
|
|
a2db4f06b1 | ||
|
|
b60b0fb511 | ||
|
|
0c1e9a6bb5 | ||
|
|
2692f92cb9 | ||
|
|
a43c8b4b00 | ||
|
|
6f15389411 | ||
|
|
f055241802 | ||
|
|
ac77c3a390 | ||
|
|
5cd99f2edc | ||
|
|
7c70fbec30 | ||
|
|
ac0dc3196f | ||
|
|
7f4da826b1 | ||
|
|
cc2af4371f | ||
|
|
2a79a03d38 | ||
|
|
47aac7fe33 | ||
|
|
d4055884b1 | ||
|
|
61658e847a | ||
|
|
bd714223ce | ||
|
|
060154cb89 | ||
|
|
99a1bfca9d | ||
|
|
4379f30628 | ||
|
|
b501244577 | ||
|
|
6f7b9815ca | ||
|
|
227bd088f7 | ||
|
|
b1d6ecb07c | ||
|
|
41772f28bd | ||
|
|
393dac1c99 | ||
|
|
db9d0be6c7 | ||
|
|
f0774ec273 | ||
|
|
d4a4d28b58 | ||
|
|
3ec97021bd | ||
|
|
56cf972373 | ||
|
|
2f3ae4c1af | ||
|
|
bc3dd04e12 | ||
|
|
4e8fc1b431 | ||
|
|
cd39aa2968 | ||
|
|
87ea3fc982 | ||
|
|
f74a511778 | ||
|
|
202461fe48 | ||
|
|
2af6687351 | ||
|
|
e603af5f24 | ||
|
|
84069ee882 | ||
|
|
edbb5cef92 | ||
|
|
a62c54b4ed | ||
|
|
41df7c04c3 | ||
|
|
9b783a8322 | ||
|
|
57db4df618 | ||
|
|
9d1081bd56 | ||
|
|
cc3773817b | ||
|
|
8956355e57 | ||
|
|
645db97c14 | ||
|
|
07a04dc507 | ||
|
|
a7317af413 | ||
|
|
ba6d6b8851 | ||
|
|
3726810108 | ||
|
|
4f92a7edf3 | ||
|
|
5d84b61f18 | ||
|
|
cd1329ec67 | ||
|
|
48a58b2b69 | ||
|
|
9b64aba8bf | ||
|
|
cae8264d98 | ||
|
|
95d8985336 | ||
|
|
b26ae90807 | ||
|
|
66d171c432 | ||
|
|
40463d9831 | ||
|
|
eb7dee013d | ||
|
|
1a878599b1 | ||
|
|
b9e25abdd9 | ||
|
|
c612022717 | ||
|
|
31e7f02b8d | ||
|
|
75747a2979 | ||
|
|
a85a8668a7 | ||
|
|
cca5fd859c | ||
|
|
2ed49abb1b | ||
|
|
409e6d49b2 | ||
|
|
93cfc482b8 | ||
|
|
85be6d53d0 | ||
|
|
bf6f58eb5e | ||
|
|
cd9d17ab18 | ||
|
|
e0bc6a10d0 | ||
|
|
ccfc1ad166 | ||
|
|
812060240f | ||
|
|
715a266ca4 | ||
|
|
fa4b3ece56 | ||
|
|
aac2a002bb | ||
|
|
8574acaf6e | ||
|
|
ddf4639354 | ||
|
|
9bd394f351 | ||
|
|
6899d48aae | ||
|
|
514e1ca8d0 | ||
|
|
eba5be010a | ||
|
|
f578d5c1c9 | ||
|
|
e58b1d670b | ||
|
|
c2642259b4 | ||
|
|
c47b0c9741 | ||
|
|
f425156cad | ||
|
|
7d7e31120e | ||
|
|
ccd247d154 | ||
|
|
53df6849f7 | ||
|
|
c2080bd1b3 | ||
|
|
9e93f8c2a5 | ||
|
|
907a142c8d | ||
|
|
d7bc8cd8e4 | ||
|
|
bb3e00a695 | ||
|
|
af5e7974c3 | ||
|
|
6a88959ec4 | ||
|
|
7b59149f90 | ||
|
|
3e01079caf | ||
|
|
d92e62e40b | ||
|
|
b4952dea7b | ||
|
|
00acb04329 | ||
|
|
939dd0591e | ||
|
|
febdb2a9e0 | ||
|
|
bcfd9fc1c9 | ||
|
|
e4964da5d4 | ||
|
|
a042298a4a | ||
|
|
908be168a9 | ||
|
|
d780bb3937 | ||
|
|
461e7e8913 | ||
|
|
819e8b73c3 | ||
|
|
627b7087a1 | ||
|
|
f23cf555e1 | ||
|
|
000978e4fb | ||
|
|
74782483bd | ||
|
|
c5f9387b92 | ||
|
|
57583b6747 | ||
|
|
434c236210 | ||
|
|
807bb97b6a | ||
|
|
da53bd44d1 | ||
|
|
3b01943649 | ||
|
|
3340f9c6ee | ||
|
|
b21cfe8504 | ||
|
|
97ab6ec299 | ||
|
|
7fda58e5c8 | ||
|
|
db6e820d1d | ||
|
|
8c2e1875ca | ||
|
|
bd95fe9af1 | ||
|
|
a517f89234 | ||
|
|
f994e7bfa8 | ||
|
|
28716924c9 | ||
|
|
b597f90f5b | ||
|
|
5912420467 | ||
|
|
90f35fd680 | ||
|
|
3e2d6e71b9 | ||
|
|
cbffdd829a | ||
|
|
d77e092948 | ||
|
|
1f5e10e784 | ||
|
|
68e3813c6c | ||
|
|
c9d78e3f67 | ||
|
|
b4f3fb3b30 | ||
|
|
bf7fb898f9 | ||
|
|
6c5e0543b4 | ||
|
|
578a1db62f | ||
|
|
4524a55b23 | ||
|
|
f942eaf1b6 | ||
|
|
6a4d16fae9 | ||
|
|
53b234252f | ||
|
|
9287e81ef1 | ||
|
|
5462326f79 | ||
|
|
b8b3992159 | ||
|
|
8214000713 | ||
|
|
94337a33d4 | ||
|
|
8ddee03338 | ||
|
|
8e2934533b | ||
|
|
71ee784003 | ||
|
|
597528e9b7 | ||
|
|
45fbbf9218 | ||
|
|
c7a4a01fee | ||
|
|
e1e147c8f0 | ||
|
|
b37d889de9 | ||
|
|
b61c9bfc5e | ||
|
|
e963788a81 | ||
|
|
3ef4798e09 |
5
.dockerignore
Normal file
5
.dockerignore
Normal file
@@ -0,0 +1,5 @@
|
||||
.git
|
||||
.github
|
||||
*.md
|
||||
tests/
|
||||
docs/
|
||||
31
.github/dependabot.yml
vendored
Normal file
31
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
# To get started with Dependabot version updates, you'll need to specify which
|
||||
# package ecosystems to update and where the package manifests are located.
|
||||
# Please see the documentation for all configuration options:
|
||||
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
|
||||
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "pip"
|
||||
directory: "/src"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/src/static/app"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/.github"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/docker"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
|
||||
- package-ecosystem: "docker-compose"
|
||||
directory: "/docker"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
1
.github/workflows/codeql-analyze.yaml
vendored
1
.github/workflows/codeql-analyze.yaml
vendored
@@ -12,6 +12,7 @@
|
||||
name: "CodeQL"
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches: [ main ]
|
||||
pull_request:
|
||||
|
||||
50
.github/workflows/docker-analyze.yaml
vendored
50
.github/workflows/docker-analyze.yaml
vendored
@@ -1,50 +0,0 @@
|
||||
name: Docker-Analyze
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * *" # Daily at midnight UTC
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
trigger-build:
|
||||
description: 'Trigger a manual build and push'
|
||||
default: 'true'
|
||||
|
||||
env:
|
||||
DOCKER_IMAGE: donaldzou/wgdashboard
|
||||
|
||||
jobs:
|
||||
docker_analyze:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||
|
||||
- name: Install Docker Scout
|
||||
run: |
|
||||
echo "Installing Docker Scout..."
|
||||
curl -fsSL https://raw.githubusercontent.com/docker/scout-cli/main/install.sh | sh -s --
|
||||
echo "Docker Scout installed successfully."
|
||||
- name: Analyze Docker image with Docker Scout
|
||||
id: analyze-image
|
||||
run: |
|
||||
echo "Analyzing Docker image with Docker Scout..."
|
||||
docker scout cves ${{ env.DOCKER_IMAGE }}:latest > scout-results.txt
|
||||
cat scout-results.txt
|
||||
echo "Docker Scout analysis completed."
|
||||
- name: Fail if critical CVEs are found
|
||||
run: |
|
||||
if grep -q "0C" scout-results.txt; then
|
||||
echo "No critical vulnerabilities found! Continueing."
|
||||
exit 0
|
||||
else
|
||||
echo "At least one critical vulnerabilities found! Exiting."
|
||||
exit 1
|
||||
fi
|
||||
44
.github/workflows/docker-build.yaml
vendored
44
.github/workflows/docker-build.yaml
vendored
@@ -1,44 +0,0 @@
|
||||
name: Docker-Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
trigger-build:
|
||||
description: 'Trigger a manual build and push'
|
||||
default: 'true'
|
||||
|
||||
env:
|
||||
DOCKER_IMAGE: donaldzou/wgdashboard
|
||||
|
||||
jobs:
|
||||
docker_build:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build and export (multi-arch)
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ env.DOCKER_IMAGE }}:latest
|
||||
platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7
|
||||
116
.github/workflows/docker.yml
vendored
Normal file
116
.github/workflows/docker.yml
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
name: Docker Build and Push
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
tags:
|
||||
- '*'
|
||||
release:
|
||||
types: [ published ]
|
||||
|
||||
env:
|
||||
DOCKERHUB_PREFIX: docker.io
|
||||
GITHUB_CONTAINER_PREFIX: ghcr.io
|
||||
DOCKER_IMAGE: WGDashboard
|
||||
|
||||
jobs:
|
||||
docker_build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.DOCKERHUB_PREFIX }}
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||
|
||||
- name: Log in to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.GITHUB_CONTAINER_PREFIX }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
platforms: |
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
- linux/arm/v7
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Docker meta by docs https://github.com/docker/metadata-action
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
${{ env.DOCKERHUB_PREFIX }}/donaldzou/${{ env.DOCKER_IMAGE }}
|
||||
${{ env.GITHUB_CONTAINER_PREFIX }}/${{ github.repository_owner }}/${{ env.DOCKER_IMAGE }}
|
||||
tags: |
|
||||
type=ref,event=branch
|
||||
type=ref,event=tag
|
||||
type=sha,format=short,prefix=
|
||||
|
||||
- name: Build and export (multi-arch)
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/Dockerfile
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||
|
||||
docker_scan:
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: docker_build
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.DOCKERHUB_PREFIX }}
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||
|
||||
- name: Log in to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.GITHUB_CONTAINER_PREFIX }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Docker Scout CVEs
|
||||
uses: docker/scout-action@v1
|
||||
with:
|
||||
command: cves
|
||||
image: ${{ env.GITHUB_CONTAINER_PREFIX }}/${{ github.repository_owner }}/${{ env.DOCKER_IMAGE }}:main
|
||||
only-severities: critical,high
|
||||
only-fixed: true
|
||||
write-comment: true
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
exit-code: true
|
||||
|
||||
- name: Docker Scout Compare
|
||||
uses: docker/scout-action@v1
|
||||
with:
|
||||
command: compare
|
||||
# Set to Github for maximum compat
|
||||
image: ${{ env.GITHUB_CONTAINER_PREFIX }}/${{ github.repository_owner }}/${{ env.DOCKER_IMAGE }}:main
|
||||
to: ${{ env.GITHUB_CONTAINER_PREFIX }}/${{ github.repository_owner }}/${{ env.DOCKER_IMAGE }}:latest
|
||||
only-severities: critical,high
|
||||
ignore-unchanged: true
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
26
.github/workflows/stale.yml
vendored
Normal file
26
.github/workflows/stale.yml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
|
||||
#
|
||||
# You can adjust the behavior by modifying this file.
|
||||
# For more information, see:
|
||||
# https://github.com/actions/stale
|
||||
name: Mark stale issues and pull requests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '00 08 * * *'
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
steps:
|
||||
- uses: actions/stale@v9
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: 'This issue has not been updated for 20 days'
|
||||
stale-pr-message: 'This pull request has not been updated for 20 days'
|
||||
stale-issue-label: 'stale'
|
||||
exempt-issue-labels: 'enhancement,ongoing'
|
||||
days-before-stale: 20
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -18,6 +18,9 @@ src/db/wgdashboard.db
|
||||
node_modules/**
|
||||
*/proxy.js
|
||||
src/static/app/proxy.js
|
||||
.secrets
|
||||
*.ini
|
||||
*.pid
|
||||
|
||||
# Logs
|
||||
logs
|
||||
|
||||
87
README.md
87
README.md
@@ -1,46 +1,85 @@
|
||||
> [!NOTE]
|
||||
> **Help Wanted 🎉**: Localizing WGDashboard to other languages! If you're willing to help, please visit https://github.com/donaldzou/WGDashboard/issues/397. Many thanks!
|
||||
<hr>
|
||||
> [!TIP]
|
||||
> 🎉 To help us better understand and improve WGDashboard’s performance, we’re launching the **WGDashboard Testing Program**. As part of this program, participants will receive free WireGuard VPN access to our server in Toronto, Canada, valid for **24 hours** or up to **1GB of total traffic**—whichever comes first. If you’d like to join, visit [https://wg.wgdashboard.dev/](https://wg.wgdashboard.dev/) for more details!
|
||||
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
<p align="center">
|
||||
<img alt="WGDashboard" src="./src/static/app/public/img/logo.png" width="128">
|
||||
<img alt="WGDashboard" src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Logos/Logo-2-Rounded-512x512.png" width="128">
|
||||
</p>
|
||||
<h1 align="center">WGDashboard</h1>
|
||||
<h1 align="center">
|
||||
<a href="https://wgdashboard.dev">WGDashboard</a>
|
||||
</h1>
|
||||
<p align="center">
|
||||
<img src="https://forthebadge.com/images/badges/made-with-python.svg">
|
||||
<img src="https://forthebadge.com/images/badges/made-with-javascript.svg">
|
||||
<img src="https://forthebadge.com/images/badges/license-mit.svg">
|
||||
<img src="https://img.shields.io/badge/Made_With-Python-blue?style=for-the-badge&logo=python&logoColor=ffffff">
|
||||
<img src="https://img.shields.io/badge/Made_With-Vue.js-42b883?style=for-the-badge&logo=vuedotjs&logoColor=ffffff">
|
||||
<img src="https://img.shields.io/badge/License-Apache_License_2.0-D22128?style=for-the-badge&logo=apache&logoColor=ffffff">
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img src="https://forthebadge.com/images/badges/built-with-love.svg">
|
||||
<a href="https://github.com/WGDashboard/WGDashboard/releases/latest"><img src="https://img.shields.io/github/v/release/donaldzou/wireguard-dashboard?style=for-the-badge"></a>
|
||||
<a href="https://wakatime.com/badge/github/donaldzou/WGDashboard"><img src="https://wakatime.com/badge/github/donaldzou/WGDashboard.svg?style=for-the-badge" alt="wakatime"></a>
|
||||
<a href="https://hitscounter.dev"><img src="https://hitscounter.dev/api/hit?url=https%3A%2F%2Fgithub.com%2Fdonaldzou%2FWGDashboard&label=Visitor&icon=github&color=%230a58ca&style=for-the-badge"></a>
|
||||
<img src="https://img.shields.io/docker/pulls/donaldzou/wgdashboard?logo=docker&label=Docker%20Image%20Pulls&labelColor=ffffff&style=for-the-badge">
|
||||
<img src="https://github.com/WGDashboard/WGDashboard/actions/workflows/docker.yml/badge.svg?style=for-the-badge">
|
||||
<img src="https://github.com/WGDashboard/WGDashboard/actions/workflows/codeql-analyze.yaml/badge.svg">
|
||||
</p>
|
||||
<p align="center"><b>This project is supported by</b></p>
|
||||
<p align="center">
|
||||
<a href="https://github.com/donaldzou/wireguard-dashboard/releases/latest"><img src="https://img.shields.io/github/v/release/donaldzou/wireguard-dashboard"></a>
|
||||
<a href="https://wakatime.com/badge/github/donaldzou/WGDashboard"><img src="https://wakatime.com/badge/github/donaldzou/WGDashboard.svg" alt="wakatime"></a>
|
||||
<a href="https://hits.seeyoufarm.com"><img src="https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fgithub.com%2Fdonaldzou%2FWGDashboard&count_bg=%2379C83D&title_bg=%23555555&icon=github.svg&icon_color=%23E7E7E7&title=Visitor&edge_flat=false"/></a>
|
||||
<a href="https://m.do.co/c/a84cb9aac585">
|
||||
<img src="https://opensource.nyc3.cdn.digitaloceanspaces.com/attribution/assets/SVG/DO_Logo_horizontal_blue.svg" width="201px">
|
||||
</a>
|
||||
</p>
|
||||
<p align="center">Monitoring WireGuard is not convenient, in most case, you'll need to login to your server and type <code>wg show</code>. That's why this project is being created, to view and manage all WireGuard configurations in a easy way.</p>
|
||||
<p align="center">With all these awesome features, while keeping it <b>easy to install and use</b></p>
|
||||
|
||||
<p align="center"><b><i>This project is not affiliate to the official WireGuard Project</i></b></p>
|
||||
|
||||
|
||||
<h3 align="center">Looking for help or want to chat about this project?</h4>
|
||||
<p align="center">
|
||||
Join our Discord Server for quick help, or you wanna chat about this project!
|
||||
You can reach out at
|
||||
</p>
|
||||
<p align="center">
|
||||
<a align="center" href="https://discord.gg/72TwzjeuWm"><img src="https://img.shields.io/discord/1276818723637956628?labelColor=ffffff&style=for-the-badge&logo=discord&label=Discord"></a>
|
||||
</p>
|
||||
<a align="center" href="https://discord.gg/72TwzjeuWm" target="_blank"><img src="https://img.shields.io/discord/1276818723637956628?labelColor=ffffff&style=for-the-badge&logo=discord&label=Discord"></a>
|
||||
<a align="center" href="https://www.reddit.com/r/WGDashboard/" target="_blank"><img src="https://img.shields.io/badge/Reddit-r%2FWGDashboard-FF4500?style=for-the-badge&logo=reddit"></a>
|
||||
<a align="center" href="https://app.element.io/#/room/#wgd:matrix.org" target="_blank"><img src="https://img.shields.io/badge/Matrix_Chatroom-%23WGD-000000?style=for-the-badge&logo=matrix"></a>
|
||||
</p>
|
||||
<h3 align="center">Want to support this project?</h4>
|
||||
<p align="center">
|
||||
You can support via <br>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a align="center" href="https://github.com/sponsors/donaldzou" target="_blank"><img src="https://img.shields.io/badge/GitHub%20Sponsor-2e9a40?style=for-the-badge&logo=github"></a>
|
||||
<a align="center" href="https://buymeacoffee.com/donaldzou" target="_blank"><img src="https://img.shields.io/badge/Buy%20me%20a%20coffee-ffdd00?style=for-the-badge&logo=buymeacoffee&logoColor=000000"></a>
|
||||
<a align="center" href="https://patreon.com/c/DonaldDonnyZou/membership" target="_blank"><img src="https://img.shields.io/badge/Patreon-000000?style=for-the-badge&logo=patreon&logoColor=ffffff"></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<b>or, visit our merch store and support us by purchasing a merch for only $USD 17.00 (Including shipping worldwide & duties)</b>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a align="center" href="https://merch.wgdashboard.dev" target="_blank"><img src="https://img.shields.io/badge/Merch%20from%20WGDashboard-926183?style=for-the-badge"></a>
|
||||
</p>
|
||||
|
||||
<hr>
|
||||
<h4 align="center">
|
||||
for more information, visit our
|
||||
</h4>
|
||||
<h1 align="center">
|
||||
<a href="https://wgdashboard.dev">Official Website</a>
|
||||
</h1>
|
||||
|
||||
> [!NOTE]
|
||||
> To better manage documentation for this project. I've moved it to its own [repo](https://github.com/donaldzou/WGDashboard-Documentation). I will keep updating over there and leave this README only with important information.
|
||||
|
||||
- [💡 Features](https://donaldzou.github.io/WGDashboard-Documentation/features.html)
|
||||
- [📝 Requirements](https://donaldzou.github.io/WGDashboard-Documentation/requirements.html)
|
||||
- [🛠 Install](https://donaldzou.github.io/WGDashboard-Documentation/install.html)
|
||||
- [🪜 Usage](https://donaldzou.github.io/WGDashboard-Documentation/usage.html)
|
||||
- [📖 API Documentation](https://donaldzou.github.io/WGDashboard-Documentation/api-documentation.html)
|
||||
- [And much more...](https://donaldzou.github.io/WGDashboard-Documentation/)
|
||||
# Screenshots
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/sign-in.png" alt=""/>
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/cross-server.png" alt=""/>
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/index.png" alt=""/>
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/new-configuration.png" alt="" />
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/settings.png" alt="" />
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/light-dark.png" alt="" />
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/configuration.png" alt=""/>
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/add-peers.png" alt="" />
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/ping.png" alt=""/>
|
||||
<img src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Documentation%20Images/traceroute.png" alt=""/>
|
||||
|
||||
@@ -4,7 +4,5 @@
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 5.1.x | :white_check_mark: |
|
||||
| 5.0.x | :x: |
|
||||
| 4.0.x | :white_check_mark: |
|
||||
| < 4.0 | :x: |
|
||||
| 4.3 | :white_check_mark: |
|
||||
| < 4.3 | :x: |
|
||||
|
||||
@@ -1,32 +1,49 @@
|
||||
FROM golang:1.24 AS awg-go
|
||||
|
||||
RUN git clone https://github.com/WGDashboard/amneziawg-go /awg
|
||||
WORKDIR /awg
|
||||
RUN go mod download && \
|
||||
go mod verify && \
|
||||
go build -ldflags '-linkmode external -extldflags "-fno-PIC -static"' -v -o /usr/bin
|
||||
|
||||
FROM alpine:latest AS awg-tools
|
||||
|
||||
RUN apk update && apk add --no-cache \
|
||||
make git build-base linux-headers \
|
||||
&& git clone https://github.com/WGDashboard/amneziawg-tools \
|
||||
&& cd amneziawg-tools/src \
|
||||
&& make \
|
||||
&& chmod +x wg*
|
||||
|
||||
FROM alpine:latest
|
||||
LABEL maintainer="dselen@nerthus.nl"
|
||||
|
||||
RUN apk update && apk add --no-cache \
|
||||
iproute2 iptables bash curl wget unzip procps sudo \
|
||||
tzdata wireguard-tools python3 py3-psutil py3-bcrypt openresolv
|
||||
|
||||
COPY --from=awg-go /usr/bin/amneziawg-go /usr/bin/amneziawg-go
|
||||
COPY --from=awg-tools /amneziawg-tools/src/wg /usr/bin/awg
|
||||
COPY --from=awg-tools /amneziawg-tools/src/wg-quick/linux.bash /usr/bin/awg-quick
|
||||
|
||||
# Declaring environment variables, change Peernet to an address you like, standard is a 24 bit subnet.
|
||||
ARG wg_net="10.0.0.1"
|
||||
ARG wg_port="51820"
|
||||
ARG wg_net="10.0.0.1" \
|
||||
wg_port="51820"
|
||||
|
||||
# Following ENV variables are changable on container runtime because /entrypoint.sh handles that. See compose.yaml for more info.
|
||||
ENV TZ="Europe/Amsterdam"
|
||||
ENV global_dns="1.1.1.1"
|
||||
ENV isolate="none"
|
||||
ENV public_ip="0.0.0.0"
|
||||
|
||||
# Doing package management operations, such as upgrading
|
||||
RUN apk update \
|
||||
&& apk add --no-cache bash git tzdata \
|
||||
iptables ip6tables openrc curl wireguard-tools \
|
||||
sudo py3-psutil py3-bcrypt \
|
||||
&& apk upgrade
|
||||
ENV TZ="Europe/Amsterdam" \
|
||||
global_dns="9.9.9.9" \
|
||||
wgd_port="10086" \
|
||||
public_ip=""
|
||||
|
||||
# Using WGDASH -- like wg_net functionally as a ARG command. But it is needed in entrypoint.sh so it needs to be exported as environment variable.
|
||||
ENV WGDASH=/opt/wireguarddashboard
|
||||
|
||||
# Removing the Linux Image package to preserve space on the image, for this reason also deleting apt lists, to be able to install packages: run apt update.
|
||||
ENV WGDASH=/opt/wgdashboard
|
||||
|
||||
# Doing WireGuard Dashboard installation measures. Modify the git clone command to get the preferred version, with a specific branch for example.
|
||||
RUN mkdir /data \
|
||||
&& mkdir /configs \
|
||||
&& mkdir -p ${WGDASH}/src
|
||||
&& mkdir -p ${WGDASH}/src \
|
||||
&& mkdir -p /etc/amnezia/amneziawg
|
||||
COPY ./src ${WGDASH}/src
|
||||
|
||||
# Generate basic WireGuard interface. Echoing the WireGuard interface config for readability, adjust if you want it for efficiency.
|
||||
@@ -50,9 +67,10 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD sh -c 'pgrep gunicorn > /dev/null && pgrep tail > /dev/null' || exit 1
|
||||
|
||||
# Copy the basic entrypoint.sh script.
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
COPY ./docker/entrypoint.sh /entrypoint.sh
|
||||
|
||||
# Exposing the default WireGuard Dashboard port for web access.
|
||||
EXPOSE 10086
|
||||
WORKDIR $WGDASH
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
|
||||
140
docker/Dockerfile
Normal file
140
docker/Dockerfile
Normal file
@@ -0,0 +1,140 @@
|
||||
#
|
||||
# AWG GOLANG BUILDING STAGE
|
||||
# Base: Alpine
|
||||
#
|
||||
|
||||
# Pull the current golang-alpine image.
|
||||
FROM golang:1.25-alpine AS awg-go
|
||||
|
||||
# Install build-dependencies.
|
||||
RUN apk add --no-cache \
|
||||
git \
|
||||
gcc \
|
||||
musl-dev
|
||||
|
||||
# Standard working directory for WGDashboard
|
||||
RUN mkdir -p /workspace && \
|
||||
git clone https://github.com/WGDashboard/amneziawg-go /workspace/awg
|
||||
|
||||
# Enable CGO compilation for AmneziaWG
|
||||
ENV CGO_ENABLED=1
|
||||
|
||||
# Change directory
|
||||
WORKDIR /workspace/awg
|
||||
# Compile the binaries
|
||||
RUN go mod download && \
|
||||
go mod verify && \
|
||||
go build -ldflags '-linkmode external -extldflags "-fno-PIC -static"' -v -o /usr/bin
|
||||
#
|
||||
# AWG TOOLS BUILDING STAGE
|
||||
# Base: Alpine
|
||||
#
|
||||
FROM alpine:latest AS awg-tools
|
||||
|
||||
# Install needed dependencies.
|
||||
RUN apk add --no-cache \
|
||||
make \
|
||||
git \
|
||||
build-base \
|
||||
linux-headers \
|
||||
ca-certificates
|
||||
|
||||
# Get the workspace ready
|
||||
RUN mkdir -p /workspace && \
|
||||
git clone https://github.com/WGDashboard/amneziawg-tools /workspace/awg-tools
|
||||
|
||||
# Change directory
|
||||
WORKDIR /workspace/awg-tools/src
|
||||
# Compile and change permissions
|
||||
RUN make && chmod +x wg*
|
||||
|
||||
#
|
||||
# PIP DEPENDENCY BUILDING
|
||||
# Base: Alpine
|
||||
#
|
||||
|
||||
# Use the python-alpine image for building pip dependencies
|
||||
FROM python:3.14-alpine AS pip-builder
|
||||
|
||||
# Add the build dependencies and create a Python virtual environment.
|
||||
RUN apk add --no-cache \
|
||||
build-base \
|
||||
pkgconfig \
|
||||
python3-dev \
|
||||
libffi-dev \
|
||||
linux-headers \
|
||||
rust \
|
||||
cargo \
|
||||
&& mkdir -p /opt/wgdashboard/src \
|
||||
&& python3 -m venv /opt/wgdashboard/src/venv
|
||||
|
||||
# Copy the requirements file into the build layer.
|
||||
COPY ./src/requirements.txt /opt/wgdashboard/src
|
||||
# Install the pip packages
|
||||
RUN . /opt/wgdashboard/src/venv/bin/activate && \
|
||||
pip3 install --upgrade pip && \
|
||||
pip3 install -r /opt/wgdashboard/src/requirements.txt
|
||||
|
||||
#
|
||||
# WGDashboard RUNNING STAGE
|
||||
# Base: Alpine
|
||||
#
|
||||
|
||||
# Running with the python-alpine image.
|
||||
FROM python:3.14-alpine AS final
|
||||
LABEL maintainer="dselen@nerthus.nl"
|
||||
|
||||
# Install only the runtime dependencies
|
||||
RUN apk add --no-cache \
|
||||
iproute2 iptables \
|
||||
bash curl procps \
|
||||
tzdata wireguard-tools
|
||||
|
||||
# Copy only the final binaries from the AWG builder stages
|
||||
COPY --from=awg-go /usr/bin/amneziawg-go /usr/bin/amneziawg-go
|
||||
COPY --from=awg-tools /workspace/awg-tools/src/wg /usr/bin/awg
|
||||
COPY --from=awg-tools /workspace/awg-tools/src/wg-quick/linux.bash /usr/bin/awg-quick
|
||||
|
||||
# Environment variables
|
||||
ARG wg_net="10.0.0.1"
|
||||
ARG wg_port="51820"
|
||||
ENV TZ="Europe/Amsterdam" \
|
||||
global_dns="9.9.9.9" \
|
||||
wgd_port="10086" \
|
||||
public_ip="" \
|
||||
WGDASH=/opt/wgdashboard
|
||||
|
||||
# Create directories needed for operation
|
||||
RUN mkdir /data /configs -p ${WGDASH}/src /etc/amnezia/amneziawg
|
||||
|
||||
# Copy the python virtual environment from the pip-builder stage
|
||||
COPY ./src ${WGDASH}/src
|
||||
COPY --from=pip-builder /opt/wgdashboard/src/venv /opt/wgdashboard/src/venv
|
||||
|
||||
# First WireGuard interface template
|
||||
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||
RUN out_adapt=$(ip -o -4 route show to default | awk '{print $NF}') \
|
||||
&& echo -e "[Interface]\n\
|
||||
Address = ${wg_net}/24\n\
|
||||
PrivateKey =\n\
|
||||
PostUp = iptables -t nat -I POSTROUTING 1 -s ${wg_net}/24 -o ${out_adapt} -j MASQUERADE\n\
|
||||
PostUp = iptables -I FORWARD -i wg0 -o wg0 -j DROP\n\
|
||||
PreDown = iptables -t nat -D POSTROUTING -s ${wg_net}/24 -o ${out_adapt} -j MASQUERADE\n\
|
||||
PreDown = iptables -D FORWARD -i wg0 -o wg0 -j DROP\n\
|
||||
ListenPort = ${wg_port}\n\
|
||||
SaveConfig = true\n\
|
||||
DNS = ${global_dns}" > /configs/wg0.conf.template \
|
||||
&& chmod 600 /configs/wg0.conf.template
|
||||
|
||||
# Set a healthcheck to determine the container its health
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD sh -c 'pgrep gunicorn > /dev/null && pgrep tail > /dev/null' || exit 1
|
||||
|
||||
# Copy in the runtime script, essential.
|
||||
COPY ./docker/entrypoint.sh /entrypoint.sh
|
||||
|
||||
#
|
||||
EXPOSE 10086
|
||||
WORKDIR $WGDASH/src
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
|
||||
210
docker/README.md
210
docker/README.md
@@ -1,109 +1,213 @@
|
||||
# WGDashboard Docker Explanation:
|
||||
Author: DaanSelen<br>
|
||||
Author: @DaanSelen<br>
|
||||
|
||||
This document delves into how the WGDashboard Docker container has been built.<br>
|
||||
Of course there are two stages, one before run-time and one at/after run-time.<br>
|
||||
The `Dockerfile` describes how the container image is made, and the `entrypoint.sh` is executed after running the container. <br>
|
||||
In this example, WireGuard is integrated into the container itself, so it should be a run-and-go/out-of-the-box.<br>
|
||||
Of course there are two stages (simply said), one before run-time and one at/after run-time.<br>
|
||||
The `Dockerfile` describes how the container image is made, and the `entrypoint.sh` is executed after the container is started. <br>
|
||||
In this example, [WireGuard](https://www.wireguard.com/) is integrated into the container itself, so it should be a run-and-go(/out-of-the-box) experience.<br>
|
||||
For more details on the source-code specific to this Docker image, refer to the source files, they have lots of comments.
|
||||
|
||||
I have tried to embed some new features such as `isolate` and interface startup on container-start (through `enable`). I hope you enjoy!
|
||||
<br>
|
||||
<img
|
||||
src="https://wgdashboard-resources.tor1.cdn.digitaloceanspaces.com/Logos/Logo-2-Rounded-512x512.png"
|
||||
alt="WG-Dashboard Logo"
|
||||
title="WG-Dashboard Logo"
|
||||
style="display: block; margin: 0 auto;"
|
||||
width="150"
|
||||
height="150"
|
||||
/>
|
||||
<br>
|
||||
|
||||
<img src="https://raw.githubusercontent.com/donaldzou/WGDashboard/main/src/static/img/logo.png" alt="WG-Dashboard Logo" title="WG-Dashboard Logo" width="150" height="150" />
|
||||
To get the container running you either pull the pre-made image from a remote repository, there are 2 official options.<br>
|
||||
|
||||
## Getting the container running:
|
||||
- ghcr.io/wgdashboard/wgdashboard:<tag>
|
||||
- docker.io/donaldzou/wgdashboard:<tag>
|
||||
|
||||
To get the container running you either pull the image from the repository, `donaldzou/wgdashboard:latest`.<br>
|
||||
From there either use the environment variables describe below as parameters or use the Docker Compose file: `compose.yaml`.<br>
|
||||
Be careful, the default generated WireGuard configuration file uses port 51820/udp. So use this port if you want to use it out of the box.<br>
|
||||
Otherwise edit the configuration file in `/etc/wireguard/wg0.conf`.
|
||||
> tags should be either: latest, main, <version> or <commit-sha>.
|
||||
|
||||
An example of a simple command to get the container running is show below:<br>
|
||||
From there either use the environment variables described below as parameters or use the Docker Compose file: `compose.yaml`.<br>
|
||||
Be careful, the default generated WireGuard configuration file uses port 51820/udp. So make sure to use this port if you want to use it out of the box.<br>
|
||||
Otherwise edit the configuration file in WGDashboard under `Configuration Settings` -> `Edit Raw Configuration File`.
|
||||
|
||||
```shell
|
||||
> Otherwise you need to enter the container and edit: `/etc/wireguard/wg0.conf`.
|
||||
|
||||
# WGDashboard: 🐳 Docker Deployment Guide
|
||||
|
||||
To run the container, you can either pull the image from the Github Container Registry (ghcr.io), Docker Hub (docker.io) or build it yourself. The image is available at:
|
||||
|
||||
> `docker.io` is in most cases automatically resolved by the Docker application. Therefor you can ofter specify: `donaldzou/wgdashboard:latest`
|
||||
|
||||
### 🔧 Quick Docker Run Command
|
||||
|
||||
Here's an example to get it up and running quickly:
|
||||
|
||||
```bash
|
||||
docker run -d \
|
||||
--name wgdashboard \
|
||||
--restart unless-stopped \
|
||||
-e enable=wg0 \
|
||||
-e isolate=wg0 \
|
||||
-p 10086:10086/tcp \
|
||||
-p 51820:51820/udp \
|
||||
--cap-add NET_ADMIN \
|
||||
donaldzou/wgdashboard:latest
|
||||
ghcr.io/wgdashboard/wgdashboard:latest
|
||||
```
|
||||
<br>
|
||||
If you want to use Compose instead of a raw Docker command, refer to the example in the `compose.yaml` or the one pasted below:
|
||||
<br><br>
|
||||
|
||||
> ⚠️ The default WireGuard port is `51820/udp`. If you change this, update the `/etc/wireguard/wg0.conf` accordingly.
|
||||
|
||||
---
|
||||
|
||||
### 📦 Docker Compose Alternative
|
||||
|
||||
You can also use Docker Compose for easier configuration:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
wgdashboard:
|
||||
image: donaldzou/wgdashboard:latest
|
||||
image: ghcr.io/wgdashboard/wgdashboard:latest
|
||||
restart: unless-stopped
|
||||
container_name: wgdashboard
|
||||
environment:
|
||||
#- tz=
|
||||
#- global_dns=
|
||||
#- enable=
|
||||
#- isolate=
|
||||
#- public_ip=
|
||||
|
||||
ports:
|
||||
- 10086:10086/tcp
|
||||
- 51820:51820/udp
|
||||
|
||||
volumes:
|
||||
- aconf:/etc/amnezia/amneziawg
|
||||
- conf:/etc/wireguard
|
||||
- data:/data
|
||||
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
|
||||
volumes:
|
||||
aconf:
|
||||
conf:
|
||||
data:
|
||||
|
||||
```
|
||||
|
||||
If you want to customize the yaml, make sure the core stays the same, but for example volume PATHs (ON THE HOST) can be freely changed.<br>
|
||||
This setup is just generic and will use the Docker volumes.
|
||||
> 📁 You can customize the **volume paths** on the host to fit your needs. The example above uses Docker volumes.
|
||||
|
||||
## Updating the container:
|
||||
---
|
||||
|
||||
Updating is right now in Alpha stage. I have got it to work, testing methods.
|
||||
## 🔄 Updating the Container
|
||||
|
||||
## Working with the container and environment variables:
|
||||
Updating the WGDashboard container should be through 'The Docker Way' - by pulling the newest/newer image and replacing this old one.
|
||||
|
||||
Once the container is running, the installation process is essentially the same as running it on bare-metal.<br>
|
||||
So go to the assign TCP port in this case HTTP, like the default 10086 one in the example and log into the WEB-GUI.<br>
|
||||
---
|
||||
|
||||
| Environment variable | Accepted arguments | Default value | Example value | Verbose |
|
||||
| -------------- | ------- | ------- | ------- | ------- |
|
||||
| tz | Europe/Amsterdam or any confirming timezone notation. | `Europe/Amsterdam` | `America/New_York` | Sets the timezone of the Docker container. This is to timesync the container to any other processes which would need it. |
|
||||
| global_dns | Any IPv4 address, such as my personal recommendation: 9.9.9.9 (QUAD9). | `1.1.1.1` | `8.8.8.8` or any IP-Address that resolves DNS-names, and of course is reachable | Set the default DNS given to clients once they connect to the WireGuard tunnel, and for new peers, set to Cloudflare DNS for reliability.
|
||||
| enable | Anything, preferably an existing WireGuard interface name. | `none` | `wg0,wg2,wg13` | Enables or disables the starting of the WireGuard interface on container 'boot-up'.
|
||||
| isolate | Anything, preferably an existing WireGuard interface name. | `none` | `wg1,wg0` | The Wireguard interface itself IS able to reach the peers (Done through the `iptables` package).
|
||||
| public_ip | Any IPv4 (public recommended) address, such as the one returned by default | Default uses the return of `curl ifconfig.me` | `89.20.83.118` | To reach your VPN from outside your own network, you need WG-Dashboard to know what your public IP-address is, otherwise it will generate faulty config files for clients. This happends because it is inside a Docker/Kubernetes container. In or outside of NAT is not relevant as long as the given IP-address is reachable from the internet or the target network.
|
||||
## ⚙️ Environment Variables
|
||||
|
||||
## Be careful with:
|
||||
| Variable | Accepted Values | Default | Example | Description |
|
||||
| ------------------ | ---------------------------------------- | ----------------------- | --------------------- | ----------------------------------------------------------------------- |
|
||||
| `tz` | Timezone | `Europe/Amsterdam` | `America/New_York` | Sets the container's timezone. Useful for accurate logs and scheduling. |
|
||||
| `global_dns` | IPv4 and IPv6 addresses | `9.9.9.9` | `8.8.8.8`, `1.1.1.1` | Default DNS for WireGuard clients. |
|
||||
| `public_ip` | Public IP address | Retrieved automatically | `253.162.134.73` | Used to generate accurate client configs. Needed if container is NAT’d. |
|
||||
| `wgd_port` | Any port that is allowed for the process | `10086` | `443` | This port is used to set the WGDashboard web port. |
|
||||
| `username` | Any non‐empty string | `-` | `admin` | Username for the WGDashboard web interface account. |
|
||||
| `password` | Any non‐empty string | `-` | `s3cr3tP@ss` | Password for the WGDashboard web interface account (stored hashed). |
|
||||
| `enable_totp` | `true`, `false` | `true` | `false` | Enable TOTP‐based two‐factor authentication for the account. |
|
||||
| `wg_autostart` | Wireguard interface name | `false` | `true` | Auto‐start the WireGuard client when the container launches. |
|
||||
| `email_server` | SMTP server address | `-` | `smtp.gmail.com` | SMTP server for sending email notifications. |
|
||||
| `email_port` | SMTP port number | `-` | `587` | Port for connecting to the SMTP server. |
|
||||
| `email_encryption` | `TLS`, `SSL`, etc. | `-` | `TLS` | Encryption method for email communication. |
|
||||
| `email_username` | Any non-empty string | `-` | `user@example.com` | Username for SMTP authentication. |
|
||||
| `email_password` | Any non-empty string | `-` | `app_password` | Password for SMTP authentication. |
|
||||
| `email_from` | Valid email address | `-` | `noreply@example.com` | Email address used as the sender for notifications. |
|
||||
| `email_template` | Path to template file | `-` | `your-template` | Custom template for email notifications. |
|
||||
|
||||
When you are going to work with multiple WireGuard interfaces, you need to also open them up to the Docker host. This done by either adding the port mappings like: `51821:51821/udp` in the Docker Compose file, or to open a range like: `51820-51830:51820-51830/udp`<br>
|
||||
The latter opens up UDP ports from 51820 to 51830, so all ports in between as well! Be careful, it is good security practise to open only needed ports!
|
||||
---
|
||||
|
||||
## Building the image yourself:
|
||||
## 🔐 Port Forwarding Note
|
||||
|
||||
To build the image yourself, you need to do a couple things:<br>
|
||||
1. Clone the Github repository containing the source code of WGDashboard including the docker directory. For example do: `git clone https://github.com/donaldzou/WGDashboard.git`
|
||||
1. Navigate into the cloned repository.
|
||||
1. (Make sure you have Docker correctly installed, if not: [Click here](https://docs.docker.com/engine/install/)) and run: `docker build . -t <Image name>:<Image tag>` as an example: `docker build . -t dselen/wgdashboard:latest`.<br>
|
||||
When using multiple WireGuard interfaces, remember to **open their respective ports** on the host.
|
||||
|
||||
This will make Docker compile the image from the resources in the directory you mention, in this case the source/root one. Let it compile, it takes only a couple seconds with a minute at most.
|
||||
Examples:
|
||||
```yaml
|
||||
# Individual mapping
|
||||
- 51821:51821/udp
|
||||
|
||||
1. If all went well, see your image with `docker images`. Example below:
|
||||
# Or port range
|
||||
- 51820-51830:51820-51830/udp
|
||||
```
|
||||
|
||||
> 🚨 **Security Tip:** Only expose ports you actually use.
|
||||
|
||||
---
|
||||
|
||||
## 🛠️ Building the Image Yourself
|
||||
|
||||
To build from source:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/WGDashboard/WGDashboard.git
|
||||
cd WGDashboard
|
||||
docker build . -f docker/Dockerfile -t yourname/wgdashboard:latest
|
||||
```
|
||||
|
||||
Example output:
|
||||
```shell
|
||||
dselen@dev-mach:~/development/WGDashboard/docker$ docker images
|
||||
docker images
|
||||
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
dselen/wgdashboard latest c96fd96ee3b3 42 minutes ago 314MB
|
||||
yourname/wgdashboard latest c96fd96ee3b3 42 minutes ago 314MB
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🧱 Dockerfile Overview
|
||||
|
||||
Here's a brief overview of the Dockerfile stages used in the image build:
|
||||
|
||||
### 1. **Build Tools & Go Compilation**
|
||||
|
||||
```Dockerfile
|
||||
FROM golang:1.24 AS compiler
|
||||
WORKDIR /go
|
||||
|
||||
RUN apt-get update && apt-get install -y ...
|
||||
RUN git clone ... && make
|
||||
...
|
||||
```
|
||||
|
||||
### 2. **Binary Copy to Scratch**
|
||||
|
||||
```Dockerfile
|
||||
FROM scratch AS bins
|
||||
COPY --from=compiler /go/amneziawg-go/amneziawg-go /amneziawg-go
|
||||
...
|
||||
```
|
||||
|
||||
### 3. **Final Alpine Container Setup**
|
||||
|
||||
```Dockerfile
|
||||
FROM alpine:latest
|
||||
COPY --from=bins ...
|
||||
RUN apk update && apk add --no-cache ...
|
||||
COPY ./src ${WGDASH}/src
|
||||
COPY ./docker/entrypoint.sh /entrypoint.sh
|
||||
...
|
||||
EXPOSE 10086
|
||||
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Entrypoint Overview
|
||||
|
||||
### Major Functions:
|
||||
|
||||
- **`ensure_installation`**: Sets up the app, database, and Python environment.
|
||||
- **`set_envvars`**: Writes `wg-dashboard.ini` and applies environment variables.
|
||||
- **`start_core`**: Starts the main WGDashboard service.
|
||||
- **`ensure_blocking`**: Tails the error log to keep the container process alive.
|
||||
|
||||
---
|
||||
|
||||
## ✅ Final Notes
|
||||
|
||||
- Use `docker logs wgdashboard` for troubleshooting.
|
||||
- Access the web interface via `http://your-ip:10086` (or whichever port you specified in the compose).
|
||||
- The first time run will auto-generate WireGuard keys and configs (configs are generated from the template).
|
||||
|
||||
## Closing remarks:
|
||||
|
||||
For feedback please submit an issue to the repository. Or message dselen@nerthus.nl.
|
||||
|
||||
@@ -1,22 +1,42 @@
|
||||
services:
|
||||
wireguard-dashboard:
|
||||
image: donaldzou/wgdashboard:latest
|
||||
wgdashboard:
|
||||
# Since the github organisation we recommend the ghcr.io.
|
||||
# Alternatively we also still push to docker.io under donaldzou/wgdashboard.
|
||||
# Both share the exact same tags. So they should be interchangable.
|
||||
image: ghcr.io/wgdashboard/wgdashboard:latest
|
||||
|
||||
# Make sure to set the restart policy. Because for a VPN its important to come back IF it crashes.
|
||||
restart: unless-stopped
|
||||
container_name: wgdashboard
|
||||
|
||||
# Environment variables can be used to configure certain values at startup. Without having to configure it from the dashboard.
|
||||
# By default its all disabled, but uncomment the following lines to apply these. (uncommenting is removing the # character)
|
||||
# Refer to the documentation on https://wgdashboard.dev/ for more info on what everything means.
|
||||
#environment:
|
||||
#- tz= # <--- Set container timezone, default: Europe/Amsterdam.
|
||||
#- global_dns= # <--- Set global DNS address, default: 1.1.1.1.
|
||||
#- isolate= # <--- Set the interfaces that will disallow peer communication, default: 'none'.
|
||||
#- public_ip= # <--- Set public IP to ensure the correct one is chosen, defaulting to the IP give by ifconfig.me.
|
||||
#- wgd_port= # <--- Set the port WGDashboard will use for its web-server.
|
||||
|
||||
# The following section, ports is very important for exposing more than one Wireguard/AmneziaWireguard interfaces.
|
||||
# Once you create a new configuration and assign a port in the dashboard, don't forget to add it to the ports as well.
|
||||
# Quick-tip: most Wireguard VPN tunnels use UDP. WGDashboard uses HTTP, so tcp.
|
||||
ports:
|
||||
- 10086:10086/tcp
|
||||
- 51820:51820/udp
|
||||
|
||||
# Volumes can be configured however you'd like. The default is using docker volumes.
|
||||
# If you want to use local paths, replace the path before the : with your path.
|
||||
volumes:
|
||||
- aconf:/etc/amnezia/amneziawg
|
||||
- conf:/etc/wireguard
|
||||
- data:/data
|
||||
|
||||
# Needed for network administration.
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
|
||||
# The following configuration is linked to the above default volumes.
|
||||
volumes:
|
||||
aconf:
|
||||
conf:
|
||||
data:
|
||||
|
||||
216
docker/entrypoint.sh
Normal file
216
docker/entrypoint.sh
Normal file
@@ -0,0 +1,216 @@
|
||||
#!/bin/bash
|
||||
|
||||
config_file="/data/wg-dashboard.ini"
|
||||
|
||||
trap 'stop_service' SIGTERM
|
||||
|
||||
# Hash password with bcrypt
|
||||
hash_password() {
|
||||
python3 -c "import bcrypt; print(bcrypt.hashpw('$1'.encode(), bcrypt.gensalt(12)).decode())"
|
||||
}
|
||||
|
||||
# Function to set or update section/key/value in the INI file
|
||||
set_ini() {
|
||||
local section="$1" key="$2" value="$3"
|
||||
local current_value
|
||||
|
||||
# Add section if it doesn't exist
|
||||
grep -q "^\[${section}\]" "$config_file" \
|
||||
|| printf "\n[%s]\n" "${section}" >> "$config_file"
|
||||
|
||||
# Check current value if key exists
|
||||
if grep -q "^[[:space:]]*${key}[[:space:]]*=" "$config_file"; then
|
||||
current_value=$(grep "^[[:space:]]*${key}[[:space:]]*=" "$config_file" | cut -d= -f2- | xargs)
|
||||
|
||||
# Don't display actual value if it's a password field
|
||||
if [[ "$key" == *"password"* ]]; then
|
||||
if [ "$current_value" = "$value" ]; then
|
||||
echo "- $key is already set correctly (value hidden)"
|
||||
return 0
|
||||
fi
|
||||
sed -i "/^\[${section}\]/,/^\[/{s|^[[:space:]]*${key}[[:space:]]*=.*|${key} = ${value}|}" "$config_file"
|
||||
echo "- Updated $key (value hidden)"
|
||||
else
|
||||
if [ "$current_value" = "$value" ]; then
|
||||
echo "- $key is already set correctly ($value)"
|
||||
return 0
|
||||
fi
|
||||
sed -i "/^\[${section}\]/,/^\[/{s|^[[:space:]]*${key}[[:space:]]*=.*|${key} = ${value}|}" "$config_file"
|
||||
echo "- Updated $key to: $value"
|
||||
fi
|
||||
else
|
||||
sed -i "/^\[${section}\]/a ${key} = ${value}" "$config_file"
|
||||
|
||||
# Don't display actual value if it's a password field
|
||||
if [[ "$key" == *"password"* ]]; then
|
||||
echo "- Added new setting $key (value hidden)"
|
||||
else
|
||||
echo "- Added new setting $key: $value"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
stop_service() {
|
||||
echo "[WGDashboard] Stopping WGDashboard..."
|
||||
/bin/bash ./wgd.sh stop
|
||||
exit 0
|
||||
}
|
||||
|
||||
echo "------------------------- START ----------------------------"
|
||||
echo "Starting the WGDashboard Docker container."
|
||||
|
||||
ensure_installation() {
|
||||
echo "Quick-installing..."
|
||||
|
||||
# Make the wgd.sh script executable.
|
||||
chmod +x "${WGDASH}"/src/wgd.sh
|
||||
cd "${WGDASH}"/src || exit
|
||||
|
||||
# Github issue: https://github.com/donaldzou/WGDashboard/issues/723
|
||||
echo "Checking for stale pids..."
|
||||
if [[ -f ${WGDASH}/src/gunicorn.pid ]]; then
|
||||
echo "Found stale pid, removing..."
|
||||
rm ${WGDASH}/src/gunicorn.pid
|
||||
fi
|
||||
|
||||
# Removing clear shell command from the wgd.sh script to enhance docker logging.
|
||||
echo "Removing clear command from wgd.sh for better Docker logging."
|
||||
sed -i '/clear/d' ./wgd.sh
|
||||
|
||||
# Create required directories and links
|
||||
if [ ! -d "/data/db" ]; then
|
||||
echo "Creating database dir"
|
||||
mkdir -p /data/db
|
||||
fi
|
||||
|
||||
if [ ! -d "${WGDASH}/src/db" ]; then
|
||||
ln -s /data/db "${WGDASH}/src/db"
|
||||
fi
|
||||
|
||||
if [ ! -f "${config_file}" ]; then
|
||||
echo "Creating wg-dashboard.ini file"
|
||||
touch "${config_file}"
|
||||
fi
|
||||
|
||||
if [ ! -f "${WGDASH}/src/wg-dashboard.ini" ]; then
|
||||
ln -s "${config_file}" "${WGDASH}/src/wg-dashboard.ini"
|
||||
fi
|
||||
|
||||
# Setup WireGuard if needed
|
||||
if [ -z "$(ls -A /etc/wireguard)" ]; then
|
||||
cp -a "/configs/wg0.conf.template" "/etc/wireguard/wg0.conf"
|
||||
|
||||
echo "Setting a secure private key."
|
||||
local privateKey
|
||||
privateKey=$(wg genkey)
|
||||
sed -i "s|^PrivateKey *=.*$|PrivateKey = ${privateKey}|g" /etc/wireguard/wg0.conf
|
||||
|
||||
echo "Done setting template."
|
||||
else
|
||||
echo "Existing wg0 configuration file found, using that."
|
||||
fi
|
||||
}
|
||||
|
||||
set_envvars() {
|
||||
printf "\n------------- SETTING ENVIRONMENT VARIABLES ----------------\n"
|
||||
|
||||
# Check if config file is empty
|
||||
if [ ! -s "${config_file}" ]; then
|
||||
echo "Config file is empty. Creating initial structure."
|
||||
fi
|
||||
|
||||
echo "Checking basic configuration:"
|
||||
set_ini Peers peer_global_dns "${global_dns}"
|
||||
|
||||
if [ -z "${public_ip}" ]; then
|
||||
public_ip=$(curl -s ifconfig.me)
|
||||
echo "Automatically detected public IP: ${public_ip}"
|
||||
fi
|
||||
|
||||
set_ini Peers remote_endpoint "${public_ip}"
|
||||
set_ini Server app_port "${wgd_port}"
|
||||
|
||||
# Account settings - process all parameters
|
||||
[[ -n "$username" ]] && echo "Configuring user account:"
|
||||
# Basic account variables
|
||||
[[ -n "$username" ]] && set_ini Account username "${username}"
|
||||
|
||||
if [[ -n "$password" ]]; then
|
||||
echo "- Setting password"
|
||||
set_ini Account password "$(hash_password "${password}")"
|
||||
fi
|
||||
|
||||
# Additional account variables
|
||||
[[ -n "$enable_totp" ]] && set_ini Account enable_totp "${enable_totp}"
|
||||
[[ -n "$totp_verified" ]] && set_ini Account totp_verified "${totp_verified}"
|
||||
[[ -n "$totp_key" ]] && set_ini Account totp_key "${totp_key}"
|
||||
|
||||
# Welcome session
|
||||
[[ -n "$welcome_session" ]] && set_ini Other welcome_session "${welcome_session}"
|
||||
# If username and password are set but welcome_session isn't, disable it
|
||||
if [[ -n "$username" && -n "$password" && -z "$welcome_session" ]]; then
|
||||
set_ini Other welcome_session "false"
|
||||
fi
|
||||
|
||||
# Autostart WireGuard
|
||||
if [[ -n "$wg_autostart" ]]; then
|
||||
echo "Configuring WireGuard autostart:"
|
||||
set_ini WireGuardConfiguration autostart "${wg_autostart}"
|
||||
fi
|
||||
|
||||
# Email (check if any settings need to be configured)
|
||||
email_vars=("email_server" "email_port" "email_encryption" "email_username" "email_password" "email_from" "email_template")
|
||||
for var in "${email_vars[@]}"; do
|
||||
if [ -n "${!var}" ]; then
|
||||
echo "Configuring email settings:"
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# Email (iterate through all possible fields)
|
||||
email_fields=("server:email_server" "port:email_port" "encryption:email_encryption"
|
||||
"username:email_username" "email_password:email_password"
|
||||
"send_from:email_from" "email_template:email_template")
|
||||
|
||||
for field_pair in "${email_fields[@]}"; do
|
||||
IFS=: read -r field var <<< "$field_pair"
|
||||
[[ -n "${!var}" ]] && set_ini Email "$field" "${!var}"
|
||||
done
|
||||
}
|
||||
|
||||
# Start service and monitor logs
|
||||
start_and_monitor() {
|
||||
printf "\n---------------------- STARTING CORE -----------------------\n"
|
||||
|
||||
# Due to some instances complaining about this, making sure its there every time.
|
||||
mkdir -p /dev/net
|
||||
mknod /dev/net/tun c 10 200
|
||||
chmod 600 /dev/net/tun
|
||||
|
||||
# Actually starting WGDashboard
|
||||
echo "Starting WGDashboard directly with Gunicorn..."
|
||||
/opt/wgdashboard/src/venv/bin/python3 ./venv/bin/gunicorn --config ./gunicorn.conf.py
|
||||
|
||||
# Wait a second before continuing, to give the python program some time to get ready.
|
||||
sleep 1
|
||||
echo -e "\nEnsuring container continuation."
|
||||
|
||||
# Find and monitor log file
|
||||
local logdir="${WGDASH}/src/log"
|
||||
latestErrLog=$(find "$logdir" -name "error_*.log" -type f -print | sort -r | head -n 1)
|
||||
|
||||
# Only tail the logs if they are found
|
||||
if [ -n "$latestErrLog" ]; then
|
||||
tail -f "$latestErrLog" &
|
||||
# Wait for the tail process to end.
|
||||
wait $!
|
||||
else
|
||||
echo "No log files found to tail. Something went wrong, exiting..."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Main execution flow
|
||||
ensure_installation
|
||||
set_envvars
|
||||
start_and_monitor
|
||||
235
entrypoint.sh
235
entrypoint.sh
@@ -1,235 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Path to the configuration file (exists because of previous function).
|
||||
config_file="/data/wg-dashboard.ini"
|
||||
|
||||
echo "------------------------- START ----------------------------"
|
||||
echo "Starting the WireGuard Dashboard Docker container."
|
||||
|
||||
ensure_installation() {
|
||||
# When using a custom directory to store the files, this part moves over and makes sure the installation continues.
|
||||
echo "Quick-installing..."
|
||||
|
||||
if [ ! -d "/data/db" ]; then
|
||||
echo "Creating database dir"
|
||||
mkdir /data/db
|
||||
fi
|
||||
|
||||
if [ ! -d "${WGDASH}/src/db" ]; then
|
||||
ln -s /data/db "${WGDASH}/src/db"
|
||||
fi
|
||||
|
||||
if [ ! -f "${config_file}" ]; then
|
||||
echo "Creating wg-dashboard.ini file"
|
||||
touch "${config_file}"
|
||||
fi
|
||||
|
||||
if [ ! -f "${WGDASH}/src/wg-dashboard.ini" ]; then
|
||||
ln -s "${config_file}" "${WGDASH}/src/wg-dashboard.ini"
|
||||
fi
|
||||
|
||||
python3 -m venv "${WGDASH}"/src/venv
|
||||
. "${WGDASH}/src/venv/bin/activate"
|
||||
|
||||
echo "Moving PIP dependency from ephemerality to runtime environment: psutil"
|
||||
mv /usr/lib/python3.12/site-packages/psutil* "${WGDASH}"/src/venv/lib/python3.12/site-packages
|
||||
|
||||
echo "Moving PIP dependency from ephemerality to runtime environment: bcrypt"
|
||||
mv /usr/lib/python3.12/site-packages/bcrypt* "${WGDASH}"/src/venv/lib/python3.12/site-packages
|
||||
|
||||
|
||||
chmod +x "${WGDASH}"/src/wgd.sh
|
||||
cd "${WGDASH}"/src || exit
|
||||
./wgd.sh install
|
||||
|
||||
echo "Looks like the installation succeeded. Moving on."
|
||||
|
||||
# This first step is to ensure the wg0.conf file exists, and if not, then its copied over from the ephemeral container storage.
|
||||
# This is done so WGDashboard it works out of the box
|
||||
|
||||
if [ ! -f "/etc/wireguard/wg0.conf" ]; then
|
||||
echo "Standard wg0 Configuration file not found, grabbing template."
|
||||
cp -a "/configs/wg0.conf.template" "/etc/wireguard/wg0.conf"
|
||||
|
||||
echo "Setting a secure private key." # SORRY 4 BE4 - Daan
|
||||
|
||||
local privateKey
|
||||
privateKey=$(wg genkey)
|
||||
sed -i "s|^PrivateKey *=.*$|PrivateKey = ${privateKey}|g" /etc/wireguard/wg0.conf
|
||||
|
||||
echo "Done setting template."
|
||||
else
|
||||
echo "Existing wg0 configuration file found, using that."
|
||||
fi
|
||||
}
|
||||
|
||||
set_envvars() {
|
||||
printf "\n------------- SETTING ENVIRONMENT VARIABLES ----------------\n"
|
||||
|
||||
# Check if the file is empty
|
||||
if [ ! -s "${config_file}" ]; then
|
||||
echo "Config file is empty. Creating [Peers] section."
|
||||
|
||||
# Create [Peers] section with initial values
|
||||
{
|
||||
echo "[Peers]"
|
||||
echo "peer_global_dns = ${global_dns}"
|
||||
echo "remote_endpoint = ${public_ip}"
|
||||
#echo -e "\n[Server]"
|
||||
} > "${config_file}"
|
||||
|
||||
else
|
||||
echo "Config file is not empty, using pre-existing."
|
||||
fi
|
||||
|
||||
echo "Verifying current variables..."
|
||||
|
||||
# Check and update the DNS if it has changed
|
||||
current_dns=$(grep "peer_global_dns = " "${config_file}" | awk '{print $NF}')
|
||||
if [ "${global_dns}" == "$current_dns" ]; then
|
||||
echo "DNS is correct, moving on."
|
||||
|
||||
else
|
||||
echo "Changing default DNS..."
|
||||
sed -i "s/^peer_global_dns = .*/peer_global_dns = ${global_dns}/" "${config_file}"
|
||||
fi
|
||||
|
||||
if [ "${public_ip}" == "0.0.0.0" ]; then
|
||||
|
||||
default_ip=$(curl -s ifconfig.me)
|
||||
|
||||
echo "Trying to fetch the Public-IP using ifconfig.me: ${default_ip}"
|
||||
sed -i "s/^remote_endpoint = .*/remote_endpoint = ${default_ip}/" "${config_file}"
|
||||
|
||||
else
|
||||
echo "Public-IP is correct, moving on."
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
# === CORE SERVICES ===
|
||||
start_core() {
|
||||
printf "\n---------------------- STARTING CORE -----------------------\n"
|
||||
|
||||
echo "Activating Python venv and executing the WireGuard Dashboard service."
|
||||
|
||||
. "${WGDASH}"/src/venv/bin/activate
|
||||
cd "${WGDASH}"/src || return
|
||||
bash wgd.sh start
|
||||
|
||||
# Isolated peers feature, first converting the existing configuration files and the given names to arrays.
|
||||
#
|
||||
# WILL BE REMOVED IN FUTURE WHEN WGDASHBOARD ITSELF SUPPORTS THIS!!
|
||||
#
|
||||
|
||||
local configurations=(/etc/wireguard/*)
|
||||
IFS=',' read -r -a do_isolate <<< "${isolate}"
|
||||
non_isolate=()
|
||||
|
||||
# Checking if there are matches between the two arrays.
|
||||
for config in "${configurations[@]}"; do
|
||||
config=$(echo "$config" | sed -e 's|.*/etc/wireguard/||' -e 's|\.conf$||')
|
||||
|
||||
local found
|
||||
found=false
|
||||
|
||||
for interface in "${do_isolate[@]}"; do
|
||||
|
||||
if [[ "$config" == "$interface" ]]; then
|
||||
found=true
|
||||
break
|
||||
fi
|
||||
|
||||
done
|
||||
|
||||
if [ "$found" = false ]; then
|
||||
non_isolate+=("$config")
|
||||
fi
|
||||
|
||||
done
|
||||
|
||||
# Isolating the matches.
|
||||
noneFound=0
|
||||
|
||||
for interface in "${do_isolate[@]}"; do
|
||||
|
||||
if [ "$interface" = "none" ] || [ "$interface" = "" ]; then
|
||||
echo "Found none, stopping isolation checking."
|
||||
noneFound=1
|
||||
break
|
||||
|
||||
else
|
||||
|
||||
if [ ! -f "/etc/wireguard/${interface}.conf" ]; then
|
||||
echo "Ignoring ${interface}"
|
||||
|
||||
elif [ -f "/etc/wireguard/${interface}.conf" ]; then
|
||||
|
||||
|
||||
echo "Isolating interface:" "$interface"
|
||||
|
||||
upblocking=$(grep -c "PostUp = iptables -I FORWARD -i ${interface} -o ${interface} -j DROP" /etc/wireguard/"${interface}".conf)
|
||||
downblocking=$(grep -c "PreDown = iptables -D FORWARD -i ${interface} -o ${interface} -j DROP" /etc/wireguard/"${interface}".conf)
|
||||
|
||||
if [ "$upblocking" -lt 1 ] && [ "$downblocking" -lt 1 ]; then
|
||||
sed -i "/PostUp =/a PostUp = iptables -I FORWARD -i ${interface} -o ${interface} -j DROP" /etc/wireguard/"${interface}".conf
|
||||
sed -i "/PreDown =/a PreDown = iptables -D FORWARD -i ${interface} -o ${interface} -j DROP" /etc/wireguard/"${interface}".conf
|
||||
fi
|
||||
|
||||
else
|
||||
echo "Configuration for $interface in enforce isolation does not seem to exist, continuing."
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
done
|
||||
|
||||
# Removing isolation for the configurations that did not match.
|
||||
|
||||
|
||||
for interface in "${non_isolate[@]}"; do
|
||||
if [ $noneFound -eq 1 ]; then
|
||||
break
|
||||
|
||||
elif [ ! -f "/etc/wireguard/${interface}.conf" ]; then
|
||||
echo "Ignoring ${interface}"
|
||||
|
||||
elif [ -f "/etc/wireguard/${interface}.conf" ]; then
|
||||
echo "Removing isolation, if isolation is present for:" "$interface"
|
||||
|
||||
sed -i "/PostUp = iptables -I FORWARD -i ${interface} -o ${interface} -j DROP/d" /etc/wireguard/"${interface}".conf
|
||||
sed -i "/PreDown = iptables -D FORWARD -i ${interface} -o ${interface} -j DROP/d" /etc/wireguard/"${interface}".conf
|
||||
else
|
||||
echo "Configuration for $interface in removing isolation does not seem to exist, continuing."
|
||||
fi
|
||||
|
||||
done
|
||||
|
||||
}
|
||||
|
||||
ensure_blocking() {
|
||||
sleep 1s
|
||||
echo -e "\nEnsuring container continuation."
|
||||
|
||||
# Find and tail the latest error and access logs if they exist
|
||||
local logdir="/opt/wireguarddashboard/src/log"
|
||||
|
||||
latestErrLog=$(find "$logdir" -name "error_*.log" -type f -print | sort -r | head -n 1)
|
||||
latestAccLog=$(find "$logdir" -name "access_*.log" -type f -print | sort -r | head -n 1)
|
||||
|
||||
# Only tail the logs if they are found
|
||||
if [ -n "$latestErrLog" ] || [ -n "$latestAccLog" ]; then
|
||||
tail -f "$latestErrLog" "$latestAccLog"
|
||||
else
|
||||
echo "No log files found to tail."
|
||||
fi
|
||||
|
||||
# Blocking command to keep the container running as a last resort.
|
||||
sleep infinity
|
||||
}
|
||||
|
||||
# Execute functions for the WireGuard Dashboard services, then set the environment variables
|
||||
ensure_installation
|
||||
set_envvars
|
||||
start_core
|
||||
ensure_blocking
|
||||
1098
package-lock.json
generated
1098
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,8 +0,0 @@
|
||||
{
|
||||
"dependencies": {
|
||||
"@volar/language-server": "2.4.0-alpha.18",
|
||||
"@vue/language-server": "2.0.28",
|
||||
"ag-charts-vue3": "^10.3.1",
|
||||
"dayjs": "^1.11.12"
|
||||
}
|
||||
}
|
||||
232
src/client.py
Normal file
232
src/client.py
Normal file
@@ -0,0 +1,232 @@
|
||||
import datetime
|
||||
|
||||
from tzlocal import get_localzone
|
||||
|
||||
from functools import wraps
|
||||
|
||||
from flask import Blueprint, render_template, abort, request, Flask, current_app, session, redirect, url_for
|
||||
import os
|
||||
|
||||
from modules.WireguardConfiguration import WireguardConfiguration
|
||||
from modules.DashboardConfig import DashboardConfig
|
||||
from modules.Email import EmailSender
|
||||
|
||||
|
||||
def ResponseObject(status=True, message=None, data=None, status_code = 200) -> Flask.response_class:
|
||||
response = Flask.make_response(current_app, {
|
||||
"status": status,
|
||||
"message": message,
|
||||
"data": data
|
||||
})
|
||||
response.status_code = status_code
|
||||
response.content_type = "application/json"
|
||||
return response
|
||||
|
||||
|
||||
|
||||
from modules.DashboardClients import DashboardClients
|
||||
def createClientBlueprint(wireguardConfigurations: dict[WireguardConfiguration], dashboardConfig: DashboardConfig, dashboardClients: DashboardClients):
|
||||
|
||||
client = Blueprint('client', __name__, template_folder=os.path.abspath("./static/dist/WGDashboardClient"))
|
||||
prefix = f'{dashboardConfig.GetConfig("Server", "app_prefix")[1]}/client'
|
||||
|
||||
def login_required(f):
|
||||
@wraps(f)
|
||||
def func(*args, **kwargs):
|
||||
if session.get("Email") is None or session.get("TotpVerified") is None or not session.get("TotpVerified") or session.get("Role") != "client":
|
||||
return ResponseObject(False, "Unauthorized access.", data=None, status_code=401)
|
||||
|
||||
if not dashboardClients.GetClient(session.get("ClientID")):
|
||||
session.clear()
|
||||
return ResponseObject(False, "Unauthorized access.", data=None, status_code=401)
|
||||
|
||||
return f(*args, **kwargs)
|
||||
return func
|
||||
|
||||
@client.before_request
|
||||
def clientBeforeRequest():
|
||||
if not dashboardConfig.GetConfig("Clients", "enable")[1]:
|
||||
abort(404)
|
||||
|
||||
if request.method.lower() == 'options':
|
||||
return ResponseObject(True)
|
||||
|
||||
@client.post(f'{prefix}/api/signup')
|
||||
def ClientAPI_SignUp():
|
||||
data = request.get_json()
|
||||
status, msg = dashboardClients.SignUp(**data)
|
||||
return ResponseObject(status, msg)
|
||||
|
||||
@client.get(f'{prefix}/api/signin/oidc/providers')
|
||||
def ClientAPI_SignIn_OIDC_GetProviders():
|
||||
_, oidc = dashboardConfig.GetConfig("OIDC", "client_enable")
|
||||
if not oidc:
|
||||
return ResponseObject(status=False, message="OIDC is disabled")
|
||||
|
||||
return ResponseObject(data=dashboardClients.OIDC.GetProviders())
|
||||
|
||||
@client.post(f'{prefix}/api/signin/oidc')
|
||||
def ClientAPI_SignIn_OIDC():
|
||||
_, oidc = dashboardConfig.GetConfig("OIDC", "client_enable")
|
||||
if not oidc:
|
||||
return ResponseObject(status=False, message="OIDC is disabled")
|
||||
|
||||
data = request.get_json()
|
||||
status, oidcData = dashboardClients.SignIn_OIDC(**data)
|
||||
if not status:
|
||||
return ResponseObject(status, oidcData)
|
||||
|
||||
session['Email'] = oidcData.get('email')
|
||||
session['Role'] = 'client'
|
||||
session['TotpVerified'] = True
|
||||
|
||||
return ResponseObject()
|
||||
|
||||
@client.post(f'{prefix}/api/signin')
|
||||
def ClientAPI_SignIn():
|
||||
data = request.get_json()
|
||||
status, msg = dashboardClients.SignIn(**data)
|
||||
if status:
|
||||
session['Email'] = data.get('Email')
|
||||
session['Role'] = 'client'
|
||||
session['TotpVerified'] = False
|
||||
return ResponseObject(status, msg)
|
||||
|
||||
@client.post(f'{prefix}/api/resetPassword/generateResetToken')
|
||||
def ClientAPI_ResetPassword_GenerateResetToken():
|
||||
date = datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
|
||||
|
||||
emailSender = EmailSender(dashboardConfig)
|
||||
if not emailSender.ready():
|
||||
return ResponseObject(False, "We can't send you an email due to your Administrator has not setup email service. Please contact your administrator.")
|
||||
|
||||
data = request.get_json()
|
||||
email = data.get('Email', None)
|
||||
if not email:
|
||||
return ResponseObject(False, "Please provide a valid Email")
|
||||
|
||||
u = dashboardClients.SignIn_UserExistence(email)
|
||||
if not u:
|
||||
return ResponseObject(False, "Please provide a valid Email")
|
||||
|
||||
token = dashboardClients.GenerateClientPasswordResetToken(u.get('ClientID'))
|
||||
|
||||
status, msg = emailSender.send(
|
||||
email, "[WGDashboard | Client] Reset Password",
|
||||
f"Hi {email}, \n\nIt looks like you're trying to reset your password at {date} \n\nEnter this 6 digits code on the Forgot Password to continue:\n\n{token}\n\nThis code will expire in 30 minutes for your security. If you didn’t request a password reset, you can safely ignore this email—your current password will remain unchanged.\n\nIf you need help, feel free to contact support.\n\nBest regards,\n\nWGDashboard"
|
||||
)
|
||||
|
||||
return ResponseObject(status, msg)
|
||||
|
||||
@client.post(f'{prefix}/api/resetPassword/validateResetToken')
|
||||
def ClientAPI_ResetPassword_ValidateResetToken():
|
||||
data = request.get_json()
|
||||
email = data.get('Email', None)
|
||||
token = data.get('Token', None)
|
||||
if not all([email, token]):
|
||||
return ResponseObject(False, "Please provide a valid Email")
|
||||
|
||||
u = dashboardClients.SignIn_UserExistence(email)
|
||||
if not u:
|
||||
return ResponseObject(False, "Please provide a valid Email")
|
||||
|
||||
return ResponseObject(status=dashboardClients.ValidateClientPasswordResetToken(u.get('ClientID'), token))
|
||||
|
||||
@client.post(f'{prefix}/api/resetPassword')
|
||||
def ClientAPI_ResetPassword():
|
||||
data = request.get_json()
|
||||
email = data.get('Email', None)
|
||||
token = data.get('Token', None)
|
||||
password = data.get('Password', None)
|
||||
confirmPassword = data.get('ConfirmPassword', None)
|
||||
if not all([email, token, password, confirmPassword]):
|
||||
return ResponseObject(False, "Please provide a valid Email")
|
||||
|
||||
u = dashboardClients.SignIn_UserExistence(email)
|
||||
if not u:
|
||||
return ResponseObject(False, "Please provide a valid Email")
|
||||
|
||||
if not dashboardClients.ValidateClientPasswordResetToken(u.get('ClientID'), token):
|
||||
return ResponseObject(False, "Verification code is either invalid or expired")
|
||||
|
||||
status, msg = dashboardClients.ResetClientPassword(u.get('ClientID'), password, confirmPassword)
|
||||
|
||||
dashboardClients.RevokeClientPasswordResetToken(u.get('ClientID'), token)
|
||||
|
||||
return ResponseObject(status, msg)
|
||||
|
||||
|
||||
@client.get(f'{prefix}/api/signout')
|
||||
def ClientAPI_SignOut():
|
||||
if session.get("SignInMethod") == "OIDC":
|
||||
dashboardClients.SignOut_OIDC()
|
||||
session.clear()
|
||||
return ResponseObject(True)
|
||||
|
||||
@client.get(f'{prefix}/api/signin/totp')
|
||||
def ClientAPI_SignIn_TOTP():
|
||||
token = request.args.get('Token', None)
|
||||
if not token:
|
||||
return ResponseObject(False, "Please provide TOTP token")
|
||||
|
||||
status, msg = dashboardClients.SignIn_GetTotp(token)
|
||||
return ResponseObject(status, msg)
|
||||
|
||||
@client.post(f'{prefix}/api/signin/totp')
|
||||
def ClientAPI_SignIn_ValidateTOTP():
|
||||
data = request.get_json()
|
||||
token = data.get('Token', None)
|
||||
userProvidedTotp = data.get('UserProvidedTOTP', None)
|
||||
if not all([token, userProvidedTotp]):
|
||||
return ResponseObject(False, "Please fill in all fields")
|
||||
status, msg = dashboardClients.SignIn_GetTotp(token, userProvidedTotp)
|
||||
if status:
|
||||
if session.get('Email') is None:
|
||||
return ResponseObject(False, "Sign in status is invalid", status_code=401)
|
||||
session['TotpVerified'] = True
|
||||
profile = dashboardClients.GetClientProfile(session.get("ClientID"))
|
||||
|
||||
return ResponseObject(True, data={
|
||||
"Email": session.get('Email'),
|
||||
"Profile": profile
|
||||
})
|
||||
return ResponseObject(status, msg)
|
||||
|
||||
@client.get(prefix)
|
||||
def ClientIndex():
|
||||
return render_template('client.html')
|
||||
|
||||
@client.get(f'{prefix}/api/serverInformation')
|
||||
def ClientAPI_ServerInformation():
|
||||
return ResponseObject(data={
|
||||
"ServerTimezone": str(get_localzone())
|
||||
})
|
||||
|
||||
@client.get(f'{prefix}/api/validateAuthentication')
|
||||
@login_required
|
||||
def ClientAPI_ValidateAuthentication():
|
||||
return ResponseObject(True)
|
||||
|
||||
@client.get(f'{prefix}/api/configurations')
|
||||
@login_required
|
||||
def ClientAPI_Configurations():
|
||||
return ResponseObject(True, data=dashboardClients.GetClientAssignedPeers(session['ClientID']))
|
||||
|
||||
@client.get(f'{prefix}/api/settings/getClientProfile')
|
||||
@login_required
|
||||
def ClientAPI_Settings_GetClientProfile():
|
||||
return ResponseObject(data={
|
||||
"Email": session.get("Email"),
|
||||
"SignInMethod": session.get("SignInMethod"),
|
||||
"Profile": dashboardClients.GetClientProfile(session.get("ClientID"))
|
||||
})
|
||||
|
||||
@client.post(f'{prefix}/api/settings/updatePassword')
|
||||
@login_required
|
||||
def ClientAPI_Settings_UpdatePassword():
|
||||
data = request.get_json()
|
||||
status, message = dashboardClients.UpdateClientPassword(session['ClientID'], **data)
|
||||
|
||||
return ResponseObject(status, message)
|
||||
|
||||
return client
|
||||
2812
src/dashboard.py
2812
src/dashboard.py
File diff suppressed because it is too large
Load Diff
@@ -1,26 +1,26 @@
|
||||
import dashboard
|
||||
from datetime import datetime
|
||||
|
||||
global sqldb, cursor, DashboardConfig, WireguardConfigurations, AllPeerJobs, JobLogger
|
||||
global sqldb, cursor, DashboardConfig, WireguardConfigurations, AllPeerJobs, JobLogger, Dash
|
||||
app_host, app_port = dashboard.gunicornConfig()
|
||||
date = datetime.today().strftime('%Y_%m_%d_%H_%M_%S')
|
||||
|
||||
|
||||
def post_worker_init(worker):
|
||||
dashboard.startThreads()
|
||||
|
||||
dashboard.DashboardPlugins.startThreads()
|
||||
|
||||
worker_class = 'gthread'
|
||||
workers = 1
|
||||
threads = 1
|
||||
threads = 2
|
||||
bind = f"{app_host}:{app_port}"
|
||||
daemon = True
|
||||
pidfile = './gunicorn.pid'
|
||||
wsgi_app = "dashboard:app"
|
||||
accesslog = f"./log/access_{date}.log"
|
||||
log_level = "debug"
|
||||
loglevel = "info"
|
||||
capture_output = True
|
||||
errorlog = f"./log/error_{date}.log"
|
||||
print(f"[WGDashboard] WGDashboard w/ Gunicorn will be running on {bind}", flush=True)
|
||||
print(f"[WGDashboard] Access log file is at {accesslog}", flush=True)
|
||||
print(f"[WGDashboard] Error log file is at {errorlog}", flush=True)
|
||||
pythonpath = "., ./modules"
|
||||
|
||||
print(f"[Gunicorn] WGDashboard w/ Gunicorn will be running on {bind}", flush=True)
|
||||
print(f"[Gunicorn] Access log file is at {accesslog}", flush=True)
|
||||
print(f"[Gunicorn] Error log file is at {errorlog}", flush=True)
|
||||
92
src/modules/AmneziaWGPeer.py
Normal file
92
src/modules/AmneziaWGPeer.py
Normal file
@@ -0,0 +1,92 @@
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import subprocess
|
||||
import uuid
|
||||
|
||||
from .Peer import Peer
|
||||
from .Utilities import ValidateIPAddressesWithRange, ValidateDNSAddress, GenerateWireguardPublicKey
|
||||
|
||||
|
||||
class AmneziaWGPeer(Peer):
|
||||
def __init__(self, tableData, configuration):
|
||||
self.advanced_security = tableData["advanced_security"]
|
||||
super().__init__(tableData, configuration)
|
||||
|
||||
|
||||
def updatePeer(self, name: str, private_key: str,
|
||||
preshared_key: str,
|
||||
dns_addresses: str, allowed_ip: str, endpoint_allowed_ip: str, mtu: int,
|
||||
keepalive: int, advanced_security: str) -> tuple[bool, str] or tuple[bool, None]:
|
||||
if not self.configuration.getStatus():
|
||||
self.configuration.toggleConfiguration()
|
||||
|
||||
existingAllowedIps = [item for row in list(
|
||||
map(lambda x: [q.strip() for q in x.split(',')],
|
||||
map(lambda y: y.allowed_ip,
|
||||
list(filter(lambda k: k.id != self.id, self.configuration.getPeersList()))))) for item in row]
|
||||
|
||||
if allowed_ip in existingAllowedIps:
|
||||
return False, "Allowed IP already taken by another peer"
|
||||
if not ValidateIPAddressesWithRange(endpoint_allowed_ip):
|
||||
return False, f"Endpoint Allowed IPs format is incorrect"
|
||||
if len(dns_addresses) > 0 and not ValidateDNSAddress(dns_addresses):
|
||||
return False, f"DNS format is incorrect"
|
||||
|
||||
if type(mtu) is str:
|
||||
mtu = 0
|
||||
|
||||
if type(keepalive) is str:
|
||||
keepalive = 0
|
||||
|
||||
if mtu < 0 or mtu > 1460:
|
||||
return False, "MTU format is not correct"
|
||||
if keepalive < 0:
|
||||
return False, "Persistent Keepalive format is not correct"
|
||||
if advanced_security != "on" and advanced_security != "off":
|
||||
return False, "Advanced Security can only be on or off"
|
||||
if len(private_key) > 0:
|
||||
pubKey = GenerateWireguardPublicKey(private_key)
|
||||
if not pubKey[0] or pubKey[1] != self.id:
|
||||
return False, "Private key does not match with the public key"
|
||||
try:
|
||||
rd = random.Random()
|
||||
uid = str(uuid.UUID(int=rd.getrandbits(128), version=4))
|
||||
pskExist = len(preshared_key) > 0
|
||||
|
||||
if pskExist:
|
||||
with open(uid, "w+") as f:
|
||||
f.write(preshared_key)
|
||||
newAllowedIPs = allowed_ip.replace(" ", "")
|
||||
updateAllowedIp = subprocess.check_output(
|
||||
f"{self.configuration.Protocol} set {self.configuration.Name} peer {self.id} allowed-ips {newAllowedIPs} {f'preshared-key {uid}' if pskExist else 'preshared-key /dev/null'}",
|
||||
shell=True, stderr=subprocess.STDOUT)
|
||||
|
||||
if pskExist: os.remove(uid)
|
||||
|
||||
if len(updateAllowedIp.decode().strip("\n")) != 0:
|
||||
return False, "Update peer failed when updating Allowed IPs"
|
||||
saveConfig = subprocess.check_output(f"{self.configuration.Protocol}-quick save {self.configuration.Name}",
|
||||
shell=True, stderr=subprocess.STDOUT)
|
||||
if f"wg showconf {self.configuration.Name}" not in saveConfig.decode().strip('\n'):
|
||||
return False, "Update peer failed when saving the configuration"
|
||||
|
||||
with self.configuration.engine.begin() as conn:
|
||||
conn.execute(
|
||||
self.configuration.peersTable.update().values({
|
||||
"name": name,
|
||||
"private_key": private_key,
|
||||
"DNS": dns_addresses,
|
||||
"endpoint_allowed_ip": endpoint_allowed_ip,
|
||||
"mtu": mtu,
|
||||
"keepalive": keepalive,
|
||||
"preshared_key": preshared_key,
|
||||
"advanced_security": advanced_security
|
||||
}).where(
|
||||
self.configuration.peersTable.c.id == self.id
|
||||
)
|
||||
)
|
||||
self.configuration.getPeers()
|
||||
return True, None
|
||||
except subprocess.CalledProcessError as exc:
|
||||
return False, exc.output.decode("UTF-8").strip()
|
||||
322
src/modules/AmneziaWireguardConfiguration.py
Normal file
322
src/modules/AmneziaWireguardConfiguration.py
Normal file
@@ -0,0 +1,322 @@
|
||||
"""
|
||||
AmneziaWG Configuration
|
||||
"""
|
||||
import random, sqlalchemy, os, subprocess, re, uuid
|
||||
from flask import current_app
|
||||
from .PeerJobs import PeerJobs
|
||||
from .AmneziaWGPeer import AmneziaWGPeer
|
||||
from .PeerShareLinks import PeerShareLinks
|
||||
from .Utilities import RegexMatch
|
||||
from .WireguardConfiguration import WireguardConfiguration
|
||||
from .DashboardWebHooks import DashboardWebHooks
|
||||
|
||||
|
||||
class AmneziaWireguardConfiguration(WireguardConfiguration):
|
||||
def __init__(self, DashboardConfig,
|
||||
AllPeerJobs: PeerJobs,
|
||||
AllPeerShareLinks: PeerShareLinks,
|
||||
DashboardWebHooks: DashboardWebHooks,
|
||||
name: str = None, data: dict = None, backup: dict = None, startup: bool = False):
|
||||
self.Jc = 0
|
||||
self.Jmin = 0
|
||||
self.Jmax = 0
|
||||
self.S1 = 0
|
||||
self.S2 = 0
|
||||
self.H1 = 1
|
||||
self.H2 = 2
|
||||
self.H3 = 3
|
||||
self.H4 = 4
|
||||
|
||||
super().__init__(DashboardConfig, AllPeerJobs, AllPeerShareLinks, DashboardWebHooks, name, data, backup, startup, wg=False)
|
||||
|
||||
def toJson(self):
|
||||
self.Status = self.getStatus()
|
||||
return {
|
||||
"Status": self.Status,
|
||||
"Name": self.Name,
|
||||
"PrivateKey": self.PrivateKey,
|
||||
"PublicKey": self.PublicKey,
|
||||
"Address": self.Address,
|
||||
"ListenPort": self.ListenPort,
|
||||
"PreUp": self.PreUp,
|
||||
"PreDown": self.PreDown,
|
||||
"PostUp": self.PostUp,
|
||||
"PostDown": self.PostDown,
|
||||
"SaveConfig": self.SaveConfig,
|
||||
"Info": self.configurationInfo.model_dump(),
|
||||
"DataUsage": {
|
||||
"Total": sum(list(map(lambda x: x.cumu_data + x.total_data, self.Peers))),
|
||||
"Sent": sum(list(map(lambda x: x.cumu_sent + x.total_sent, self.Peers))),
|
||||
"Receive": sum(list(map(lambda x: x.cumu_receive + x.total_receive, self.Peers)))
|
||||
},
|
||||
"ConnectedPeers": len(list(filter(lambda x: x.status == "running", self.Peers))),
|
||||
"TotalPeers": len(self.Peers),
|
||||
"Protocol": self.Protocol,
|
||||
"Table": self.Table,
|
||||
"Jc": self.Jc,
|
||||
"Jmin": self.Jmin,
|
||||
"Jmax": self.Jmax,
|
||||
"S1": self.S1,
|
||||
"S2": self.S2,
|
||||
"H1": self.H1,
|
||||
"H2": self.H2,
|
||||
"H3": self.H3,
|
||||
"H4": self.H4
|
||||
}
|
||||
|
||||
def createDatabase(self, dbName = None):
|
||||
if dbName is None:
|
||||
dbName = self.Name
|
||||
|
||||
|
||||
self.peersTable = sqlalchemy.Table(
|
||||
dbName, self.metadata,
|
||||
sqlalchemy.Column('id', sqlalchemy.String(255), nullable=False, primary_key=True),
|
||||
sqlalchemy.Column('private_key', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('DNS', sqlalchemy.Text),
|
||||
sqlalchemy.Column('advanced_security', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('endpoint_allowed_ip', sqlalchemy.Text),
|
||||
sqlalchemy.Column('name', sqlalchemy.Text),
|
||||
sqlalchemy.Column('total_receive', sqlalchemy.Float),
|
||||
sqlalchemy.Column('total_sent', sqlalchemy.Float),
|
||||
sqlalchemy.Column('total_data', sqlalchemy.Float),
|
||||
sqlalchemy.Column('endpoint', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('status', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('latest_handshake', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('allowed_ip', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('cumu_receive', sqlalchemy.Float),
|
||||
sqlalchemy.Column('cumu_sent', sqlalchemy.Float),
|
||||
sqlalchemy.Column('cumu_data', sqlalchemy.Float),
|
||||
sqlalchemy.Column('mtu', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('keepalive', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('remote_endpoint', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('preshared_key', sqlalchemy.String(255)),
|
||||
extend_existing=True
|
||||
)
|
||||
self.peersRestrictedTable = sqlalchemy.Table(
|
||||
f'{dbName}_restrict_access', self.metadata,
|
||||
sqlalchemy.Column('id', sqlalchemy.String(255), nullable=False, primary_key=True),
|
||||
sqlalchemy.Column('private_key', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('DNS', sqlalchemy.Text),
|
||||
sqlalchemy.Column('advanced_security', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('endpoint_allowed_ip', sqlalchemy.Text),
|
||||
sqlalchemy.Column('name', sqlalchemy.Text),
|
||||
sqlalchemy.Column('total_receive', sqlalchemy.Float),
|
||||
sqlalchemy.Column('total_sent', sqlalchemy.Float),
|
||||
sqlalchemy.Column('total_data', sqlalchemy.Float),
|
||||
sqlalchemy.Column('endpoint', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('status', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('latest_handshake', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('allowed_ip', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('cumu_receive', sqlalchemy.Float),
|
||||
sqlalchemy.Column('cumu_sent', sqlalchemy.Float),
|
||||
sqlalchemy.Column('cumu_data', sqlalchemy.Float),
|
||||
sqlalchemy.Column('mtu', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('keepalive', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('remote_endpoint', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('preshared_key', sqlalchemy.String(255)),
|
||||
extend_existing=True
|
||||
)
|
||||
self.peersTransferTable = sqlalchemy.Table(
|
||||
f'{dbName}_transfer', self.metadata,
|
||||
sqlalchemy.Column('id', sqlalchemy.String(255), nullable=False),
|
||||
sqlalchemy.Column('total_receive', sqlalchemy.Float),
|
||||
sqlalchemy.Column('total_sent', sqlalchemy.Float),
|
||||
sqlalchemy.Column('total_data', sqlalchemy.Float),
|
||||
sqlalchemy.Column('cumu_receive', sqlalchemy.Float),
|
||||
sqlalchemy.Column('cumu_sent', sqlalchemy.Float),
|
||||
sqlalchemy.Column('cumu_data', sqlalchemy.Float),
|
||||
sqlalchemy.Column('time', (sqlalchemy.DATETIME if self.DashboardConfig.GetConfig("Database", "type")[1] == 'sqlite' else sqlalchemy.TIMESTAMP),
|
||||
server_default=sqlalchemy.func.now()),
|
||||
extend_existing=True
|
||||
)
|
||||
self.peersDeletedTable = sqlalchemy.Table(
|
||||
f'{dbName}_deleted', self.metadata,
|
||||
sqlalchemy.Column('id', sqlalchemy.String(255), nullable=False),
|
||||
sqlalchemy.Column('private_key', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('DNS', sqlalchemy.Text),
|
||||
sqlalchemy.Column('advanced_security', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('endpoint_allowed_ip', sqlalchemy.Text),
|
||||
sqlalchemy.Column('name', sqlalchemy.Text),
|
||||
sqlalchemy.Column('total_receive', sqlalchemy.Float),
|
||||
sqlalchemy.Column('total_sent', sqlalchemy.Float),
|
||||
sqlalchemy.Column('total_data', sqlalchemy.Float),
|
||||
sqlalchemy.Column('endpoint', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('status', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('latest_handshake', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('allowed_ip', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('cumu_receive', sqlalchemy.Float),
|
||||
sqlalchemy.Column('cumu_sent', sqlalchemy.Float),
|
||||
sqlalchemy.Column('cumu_data', sqlalchemy.Float),
|
||||
sqlalchemy.Column('mtu', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('keepalive', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('remote_endpoint', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('preshared_key', sqlalchemy.String(255)),
|
||||
extend_existing=True
|
||||
)
|
||||
self.infoTable = sqlalchemy.Table(
|
||||
'ConfigurationsInfo', self.metadata,
|
||||
sqlalchemy.Column('ID', sqlalchemy.String(255), primary_key=True),
|
||||
sqlalchemy.Column('Info', sqlalchemy.Text),
|
||||
extend_existing=True
|
||||
)
|
||||
|
||||
self.peersHistoryEndpointTable = sqlalchemy.Table(
|
||||
f'{dbName}_history_endpoint', self.metadata,
|
||||
sqlalchemy.Column('id', sqlalchemy.String(255), nullable=False),
|
||||
sqlalchemy.Column('endpoint', sqlalchemy.String(255), nullable=False),
|
||||
sqlalchemy.Column('time',
|
||||
(sqlalchemy.DATETIME if self.DashboardConfig.GetConfig("Database", "type")[1] == 'sqlite' else sqlalchemy.TIMESTAMP)),
|
||||
extend_existing=True
|
||||
)
|
||||
|
||||
self.metadata.create_all(self.engine)
|
||||
|
||||
def getPeers(self):
|
||||
self.Peers.clear()
|
||||
if self.configurationFileChanged():
|
||||
with open(self.configPath, 'r') as configFile:
|
||||
p = []
|
||||
pCounter = -1
|
||||
content = configFile.read().split('\n')
|
||||
try:
|
||||
if "[Peer]" not in content:
|
||||
current_app.logger.info(f"{self.Name} config has no [Peer] section")
|
||||
return
|
||||
|
||||
peerStarts = content.index("[Peer]")
|
||||
content = content[peerStarts:]
|
||||
for i in content:
|
||||
if not RegexMatch("#(.*)", i) and not RegexMatch(";(.*)", i):
|
||||
if i == "[Peer]":
|
||||
pCounter += 1
|
||||
p.append({})
|
||||
p[pCounter]["name"] = ""
|
||||
else:
|
||||
if len(i) > 0:
|
||||
split = re.split(r'\s*=\s*', i, 1)
|
||||
if len(split) == 2:
|
||||
p[pCounter][split[0]] = split[1]
|
||||
|
||||
if RegexMatch("#Name# = (.*)", i):
|
||||
split = re.split(r'\s*=\s*', i, 1)
|
||||
if len(split) == 2:
|
||||
p[pCounter]["name"] = split[1]
|
||||
with self.engine.begin() as conn:
|
||||
for i in p:
|
||||
if "PublicKey" in i.keys():
|
||||
tempPeer = conn.execute(self.peersTable.select().where(
|
||||
self.peersTable.columns.id == i['PublicKey']
|
||||
)).mappings().fetchone()
|
||||
if tempPeer is None:
|
||||
tempPeer = {
|
||||
"id": i['PublicKey'],
|
||||
"advanced_security": i.get('AdvancedSecurity', 'off'),
|
||||
"private_key": "",
|
||||
"DNS": self.DashboardConfig.GetConfig("Peers", "peer_global_DNS")[1],
|
||||
"endpoint_allowed_ip": self.DashboardConfig.GetConfig("Peers", "peer_endpoint_allowed_ip")[
|
||||
1],
|
||||
"name": i.get("name"),
|
||||
"total_receive": 0,
|
||||
"total_sent": 0,
|
||||
"total_data": 0,
|
||||
"endpoint": "N/A",
|
||||
"status": "stopped",
|
||||
"latest_handshake": "N/A",
|
||||
"allowed_ip": i.get("AllowedIPs", "N/A"),
|
||||
"cumu_receive": 0,
|
||||
"cumu_sent": 0,
|
||||
"cumu_data": 0,
|
||||
"mtu": self.DashboardConfig.GetConfig("Peers", "peer_mtu")[1],
|
||||
"keepalive": self.DashboardConfig.GetConfig("Peers", "peer_keep_alive")[1],
|
||||
"remote_endpoint": self.DashboardConfig.GetConfig("Peers", "remote_endpoint")[1],
|
||||
"preshared_key": i["PresharedKey"] if "PresharedKey" in i.keys() else ""
|
||||
}
|
||||
conn.execute(
|
||||
self.peersTable.insert().values(tempPeer)
|
||||
)
|
||||
else:
|
||||
conn.execute(
|
||||
self.peersTable.update().values({
|
||||
"allowed_ip": i.get("AllowedIPs", "N/A")
|
||||
}).where(
|
||||
self.peersTable.columns.id == i['PublicKey']
|
||||
)
|
||||
)
|
||||
self.Peers.append(AmneziaWGPeer(tempPeer, self))
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"{self.Name} getPeers() Error", e)
|
||||
else:
|
||||
with self.engine.connect() as conn:
|
||||
existingPeers = conn.execute(self.peersTable.select()).mappings().fetchall()
|
||||
for i in existingPeers:
|
||||
self.Peers.append(AmneziaWGPeer(i, self))
|
||||
|
||||
def addPeers(self, peers: list) -> tuple[bool, list, str]:
|
||||
result = {
|
||||
"message": None,
|
||||
"peers": []
|
||||
}
|
||||
try:
|
||||
with self.engine.begin() as conn:
|
||||
for i in peers:
|
||||
newPeer = {
|
||||
"id": i['id'],
|
||||
"private_key": i['private_key'],
|
||||
"DNS": i['DNS'],
|
||||
"endpoint_allowed_ip": i['endpoint_allowed_ip'],
|
||||
"name": i['name'],
|
||||
"total_receive": 0,
|
||||
"total_sent": 0,
|
||||
"total_data": 0,
|
||||
"endpoint": "N/A",
|
||||
"status": "stopped",
|
||||
"latest_handshake": "N/A",
|
||||
"allowed_ip": i.get("allowed_ip", "N/A"),
|
||||
"cumu_receive": 0,
|
||||
"cumu_sent": 0,
|
||||
"cumu_data": 0,
|
||||
"mtu": i['mtu'],
|
||||
"keepalive": i['keepalive'],
|
||||
"remote_endpoint": self.DashboardConfig.GetConfig("Peers", "remote_endpoint")[1],
|
||||
"preshared_key": i["preshared_key"],
|
||||
"advanced_security": i['advanced_security']
|
||||
}
|
||||
conn.execute(
|
||||
self.peersTable.insert().values(newPeer)
|
||||
)
|
||||
for p in peers:
|
||||
presharedKeyExist = len(p['preshared_key']) > 0
|
||||
rd = random.Random()
|
||||
uid = str(uuid.UUID(int=rd.getrandbits(128), version=4))
|
||||
if presharedKeyExist:
|
||||
with open(uid, "w+") as f:
|
||||
f.write(p['preshared_key'])
|
||||
|
||||
subprocess.check_output(
|
||||
f"{self.Protocol} set {self.Name} peer {p['id']} allowed-ips {p['allowed_ip'].replace(' ', '')}{f' preshared-key {uid}' if presharedKeyExist else ''}",
|
||||
shell=True, stderr=subprocess.STDOUT)
|
||||
if presharedKeyExist:
|
||||
os.remove(uid)
|
||||
subprocess.check_output(
|
||||
f"{self.Protocol}-quick save {self.Name}", shell=True, stderr=subprocess.STDOUT)
|
||||
self.getPeers()
|
||||
for p in peers:
|
||||
p = self.searchPeer(p['id'])
|
||||
if p[0]:
|
||||
result['peers'].append(p[1])
|
||||
self.DashboardWebHooks.RunWebHook("peer_created", {
|
||||
"configuration": self.Name,
|
||||
"peers": list(map(lambda k : k['id'], peers))
|
||||
})
|
||||
except Exception as e:
|
||||
current_app.logger.error("Add peers error", e)
|
||||
return False, [], str(e)
|
||||
return True, result['peers'], ""
|
||||
|
||||
def getRestrictedPeers(self):
|
||||
self.RestrictedPeers = []
|
||||
with self.engine.connect() as conn:
|
||||
restricted = conn.execute(self.peersRestrictedTable.select()).mappings().fetchall()
|
||||
for i in restricted:
|
||||
self.RestrictedPeers.append(AmneziaWGPeer(i, self))
|
||||
25
src/modules/ConnectionString.py
Normal file
25
src/modules/ConnectionString.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import configparser
|
||||
import os
|
||||
from sqlalchemy_utils import database_exists, create_database
|
||||
from flask import current_app
|
||||
|
||||
def ConnectionString(database) -> str:
|
||||
parser = configparser.ConfigParser(strict=False)
|
||||
parser.read_file(open('wg-dashboard.ini', "r+"))
|
||||
sqlitePath = os.path.join("db")
|
||||
if not os.path.isdir(sqlitePath):
|
||||
os.mkdir(sqlitePath)
|
||||
if parser.get("Database", "type") == "postgresql":
|
||||
cn = f'postgresql+psycopg://{parser.get("Database", "username")}:{parser.get("Database", "password")}@{parser.get("Database", "host")}/{database}'
|
||||
elif parser.get("Database", "type") == "mysql":
|
||||
cn = f'mysql+pymysql://{parser.get("Database", "username")}:{parser.get("Database", "password")}@{parser.get("Database", "host")}/{database}'
|
||||
else:
|
||||
cn = f'sqlite:///{os.path.join(sqlitePath, f"{database}.db")}'
|
||||
try:
|
||||
if not database_exists(cn):
|
||||
create_database(cn)
|
||||
except Exception as e:
|
||||
current_app.logger.error("Database error. Terminating...", e)
|
||||
exit(1)
|
||||
|
||||
return cn
|
||||
11
src/modules/DashboardAPIKey.py
Normal file
11
src/modules/DashboardAPIKey.py
Normal file
@@ -0,0 +1,11 @@
|
||||
"""
|
||||
Dashboard API Key
|
||||
"""
|
||||
class DashboardAPIKey:
|
||||
def __init__(self, Key: str, CreatedAt: str, ExpiredAt: str):
|
||||
self.Key = Key
|
||||
self.CreatedAt = CreatedAt
|
||||
self.ExpiredAt = ExpiredAt
|
||||
|
||||
def toJson(self):
|
||||
return self.__dict__
|
||||
498
src/modules/DashboardClients.py
Normal file
498
src/modules/DashboardClients.py
Normal file
@@ -0,0 +1,498 @@
|
||||
import datetime
|
||||
import hashlib
|
||||
import random
|
||||
import uuid
|
||||
|
||||
import bcrypt
|
||||
import pyotp
|
||||
import sqlalchemy as db
|
||||
import requests
|
||||
|
||||
from .ConnectionString import ConnectionString
|
||||
from .DashboardClientsPeerAssignment import DashboardClientsPeerAssignment
|
||||
from .DashboardClientsTOTP import DashboardClientsTOTP
|
||||
from .DashboardOIDC import DashboardOIDC
|
||||
from .Utilities import ValidatePasswordStrength
|
||||
from .DashboardLogger import DashboardLogger
|
||||
from flask import session
|
||||
|
||||
|
||||
class DashboardClients:
|
||||
def __init__(self, wireguardConfigurations):
|
||||
self.logger = DashboardLogger()
|
||||
self.engine = db.create_engine(ConnectionString("wgdashboard"))
|
||||
self.metadata = db.MetaData()
|
||||
self.OIDC = DashboardOIDC("Client")
|
||||
|
||||
self.dashboardClientsTable = db.Table(
|
||||
'DashboardClients', self.metadata,
|
||||
db.Column('ClientID', db.String(255), nullable=False, primary_key=True),
|
||||
db.Column('Email', db.String(255), nullable=False, index=True),
|
||||
db.Column('Password', db.String(500)),
|
||||
db.Column('TotpKey', db.String(500)),
|
||||
db.Column('TotpKeyVerified', db.Integer),
|
||||
db.Column('CreatedDate',
|
||||
(db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP),
|
||||
server_default=db.func.now()),
|
||||
db.Column('DeletedDate',
|
||||
(db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP)),
|
||||
extend_existing=True,
|
||||
)
|
||||
|
||||
self.dashboardOIDCClientsTable = db.Table(
|
||||
'DashboardOIDCClients', self.metadata,
|
||||
db.Column('ClientID', db.String(255), nullable=False, primary_key=True),
|
||||
db.Column('Email', db.String(255), nullable=False, index=True),
|
||||
db.Column('ProviderIssuer', db.String(500), nullable=False, index=True),
|
||||
db.Column('ProviderSubject', db.String(500), nullable=False, index=True),
|
||||
db.Column('CreatedDate',
|
||||
(db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP),
|
||||
server_default=db.func.now()),
|
||||
db.Column('DeletedDate',
|
||||
(db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP)),
|
||||
extend_existing=True,
|
||||
)
|
||||
|
||||
self.dashboardClientsInfoTable = db.Table(
|
||||
'DashboardClientsInfo', self.metadata,
|
||||
db.Column('ClientID', db.String(255), nullable=False, primary_key=True),
|
||||
db.Column('Name', db.String(500)),
|
||||
extend_existing=True,
|
||||
)
|
||||
|
||||
self.dashboardClientsPasswordResetLinkTable = db.Table(
|
||||
'DashboardClientsPasswordResetLinks', self.metadata,
|
||||
db.Column('ResetToken', db.String(255), nullable=False, primary_key=True),
|
||||
db.Column('ClientID', db.String(255), nullable=False),
|
||||
db.Column('CreatedDate',
|
||||
(db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP),
|
||||
server_default=db.func.now()),
|
||||
db.Column('ExpiryDate',
|
||||
(db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP)),
|
||||
extend_existing=True
|
||||
)
|
||||
|
||||
self.metadata.create_all(self.engine)
|
||||
self.Clients = {}
|
||||
self.ClientsRaw = []
|
||||
self.__getClients()
|
||||
self.DashboardClientsTOTP = DashboardClientsTOTP()
|
||||
self.DashboardClientsPeerAssignment = DashboardClientsPeerAssignment(wireguardConfigurations)
|
||||
|
||||
def __getClients(self):
|
||||
with self.engine.connect() as conn:
|
||||
localClients = db.select(
|
||||
self.dashboardClientsTable.c.ClientID,
|
||||
self.dashboardClientsTable.c.Email,
|
||||
db.literal_column("'Local'").label("ClientGroup")
|
||||
).where(
|
||||
self.dashboardClientsTable.c.DeletedDate.is_(None)
|
||||
)
|
||||
|
||||
oidcClients = db.select(
|
||||
self.dashboardOIDCClientsTable.c.ClientID,
|
||||
self.dashboardOIDCClientsTable.c.Email,
|
||||
self.dashboardOIDCClientsTable.c.ProviderIssuer.label("ClientGroup"),
|
||||
).where(
|
||||
self.dashboardOIDCClientsTable.c.DeletedDate.is_(None)
|
||||
)
|
||||
|
||||
union = db.union(localClients, oidcClients).alias("U")
|
||||
|
||||
self.ClientsRaw = conn.execute(
|
||||
db.select(
|
||||
union,
|
||||
self.dashboardClientsInfoTable.c.Name
|
||||
).outerjoin(self.dashboardClientsInfoTable,
|
||||
union.c.ClientID == self.dashboardClientsInfoTable.c.ClientID)
|
||||
).mappings().fetchall()
|
||||
|
||||
groups = set(map(lambda c: c.get('ClientGroup'), self.ClientsRaw))
|
||||
gr = {}
|
||||
for g in groups:
|
||||
gr[(g if g == 'Local' else self.OIDC.GetProviderNameByIssuer(g))] = [
|
||||
dict(x) for x in list(
|
||||
filter(lambda c: c.get('ClientGroup') == g, self.ClientsRaw)
|
||||
)
|
||||
]
|
||||
self.Clients = gr
|
||||
|
||||
def GetAllClients(self):
|
||||
self.__getClients()
|
||||
return self.Clients
|
||||
|
||||
def GetAllClientsRaw(self):
|
||||
self.__getClients()
|
||||
return self.ClientsRaw
|
||||
|
||||
def GetClient(self, ClientID) -> dict[str, str] | None:
|
||||
c = filter(lambda x: x['ClientID'] == ClientID, self.ClientsRaw)
|
||||
client = next((dict(client) for client in c), None)
|
||||
if client is not None:
|
||||
client['ClientGroup'] = self.OIDC.GetProviderNameByIssuer(client['ClientGroup'])
|
||||
return client
|
||||
|
||||
def GetClientProfile(self, ClientID):
|
||||
with self.engine.connect() as conn:
|
||||
return dict(conn.execute(
|
||||
db.select(
|
||||
*[c for c in self.dashboardClientsInfoTable.c if c.name != 'ClientID']
|
||||
).where(
|
||||
self.dashboardClientsInfoTable.c.ClientID == ClientID
|
||||
)
|
||||
).mappings().fetchone())
|
||||
|
||||
def SignIn_ValidatePassword(self, Email, Password) -> bool:
|
||||
if not all([Email, Password]):
|
||||
return False
|
||||
existingClient = self.SignIn_UserExistence(Email)
|
||||
if existingClient:
|
||||
return bcrypt.checkpw(Password.encode("utf-8"), existingClient.get("Password").encode("utf-8"))
|
||||
return False
|
||||
|
||||
def SignIn_UserExistence(self, Email):
|
||||
with self.engine.connect() as conn:
|
||||
existingClient = conn.execute(
|
||||
self.dashboardClientsTable.select().where(
|
||||
self.dashboardClientsTable.c.Email == Email
|
||||
)
|
||||
).mappings().fetchone()
|
||||
return existingClient
|
||||
|
||||
def SignIn_OIDC_UserExistence(self, data: dict[str, str]):
|
||||
with self.engine.connect() as conn:
|
||||
existingClient = conn.execute(
|
||||
self.dashboardOIDCClientsTable.select().where(
|
||||
db.and_(
|
||||
self.dashboardOIDCClientsTable.c.ProviderIssuer == data.get('iss'),
|
||||
self.dashboardOIDCClientsTable.c.ProviderSubject == data.get('sub'),
|
||||
)
|
||||
)
|
||||
).mappings().fetchone()
|
||||
return existingClient
|
||||
|
||||
def SignUp_OIDC(self, data: dict[str, str]) -> tuple[bool, str] | tuple[bool, None]:
|
||||
if not self.SignIn_OIDC_UserExistence(data):
|
||||
with self.engine.begin() as conn:
|
||||
newClientUUID = str(uuid.uuid4())
|
||||
conn.execute(
|
||||
self.dashboardOIDCClientsTable.insert().values({
|
||||
"ClientID": newClientUUID,
|
||||
"Email": data.get('email', ''),
|
||||
"ProviderIssuer": data.get('iss', ''),
|
||||
"ProviderSubject": data.get('sub', '')
|
||||
})
|
||||
)
|
||||
conn.execute(
|
||||
self.dashboardClientsInfoTable.insert().values({
|
||||
"ClientID": newClientUUID,
|
||||
"Name": data.get("name")
|
||||
})
|
||||
)
|
||||
self.logger.log(Message=f"User {data.get('email', '')} from {data.get('iss', '')} signed up")
|
||||
self.__getClients()
|
||||
return True, newClientUUID
|
||||
return False, "User already signed up"
|
||||
|
||||
def SignOut_OIDC(self):
|
||||
sessionPayload = session.get('OIDCPayload')
|
||||
status, oidc_config = self.OIDC.GetProviderConfiguration(session.get('SignInPayload').get("Provider"))
|
||||
signOut = requests.get(
|
||||
oidc_config.get("end_session_endpoint"),
|
||||
params={
|
||||
'id_token_hint': session.get('SignInPayload').get("Payload").get('sid')
|
||||
}
|
||||
)
|
||||
return True
|
||||
|
||||
def SignIn_OIDC(self, **kwargs):
|
||||
status, data = self.OIDC.VerifyToken(**kwargs)
|
||||
if not status:
|
||||
return False, "Sign in failed. Reason: " + data
|
||||
existingClient = self.SignIn_OIDC_UserExistence(data)
|
||||
if not existingClient:
|
||||
status, newClientUUID = self.SignUp_OIDC(data)
|
||||
session['ClientID'] = newClientUUID
|
||||
else:
|
||||
session['ClientID'] = existingClient.get("ClientID")
|
||||
session['SignInMethod'] = 'OIDC'
|
||||
session['SignInPayload'] = {
|
||||
"Provider": kwargs.get('provider'),
|
||||
"Payload": data
|
||||
}
|
||||
return True, data
|
||||
|
||||
def SignIn(self, Email, Password) -> tuple[bool, str]:
|
||||
if not all([Email, Password]):
|
||||
return False, "Please fill in all fields"
|
||||
existingClient = self.SignIn_UserExistence(Email)
|
||||
if existingClient:
|
||||
checkPwd = self.SignIn_ValidatePassword(Email, Password)
|
||||
if checkPwd:
|
||||
session['SignInMethod'] = 'local'
|
||||
session['Email'] = Email
|
||||
session['ClientID'] = existingClient.get("ClientID")
|
||||
return True, self.DashboardClientsTOTP.GenerateToken(existingClient.get("ClientID"))
|
||||
return False, "Email or Password is incorrect"
|
||||
|
||||
def SignIn_GetTotp(self, Token: str, UserProvidedTotp: str = None) -> tuple[bool, str] or tuple[bool, None, str]:
|
||||
status, data = self.DashboardClientsTOTP.GetTotp(Token)
|
||||
|
||||
if not status:
|
||||
return False, "TOTP Token is invalid"
|
||||
if UserProvidedTotp is None:
|
||||
if data.get('TotpKeyVerified') is None:
|
||||
return True, pyotp.totp.TOTP(data.get('TotpKey')).provisioning_uri(name=data.get('Email'),
|
||||
issuer_name="WGDashboard Client")
|
||||
else:
|
||||
totpMatched = pyotp.totp.TOTP(data.get('TotpKey')).verify(UserProvidedTotp)
|
||||
if not totpMatched:
|
||||
return False, "TOTP is does not match"
|
||||
else:
|
||||
self.DashboardClientsTOTP.RevokeToken(Token)
|
||||
if data.get('TotpKeyVerified') is None:
|
||||
with self.engine.begin() as conn:
|
||||
conn.execute(
|
||||
self.dashboardClientsTable.update().values({
|
||||
'TotpKeyVerified': 1
|
||||
}).where(
|
||||
self.dashboardClientsTable.c.ClientID == data.get('ClientID')
|
||||
)
|
||||
)
|
||||
|
||||
return True, None
|
||||
|
||||
def SignUp(self, Email, Password, ConfirmPassword) -> tuple[bool, str] or tuple[bool, None]:
|
||||
try:
|
||||
if not all([Email, Password, ConfirmPassword]):
|
||||
return False, "Please fill in all fields"
|
||||
if Password != ConfirmPassword:
|
||||
return False, "Passwords does not match"
|
||||
|
||||
existingClient = self.SignIn_UserExistence(Email)
|
||||
if existingClient:
|
||||
return False, "Email already signed up"
|
||||
|
||||
pwStrength, msg = ValidatePasswordStrength(Password)
|
||||
if not pwStrength:
|
||||
return pwStrength, msg
|
||||
|
||||
with self.engine.begin() as conn:
|
||||
newClientUUID = str(uuid.uuid4())
|
||||
totpKey = pyotp.random_base32()
|
||||
encodePassword = Password.encode('utf-8')
|
||||
conn.execute(
|
||||
self.dashboardClientsTable.insert().values({
|
||||
"ClientID": newClientUUID,
|
||||
"Email": Email,
|
||||
"Password": bcrypt.hashpw(encodePassword, bcrypt.gensalt()).decode("utf-8"),
|
||||
"TotpKey": totpKey
|
||||
})
|
||||
)
|
||||
conn.execute(
|
||||
self.dashboardClientsInfoTable.insert().values({
|
||||
"ClientID": newClientUUID
|
||||
})
|
||||
)
|
||||
self.logger.log(Message=f"User {Email} signed up")
|
||||
self.__getClients()
|
||||
except Exception as e:
|
||||
self.logger.log(Status="false", Message=f"Signed up failed, reason: {str(e)}")
|
||||
return False, "Signe up failed."
|
||||
|
||||
return True, None
|
||||
|
||||
def GetClientAssignedPeers(self, ClientID):
|
||||
return self.DashboardClientsPeerAssignment.GetAssignedPeers(ClientID)
|
||||
|
||||
def ResetClientPassword(self, ClientID, NewPassword, ConfirmNewPassword) -> tuple[bool, str] | tuple[bool, None]:
|
||||
c = self.GetClient(ClientID)
|
||||
if c is None:
|
||||
return False, "Client does not exist"
|
||||
|
||||
if NewPassword != ConfirmNewPassword:
|
||||
return False, "New passwords does not match"
|
||||
|
||||
pwStrength, msg = ValidatePasswordStrength(NewPassword)
|
||||
if not pwStrength:
|
||||
return pwStrength, msg
|
||||
try:
|
||||
with self.engine.begin() as conn:
|
||||
conn.execute(
|
||||
self.dashboardClientsTable.update().values({
|
||||
"TotpKeyVerified": None,
|
||||
"TotpKey": pyotp.random_base32(),
|
||||
"Password": bcrypt.hashpw(NewPassword.encode('utf-8'), bcrypt.gensalt()).decode("utf-8"),
|
||||
}).where(
|
||||
self.dashboardClientsTable.c.ClientID == ClientID
|
||||
)
|
||||
)
|
||||
self.logger.log(Message=f"User {ClientID} reset password and TOTP")
|
||||
except Exception as e:
|
||||
self.logger.log(Status="false", Message=f"User {ClientID} reset password failed, reason: {str(e)}")
|
||||
return False, "Reset password failed."
|
||||
|
||||
|
||||
return True, None
|
||||
|
||||
def UpdateClientPassword(self, ClientID, CurrentPassword, NewPassword, ConfirmNewPassword) -> tuple[bool, str] | tuple[bool, None]:
|
||||
c = self.GetClient(ClientID)
|
||||
if c is None:
|
||||
return False, "Client does not exist"
|
||||
|
||||
if not all([CurrentPassword, NewPassword, ConfirmNewPassword]):
|
||||
return False, "Please fill in all fields"
|
||||
|
||||
if not self.SignIn_ValidatePassword(c.get('Email'), CurrentPassword):
|
||||
return False, "Current password does not match"
|
||||
|
||||
if NewPassword != ConfirmNewPassword:
|
||||
return False, "New passwords does not match"
|
||||
|
||||
pwStrength, msg = ValidatePasswordStrength(NewPassword)
|
||||
if not pwStrength:
|
||||
return pwStrength, msg
|
||||
try:
|
||||
with self.engine.begin() as conn:
|
||||
conn.execute(
|
||||
self.dashboardClientsTable.update().values({
|
||||
"Password": bcrypt.hashpw(NewPassword.encode('utf-8'), bcrypt.gensalt()).decode("utf-8"),
|
||||
}).where(
|
||||
self.dashboardClientsTable.c.ClientID == ClientID
|
||||
)
|
||||
)
|
||||
self.logger.log(Message=f"User {ClientID} updated password")
|
||||
except Exception as e:
|
||||
self.logger.log(Status="false", Message=f"User {ClientID} update password failed, reason: {str(e)}")
|
||||
return False, "Update password failed."
|
||||
return True, None
|
||||
|
||||
def UpdateClientProfile(self, ClientID, Name):
|
||||
try:
|
||||
with self.engine.begin() as conn:
|
||||
conn.execute(
|
||||
self.dashboardClientsInfoTable.update().values({
|
||||
"Name": Name
|
||||
}).where(
|
||||
self.dashboardClientsInfoTable.c.ClientID == ClientID
|
||||
)
|
||||
)
|
||||
self.logger.log(Message=f"User {ClientID} updated name to {Name}")
|
||||
except Exception as e:
|
||||
self.logger.log(Status="false", Message=f"User {ClientID} updated name to {Name} failed")
|
||||
return False
|
||||
return True
|
||||
|
||||
def DeleteClient(self, ClientID):
|
||||
try:
|
||||
with self.engine.begin() as conn:
|
||||
client = self.GetClient(ClientID)
|
||||
if client.get("ClientGroup") == "Local":
|
||||
conn.execute(
|
||||
self.dashboardClientsTable.delete().where(
|
||||
self.dashboardClientsTable.c.ClientID == ClientID
|
||||
)
|
||||
)
|
||||
else:
|
||||
conn.execute(
|
||||
self.dashboardOIDCClientsTable.delete().where(
|
||||
self.dashboardOIDCClientsTable.c.ClientID == ClientID
|
||||
)
|
||||
)
|
||||
conn.execute(
|
||||
self.dashboardClientsInfoTable.delete().where(
|
||||
self.dashboardClientsInfoTable.c.ClientID == ClientID
|
||||
)
|
||||
)
|
||||
self.DashboardClientsPeerAssignment.UnassignPeers(ClientID)
|
||||
self.__getClients()
|
||||
except Exception as e:
|
||||
self.logger.log(Status="false", Message=f"Failed to delete {ClientID}")
|
||||
return False
|
||||
return True
|
||||
|
||||
'''
|
||||
For WGDashboard Admin to Manage Clients
|
||||
'''
|
||||
|
||||
def GenerateClientPasswordResetToken(self, ClientID) -> bool | str:
|
||||
c = self.GetClient(ClientID)
|
||||
if c is None:
|
||||
return False
|
||||
|
||||
newToken = str(random.randint(0, 999999)).zfill(6)
|
||||
with self.engine.begin() as conn:
|
||||
conn.execute(
|
||||
self.dashboardClientsPasswordResetLinkTable.update().values({
|
||||
"ExpiryDate": datetime.datetime.now()
|
||||
|
||||
}).where(
|
||||
db.and_(
|
||||
self.dashboardClientsPasswordResetLinkTable.c.ClientID == ClientID,
|
||||
self.dashboardClientsPasswordResetLinkTable.c.ExpiryDate > db.func.now()
|
||||
)
|
||||
)
|
||||
)
|
||||
conn.execute(
|
||||
self.dashboardClientsPasswordResetLinkTable.insert().values({
|
||||
"ResetToken": newToken,
|
||||
"ClientID": ClientID,
|
||||
"CreatedDate": datetime.datetime.now(),
|
||||
"ExpiryDate": datetime.datetime.now() + datetime.timedelta(minutes=30)
|
||||
})
|
||||
)
|
||||
|
||||
return newToken
|
||||
|
||||
def ValidateClientPasswordResetToken(self, ClientID, Token):
    """
    Check whether a password-reset token is currently valid for a client.

    A token is valid when it belongs to the client, matches exactly, and has
    not yet expired. Returns True/False.
    """
    if self.GetClient(ClientID) is None:
        return False
    table = self.dashboardClientsPasswordResetLinkTable
    query = table.select().where(
        db.and_(
            table.c.ClientID == ClientID,
            table.c.ResetToken == Token,
            table.c.ExpiryDate > datetime.datetime.now()
        )
    )
    with self.engine.connect() as conn:
        match = conn.execute(query).mappings().fetchone()
    return match is not None
|
||||
|
||||
def RevokeClientPasswordResetToken(self, ClientID, Token):
    """
    Invalidate one specific reset token by moving its expiry to now.

    Always returns True.
    """
    table = self.dashboardClientsPasswordResetLinkTable
    stmt = table.update().values({
        "ExpiryDate": datetime.datetime.now()
    }).where(
        db.and_(
            table.c.ClientID == ClientID,
            table.c.ResetToken == Token
        )
    )
    with self.engine.begin() as conn:
        conn.execute(stmt)
    return True
|
||||
|
||||
def GetAssignedPeerClients(self, ConfigurationName, PeerID):
    """
    Return the assignments of a peer, each enriched with client details.

    For every assignment of (ConfigurationName, PeerID), the client is looked
    up once and attached as `Client` when found.
    """
    assignments = self.DashboardClientsPeerAssignment.GetAssignedClients(ConfigurationName, PeerID)
    for a in assignments:
        # Look the client up once and reuse it (the original fetched it twice).
        client = self.GetClient(a.ClientID)
        if client is not None:
            a.Client = client
    return assignments
|
||||
|
||||
def GetClientAssignedPeersGrouped(self, ClientID):
    """
    Group a client's assigned peers by configuration name.

    Returns {configuration_name: [peer_dict, ...]} for an existing client,
    or None when the client does not exist.
    """
    if self.GetClient(ClientID) is None:
        return None
    grouped = {}
    # Single pass instead of one filter() scan per configuration name.
    for peer in self.DashboardClientsPeerAssignment.GetAssignedPeers(ClientID):
        grouped.setdefault(peer['configuration_name'], []).append(peer)
    return grouped
|
||||
|
||||
def AssignClient(self, ConfigurationName, PeerID, ClientID) -> tuple[bool, dict[str, str]] | tuple[bool, None]:
    # Thin delegation to the peer-assignment manager. Note the argument order
    # here (ConfigurationName, PeerID, ClientID) differs from the callee's
    # (ClientID, ConfigurationName, PeerID) signature.
    return self.DashboardClientsPeerAssignment.AssignClient(ClientID, ConfigurationName, PeerID)
|
||||
|
||||
def UnassignClient(self, AssignmentID):
    # Delegate removal of a single assignment to the peer-assignment manager.
    return self.DashboardClientsPeerAssignment.UnassignClients(AssignmentID)
|
||||
|
||||
159
src/modules/DashboardClientsPeerAssignment.py
Normal file
159
src/modules/DashboardClientsPeerAssignment.py
Normal file
@@ -0,0 +1,159 @@
|
||||
import datetime
|
||||
import uuid
|
||||
|
||||
from .ConnectionString import ConnectionString
|
||||
from .DashboardLogger import DashboardLogger
|
||||
import sqlalchemy as db
|
||||
from .WireguardConfiguration import WireguardConfiguration
|
||||
|
||||
class Assignment:
    """A single client-to-peer assignment row, hydrated from the database."""

    def __init__(self, **kwargs):
        fetch = kwargs.get
        self.AssignmentID: str = fetch('AssignmentID')
        self.ClientID: str = fetch('ClientID')
        self.ConfigurationName: str = fetch('ConfigurationName')
        self.PeerID: str = fetch('PeerID')
        self.AssignedDate: datetime.datetime = fetch('AssignedDate')
        self.UnassignedDate: datetime.datetime = fetch('UnassignedDate')
        # Minimal client stub; callers may replace it with full client data.
        self.Client: dict = {
            "ClientID": self.ClientID
        }

    def toJson(self):
        """Serialize for API responses; dates use the 'YYYY-MM-DD HH:MM:SS' format."""
        if self.UnassignedDate is not None:
            unassigned = self.UnassignedDate.strftime("%Y-%m-%d %H:%M:%S")
        else:
            unassigned = self.UnassignedDate
        return {
            "AssignmentID": self.AssignmentID,
            "Client": self.Client,
            "ConfigurationName": self.ConfigurationName,
            "PeerID": self.PeerID,
            "AssignedDate": self.AssignedDate.strftime("%Y-%m-%d %H:%M:%S"),
            "UnassignedDate": unassigned
        }
|
||||
|
||||
class DashboardClientsPeerAssignment:
    """
    Manages which dashboard clients are assigned to which WireGuard peers.

    Assignments live in the `DashboardClientsPeerAssignment` table; a row is
    "active" while its UnassignedDate is NULL. An in-memory cache
    (`self.assignments`) mirrors the active rows.
    """

    def __init__(self, wireguardConfigurations: dict[str, WireguardConfiguration]):
        self.logger = DashboardLogger()
        self.engine = db.create_engine(ConnectionString("wgdashboard"))
        self.metadata = db.MetaData()
        self.wireguardConfigurations = wireguardConfigurations
        self.dashboardClientsPeerAssignmentTable = db.Table(
            'DashboardClientsPeerAssignment', self.metadata,
            db.Column('AssignmentID', db.String(255), nullable=False, primary_key=True),
            db.Column('ClientID', db.String(255), nullable=False, index=True),
            db.Column('ConfigurationName', db.String(255)),
            db.Column('PeerID', db.String(500)),
            # SQLite has no TIMESTAMP type; pick the dialect-appropriate type.
            db.Column('AssignedDate',
                      (db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP),
                      server_default=db.func.now()),
            db.Column('UnassignedDate',
                      (db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP)),
            extend_existing=True
        )
        self.metadata.create_all(self.engine)
        self.assignments: list[Assignment] = []
        self.__getAssignments()

    def __getAssignments(self):
        """Refresh the in-memory cache with every active (not yet unassigned) row."""
        with self.engine.connect() as conn:
            assignments = []
            get = conn.execute(
                self.dashboardClientsPeerAssignmentTable.select().where(
                    self.dashboardClientsPeerAssignmentTable.c.UnassignedDate.is_(None)
                )
            ).mappings().fetchall()
            for a in get:
                assignments.append(Assignment(**a))
            self.assignments = assignments

    def AssignClient(self, ClientID, ConfigurationName, PeerID):
        """
        Assign a client to a peer.

        Returns (True, row_data) on success; (False, None) when the assignment
        already exists, the configuration is unknown, or the peer is not found.
        """
        existing = list(
            filter(lambda e:
                   e.ClientID == ClientID and
                   e.ConfigurationName == ConfigurationName and
                   e.PeerID == PeerID, self.assignments)
        )
        if len(existing) == 0:
            if ConfigurationName in self.wireguardConfigurations.keys():
                config = self.wireguardConfigurations.get(ConfigurationName)
                peer = list(filter(lambda x: x.id == PeerID, config.Peers))
                if len(peer) == 1:
                    with self.engine.begin() as conn:
                        data = {
                            "AssignmentID": str(uuid.uuid4()),
                            "ClientID": ClientID,
                            "ConfigurationName": ConfigurationName,
                            "PeerID": PeerID
                        }
                        conn.execute(
                            self.dashboardClientsPeerAssignmentTable.insert().values(data)
                        )
                    self.__getAssignments()
                    return True, data
        return False, None

    def UnassignClients(self, AssignmentID):
        """Unassign a single assignment by ID. Returns False when the ID is not active."""
        existing = list(
            filter(lambda e:
                   e.AssignmentID == AssignmentID, self.assignments)
        )
        if not existing:
            return False
        with self.engine.begin() as conn:
            conn.execute(
                self.dashboardClientsPeerAssignmentTable.update().values({
                    "UnassignedDate": datetime.datetime.now()
                }).where(
                    self.dashboardClientsPeerAssignmentTable.c.AssignmentID == AssignmentID
                )
            )
        self.__getAssignments()
        return True

    def UnassignPeers(self, ClientID):
        """Unassign every active assignment of a client (used when deleting the client)."""
        with self.engine.begin() as conn:
            conn.execute(
                self.dashboardClientsPeerAssignmentTable.update().values({
                    "UnassignedDate": datetime.datetime.now()
                }).where(
                    db.and_(
                        self.dashboardClientsPeerAssignmentTable.c.ClientID == ClientID,
                        self.dashboardClientsPeerAssignmentTable.c.UnassignedDate.is_(db.null())
                    )
                )
            )
        self.__getAssignments()
        return True

    def GetAssignedClients(self, ConfigurationName, PeerID) -> list[Assignment]:
        """Return the active assignments of one peer."""
        self.__getAssignments()
        return list(filter(
            lambda c: c.ConfigurationName == ConfigurationName and
                      c.PeerID == PeerID, self.assignments))

    def GetAssignedPeers(self, ClientID):
        """
        Return detail dicts for every peer assigned to a client.

        Assignments pointing at a configuration that no longer exists are
        skipped instead of raising KeyError (robustness fix).
        """
        self.__getAssignments()

        peers = []
        assigned = filter(lambda e:
                          e.ClientID == ClientID, self.assignments)

        for a in assigned:
            config = self.wireguardConfigurations.get(a.ConfigurationName)
            if config is None:
                # Configuration was removed; its assignments cannot be resolved.
                continue
            for p in filter(lambda e: e.id == a.PeerID, config.Peers):
                peers.append({
                    'assignment_id': a.AssignmentID,
                    'protocol': config.Protocol,
                    'id': p.id,
                    'private_key': p.private_key,
                    'name': p.name,
                    'received_data': p.total_receive + p.cumu_receive,
                    'sent_data': p.total_sent + p.cumu_sent,
                    'data': p.total_data + p.cumu_data,
                    'status': p.status,
                    'latest_handshake': p.latest_handshake,
                    'allowed_ip': p.allowed_ip,
                    'jobs': p.jobs,
                    'configuration_name': a.ConfigurationName,
                    'peer_configuration_data': p.downloadPeer()
                })
        return peers
|
||||
82
src/modules/DashboardClientsTOTP.py
Normal file
82
src/modules/DashboardClientsTOTP.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import datetime
|
||||
import hashlib
|
||||
import uuid
|
||||
|
||||
import sqlalchemy as db
|
||||
from .ConnectionString import ConnectionString
|
||||
|
||||
|
||||
class DashboardClientsTOTP:
    # Issues short-lived (10 minute) tokens that link a client to a TOTP
    # verification session, backed by the DashboardClientsTOTPTokens table.
    def __init__(self):
        self.engine = db.create_engine(ConnectionString("wgdashboard"))
        self.metadata = db.MetaData()
        self.dashboardClientsTOTPTable = db.Table(
            'DashboardClientsTOTPTokens', self.metadata,
            db.Column("Token", db.String(500), primary_key=True, index=True),
            db.Column("ClientID", db.String(500), index=True),
            db.Column(
                # SQLite has no TIMESTAMP type; pick the dialect-appropriate one.
                "ExpireTime", (db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP)
            )
        )
        self.metadata.create_all(self.engine)
        # Reflect the existing DashboardClients table, created elsewhere,
        # so GetTotp can join against it.
        self.metadata.reflect(self.engine)
        self.dashboardClientsTable = self.metadata.tables['DashboardClients']

    def GenerateToken(self, ClientID) -> str:
        """Expire any live tokens for the client, then issue a new token valid for 10 minutes."""
        # SHA-512 over (client, timestamp, uuid4) gives a unique opaque token.
        token = hashlib.sha512(f"{ClientID}_{datetime.datetime.now()}_{uuid.uuid4()}".encode()).hexdigest()
        with self.engine.begin() as conn:
            # Invalidate any still-valid tokens first: one live token per client.
            conn.execute(
                self.dashboardClientsTOTPTable.update().values({
                    "ExpireTime": datetime.datetime.now()
                }).where(
                    db.and_(self.dashboardClientsTOTPTable.c.ClientID == ClientID, self.dashboardClientsTOTPTable.c.ExpireTime > datetime.datetime.now())
                )
            )
            conn.execute(
                self.dashboardClientsTOTPTable.insert().values({
                    "Token": token,
                    "ClientID": ClientID,
                    "ExpireTime": datetime.datetime.now() + datetime.timedelta(minutes=10)
                })
            )
        return token

    def RevokeToken(self, Token) -> bool:
        """Expire a token immediately. Returns False only when the update raises."""
        try:
            with self.engine.begin() as conn:
                conn.execute(
                    self.dashboardClientsTOTPTable.update().values({
                        "ExpireTime": datetime.datetime.now()
                    }).where(
                        self.dashboardClientsTOTPTable.c.Token == Token
                    )
                )
        except Exception as e:
            return False
        return True

    def GetTotp(self, token: str) -> tuple[bool, dict] or tuple[bool, None]:
        """
        Resolve an unexpired token to its client's TOTP details.

        Returns (True, row_dict) with ClientID/Email/TotpKey/TotpKeyVerified
        joined from DashboardClients, or (False, None) when the token is
        unknown or expired.

        NOTE(review): the token is not consumed here — confirm callers enforce
        single use via RevokeToken.
        """
        with self.engine.connect() as conn:
            totp = conn.execute(
                db.select(
                    self.dashboardClientsTable.c.ClientID,
                    self.dashboardClientsTable.c.Email,
                    self.dashboardClientsTable.c.TotpKey,
                    self.dashboardClientsTable.c.TotpKeyVerified,
                ).select_from(
                    self.dashboardClientsTOTPTable
                ).where(
                    db.and_(
                        self.dashboardClientsTOTPTable.c.Token == token,
                        self.dashboardClientsTOTPTable.c.ExpireTime > datetime.datetime.now()
                    )
                ).join(
                    self.dashboardClientsTable,
                    self.dashboardClientsTOTPTable.c.ClientID == self.dashboardClientsTable.c.ClientID
                )
            ).mappings().fetchone()
            if totp:
                return True, dict(totp)
            return False, None
|
||||
|
||||
|
||||
285
src/modules/DashboardConfig.py
Normal file
285
src/modules/DashboardConfig.py
Normal file
@@ -0,0 +1,285 @@
|
||||
"""
|
||||
Dashboard Configuration
|
||||
"""
|
||||
import configparser, secrets, os, pyotp, ipaddress, bcrypt
|
||||
from sqlalchemy_utils import database_exists, create_database
|
||||
import sqlalchemy as db
|
||||
from datetime import datetime
|
||||
from typing import Any
|
||||
from flask import current_app
|
||||
from .ConnectionString import ConnectionString
|
||||
from .Utilities import (
|
||||
GetRemoteEndpoint, ValidateDNSAddress
|
||||
)
|
||||
from .DashboardAPIKey import DashboardAPIKey
|
||||
|
||||
|
||||
|
||||
class DashboardConfig:
    # Owns wg-dashboard.ini: defaults, validation, read/write helpers, and the
    # DashboardAPIKeys table for API-key based access.
    DashboardVersion = 'v4.3.0.1'
    ConfigurationPath = os.getenv('CONFIGURATION_PATH', '.')
    ConfigurationFilePath = os.path.join(ConfigurationPath, 'wg-dashboard.ini')

    def __init__(self):
        # Create the ini file on first run.
        # NOTE(review): both open() calls below leak their file handles —
        # consider context managers.
        if not os.path.exists(DashboardConfig.ConfigurationFilePath):
            open(DashboardConfig.ConfigurationFilePath, "x")
        self.__config = configparser.RawConfigParser(strict=False)
        self.__config.read_file(open(DashboardConfig.ConfigurationFilePath, "r+"))
        # Keys never exposed through the API or toJson().
        self.hiddenAttribute = ["totp_key", "auth_req"]
        # Every known section/key with its default value; missing keys are
        # backfilled below on startup.
        self.__default = {
            "Account": {
                "username": "admin",
                "password": "admin",
                "enable_totp": "false",
                "totp_verified": "false",
                "totp_key": pyotp.random_base32()
            },
            "Server": {
                "wg_conf_path": "/etc/wireguard",
                "awg_conf_path": "/etc/amnezia/amneziawg",
                "app_prefix": "",
                "app_ip": "0.0.0.0",
                "app_port": "10086",
                "auth_req": "true",
                "version": DashboardConfig.DashboardVersion,
                "dashboard_refresh_interval": "60000",
                "dashboard_peer_list_display": "grid",
                "dashboard_sort": "status",
                "dashboard_theme": "dark",
                "dashboard_api_key": "false",
                "dashboard_language": "en-US"
            },
            "Peers": {
                "peer_global_DNS": "1.1.1.1",
                "peer_endpoint_allowed_ip": "0.0.0.0/0",
                "peer_display_mode": "grid",
                "remote_endpoint": GetRemoteEndpoint(),
                "peer_MTU": "1420",
                "peer_keep_alive": "21"
            },
            "Other": {
                "welcome_session": "true"
            },
            "Database":{
                # NOTE(review): "port" is stored but getConnectionString()
                # never uses it — confirm whether it should.
                "type": "sqlite",
                "host": "",
                "port": "",
                "username": "",
                "password": ""
            },
            "Email":{
                "server": "",
                "port": "",
                "encryption": "",
                "username": "",
                "email_password": "",
                "authentication_required": "true",
                "send_from": "",
                "email_template": ""
            },
            "OIDC": {
                "admin_enable": "false",
                "client_enable": "false"
            },
            "Clients": {
                "enable": "true",
            },
            "WireGuardConfiguration": {
                "autostart": ""
            }
        }

        # Backfill any default that is missing from the ini file.
        for section, keys in self.__default.items():
            for key, value in keys.items():
                exist, currentData = self.GetConfig(section, key)
                if not exist:
                    self.SetConfig(section, key, value, True)

        self.engine = db.create_engine(ConnectionString('wgdashboard'))
        self.dbMetadata = db.MetaData()
        self.__createAPIKeyTable()
        self.DashboardAPIKeys = self.__getAPIKeys()
        self.APIAccessed = False
        self.SetConfig("Server", "version", DashboardConfig.DashboardVersion)

    def getConnectionString(self, database) -> str or None:
        """Build the SQLAlchemy URL for `database` per the [Database] section, creating the DB if needed."""
        sqlitePath = os.path.join(DashboardConfig.ConfigurationPath, "db")

        if not os.path.isdir(sqlitePath):
            os.mkdir(sqlitePath)

        if self.GetConfig("Database", "type")[1] == "postgresql":
            cn = f'postgresql+psycopg2://{self.GetConfig("Database", "username")[1]}:{self.GetConfig("Database", "password")[1]}@{self.GetConfig("Database", "host")[1]}/{database}'
        elif self.GetConfig("Database", "type")[1] == "mysql":
            cn = f'mysql+mysqldb://{self.GetConfig("Database", "username")[1]}:{self.GetConfig("Database", "password")[1]}@{self.GetConfig("Database", "host")[1]}/{database}'
        else:
            # Default: file-backed SQLite under <ConfigurationPath>/db/.
            cn = f'sqlite:///{os.path.join(sqlitePath, f"{database}.db")}'
        if not database_exists(cn):
            create_database(cn)
        return cn

    def __createAPIKeyTable(self):
        # Table of issued API keys; a key is active while ExpiredAt is NULL
        # or in the future.
        self.apiKeyTable = db.Table('DashboardAPIKeys', self.dbMetadata,
                                    db.Column("Key", db.String(255), nullable=False, primary_key=True),
                                    db.Column("CreatedAt",
                                              (db.DATETIME if self.GetConfig('Database', 'type')[1] == 'sqlite' else db.TIMESTAMP),
                                              server_default=db.func.now()
                                              ),
                                    db.Column("ExpiredAt",
                                              (db.DATETIME if self.GetConfig('Database', 'type')[1] == 'sqlite' else db.TIMESTAMP)
                                              )
                                    )
        self.dbMetadata.create_all(self.engine)

    def __getAPIKeys(self) -> list[DashboardAPIKey]:
        """Load all non-expired API keys; returns [] on any DB error."""
        try:
            with self.engine.connect() as conn:
                keys = conn.execute(self.apiKeyTable.select().where(
                    db.or_(self.apiKeyTable.columns.ExpiredAt.is_(None), self.apiKeyTable.columns.ExpiredAt > datetime.now())
                )).fetchall()
                fKeys = []
                for k in keys:
                    fKeys.append(DashboardAPIKey(k[0], k[1].strftime("%Y-%m-%d %H:%M:%S"), (k[2].strftime("%Y-%m-%d %H:%M:%S") if k[2] else None)))
                return fKeys
        except Exception as e:
            current_app.logger.error("API Keys error", e)
            return []

    def createAPIKeys(self, ExpiredAt = None):
        """Issue a new URL-safe API key (optionally with an expiry) and refresh the cache."""
        newKey = secrets.token_urlsafe(32)
        with self.engine.begin() as conn:
            conn.execute(
                self.apiKeyTable.insert().values({
                    "Key": newKey,
                    "ExpiredAt": ExpiredAt
                })
            )

        self.DashboardAPIKeys = self.__getAPIKeys()

    def deleteAPIKey(self, key):
        """Revoke an API key by setting its expiry to now, then refresh the cache."""
        with self.engine.begin() as conn:
            conn.execute(
                self.apiKeyTable.update().values({
                    "ExpiredAt": datetime.now(),
                }).where(self.apiKeyTable.columns.Key == key)
            )

        self.DashboardAPIKeys = self.__getAPIKeys()

    def __configValidation(self, section : str, key: str, value: Any) -> tuple[bool, str]:
        """Validate a proposed config value; returns (ok, error_message)."""
        # NOTE(review): the last clause requires section == 'Peer' (a section
        # that does not exist; the real one is 'Peers'), so this empty-value
        # check can never fire — confirm the intended condition.
        if (type(value) is str and len(value) == 0
                and section not in ['Email', 'WireGuardConfiguration'] and
                (section == 'Peer' and key == 'peer_global_dns')):
            return False, "Field cannot be empty!"
        if section == "Peers" and key == "peer_global_dns" and len(value) > 0:
            return ValidateDNSAddress(value)
        if section == "Peers" and key == "peer_endpoint_allowed_ip":
            # Comma-separated list; every entry must parse as an IP network.
            value = value.split(",")
            for i in value:
                i = i.strip()
                try:
                    ipaddress.ip_network(i, strict=False)
                except Exception as e:
                    return False, str(e)
        if section == "Server" and key == "wg_conf_path":
            if not os.path.exists(value):
                return False, f"{value} is not a valid path"
        if section == "Account" and key == "password":
            # For password changes `value` is a dict of
            # currentPassword/newPassword/repeatNewPassword.
            if self.GetConfig("Account", "password")[0]:
                if not self.__checkPassword(
                        value["currentPassword"], self.GetConfig("Account", "password")[1].encode("utf-8")):
                    return False, "Current password does not match."
                if value["newPassword"] != value["repeatNewPassword"]:
                    return False, "New passwords does not match"
        return True, ""

    def generatePassword(self, plainTextPassword: str):
        """Hash a plaintext password with bcrypt (returns bytes)."""
        return bcrypt.hashpw(plainTextPassword.encode("utf-8"), bcrypt.gensalt())

    def __checkPassword(self, plainTextPassword: str, hashedPassword: bytes):
        """Check a plaintext password against a stored bcrypt hash."""
        return bcrypt.checkpw(plainTextPassword.encode("utf-8"), hashedPassword)

    def SetConfig(self, section: str, key: str, value: str | bool | list[str] | dict[str, str], init: bool = False) -> tuple[bool, str] | tuple[bool, None]:
        """
        Validate, coerce, store, and persist one config value.

        With init=True (startup backfill) validation is skipped and new
        sections/keys may be created. Returns (ok, message_or_None).
        """
        if key in self.hiddenAttribute and not init:
            return False, None

        if not init:
            valid, msg = self.__configValidation(section, key, value)
            if not valid:
                return False, msg

        if section == "Account" and key == "password":
            # Passwords are stored bcrypt-hashed, never in plaintext.
            if not init:
                value = self.generatePassword(value["newPassword"]).decode("utf-8")
            else:
                value = self.generatePassword(value).decode("utf-8")

        if section == "Email" and key == "email_template":
            # Escape newlines etc. so the template survives the ini format.
            value = value.encode('unicode_escape').decode('utf-8')

        if section == "Server" and key == "wg_conf_path":
            if not os.path.exists(value):
                return False, "Path does not exist"

        if section not in self.__config:
            if init:
                self.__config[section] = {}
            else:
                return False, "Section does not exist"

        if ((key not in self.__config[section].keys() and init) or
                (key in self.__config[section].keys())):
            # Coerce to the ini file's string representation.
            if type(value) is bool:
                if value:
                    self.__config[section][key] = "true"
                else:
                    self.__config[section][key] = "false"
            elif type(value) in [int, float]:
                self.__config[section][key] = str(value)
            elif type(value) is list:
                # Lists are stored "a||b||c"; GetConfig splits them back.
                self.__config[section][key] = "||".join(value).strip("||")
            else:
                self.__config[section][key] = fr"{value}"
            return self.SaveConfig(), ""
        else:
            return False, f"{key} does not exist under {section}"

    def SaveConfig(self) -> bool:
        """Write the in-memory config back to wg-dashboard.ini; True on success."""
        try:
            with open(DashboardConfig.ConfigurationFilePath, "w+", encoding='utf-8') as configFile:
                self.__config.write(configFile)
            return True
        except Exception as e:
            return False

    def GetConfig(self, section, key) ->tuple[bool, bool] | tuple[bool, str] | tuple[bool, list[str]] | tuple[bool, None]:
        """
        Read one config value as (exists, value).

        Booleans are decoded from "true"/"false" style strings, the autostart
        list from its "||"-joined form, and the email template from its
        escaped form; everything else is returned as the raw string.
        """
        if section not in self.__config:
            return False, None

        if key not in self.__config[section]:
            return False, None

        if section == "Email" and key == "email_template":
            return True, self.__config[section][key].encode('utf-8').decode('unicode_escape')

        if section == "WireGuardConfiguration" and key == "autostart":
            return True, list(filter(lambda x: len(x) > 0, self.__config[section][key].split("||")))

        if self.__config[section][key] in ["1", "yes", "true", "on"]:
            return True, True

        if self.__config[section][key] in ["0", "no", "false", "off"]:
            return True, False

        return True, self.__config[section][key]

    def toJson(self) -> dict[str, dict[Any, Any]]:
        """Dump every section/key (minus hidden attributes) with decoded values."""
        the_dict = {}

        for section in self.__config.sections():
            the_dict[section] = {}
            for key, val in self.__config.items(section):
                if key not in self.hiddenAttribute:
                    the_dict[section][key] = self.GetConfig(section, key)[1]
        return the_dict
|
||||
44
src/modules/DashboardLogger.py
Normal file
44
src/modules/DashboardLogger.py
Normal file
@@ -0,0 +1,44 @@
|
||||
"""
|
||||
Dashboard Logger Class
|
||||
"""
|
||||
import uuid
|
||||
import sqlalchemy as db
|
||||
from flask import current_app
|
||||
from .ConnectionString import ConnectionString
|
||||
|
||||
|
||||
class DashboardLogger:
    """
    Writes dashboard access/audit log entries to the `DashboardLog` table in
    the wgdashboard_log database.
    """

    def __init__(self):
        self.engine = db.create_engine(ConnectionString("wgdashboard_log"))
        self.metadata = db.MetaData()
        self.dashboardLoggerTable = db.Table('DashboardLog', self.metadata,
                                             db.Column('LogID', db.String(255), nullable=False, primary_key=True),
                                             # SQLite has no TIMESTAMP type; pick the dialect-appropriate one.
                                             db.Column('LogDate',
                                                       (db.DATETIME if 'sqlite:///' in ConnectionString("wgdashboard") else db.TIMESTAMP),
                                                       server_default=db.func.now()),
                                             db.Column('URL', db.String(255)),
                                             db.Column('IP', db.String(255)),
                                             db.Column('Status', db.String(255), nullable=False),
                                             db.Column('Message', db.Text), extend_existing=True,
                                             )
        self.metadata.create_all(self.engine)
        self.log(Message="WGDashboard started")

    def log(self, URL: str = "", IP: str = "", Status: str = "true", Message: str = "") -> bool:
        """
        Insert one log row. Returns True on success, False when the insert
        fails (the failure itself is reported through the Flask app logger).
        """
        try:
            with self.engine.begin() as conn:
                conn.execute(
                    self.dashboardLoggerTable.insert().values(
                        LogID=str(uuid.uuid4()),
                        URL=URL,
                        IP=IP,
                        Status=Status,
                        Message=Message
                    )
                )
            return True
        except Exception as e:
            # Pass the exception via exc_info: logger.error("msg", e) would
            # treat `e` as a %-format argument and raise a logging error.
            current_app.logger.error("Access Log Error", exc_info=e)
            return False
|
||||
142
src/modules/DashboardOIDC.py
Normal file
142
src/modules/DashboardOIDC.py
Normal file
@@ -0,0 +1,142 @@
|
||||
import os
|
||||
import json
|
||||
import requests
|
||||
from jose import jwt
|
||||
import certifi
|
||||
from flask import current_app
|
||||
|
||||
class DashboardOIDC:
    """
    Loads OIDC provider settings from wg-dashboard-oidc-providers.json and
    exchanges authorization codes for verified ID-token payloads.
    """
    ConfigurationPath = os.getenv('CONFIGURATION_PATH', '.')
    ConfigurationFilePath = os.path.join(ConfigurationPath, 'wg-dashboard-oidc-providers.json')
    def __init__(self, mode):
        # mode selects the "Admin" or "Client" provider group from the file.
        self.mode = mode
        self.providers: dict[str, dict] = {}
        self.provider_secret: dict[str, str] = {}
        self.__default = {
            "Admin": {
                'Provider': {
                    'client_id': '',
                    'client_secret': '',
                    'issuer': '',
                },
            },
            "Client": {
                'Provider': {
                    'client_id': '',
                    'client_secret': '',
                    'issuer': '',
                },
            }
        }

        # Seed the config file with an empty template on first run.
        if not os.path.exists(DashboardOIDC.ConfigurationFilePath):
            with open(DashboardOIDC.ConfigurationFilePath, "w+") as f:
                encoder = json.JSONEncoder(indent=4)
                f.write(encoder.encode(self.__default))

        self.ReadFile()

    def GetProviders(self):
        """Return the dict of successfully-registered providers."""
        return self.providers

    def GetProviderNameByIssuer(self, issuer):
        """Map an issuer URL back to its configured provider name; falls back to the issuer itself."""
        for (key, val) in self.providers.items():
            if val.get('openid_configuration').get('issuer') == issuer:
                return key
        return issuer

    def VerifyToken(self, provider, code, redirect_uri):
        """
        Exchange an authorization code for tokens and validate the ID token
        signature, audience and issuer.

        Returns (True, claims) on success or (False, reason) on any failure.
        """
        try:
            if not all([provider, code, redirect_uri]):
                return False, "Please provide all parameters"

            if provider not in self.providers.keys():
                return False, "Provider does not exist"

            secret = self.provider_secret.get(provider)
            oidc_config_status, oidc_config = self.GetProviderConfiguration(provider)
            provider_info = self.providers.get(provider)

            data = {
                "grant_type": "authorization_code",
                "code": code,
                "redirect_uri": redirect_uri,
                "client_id": provider_info.get('client_id'),
                "client_secret": secret
            }

            try:
                tokens = requests.post(oidc_config.get('token_endpoint'), data=data).json()
                if not all([tokens.get('access_token'), tokens.get('id_token')]):
                    return False, tokens.get('error_description', None)
            except Exception as e:
                # exc_info attaches the traceback; extra positional args would
                # be treated as %-format arguments and break the log call.
                current_app.logger.error("Verify token failed", exc_info=e)
                return False, str(e)

            access_token = tokens.get('access_token')
            id_token = tokens.get('id_token')
            jwks_uri = oidc_config.get("jwks_uri")
            issuer = oidc_config.get("issuer")
            jwks = requests.get(jwks_uri, verify=certifi.where()).json()

            headers = jwt.get_unverified_header(id_token)
            kid = headers["kid"]

            # Pick the provider signing key matching the token's key id.
            key = next(k for k in jwks["keys"] if k["kid"] == kid)

            payload = jwt.decode(
                id_token,
                key,
                algorithms=[key["alg"]],
                audience=provider_info.get('client_id'),
                issuer=issuer,
                access_token=access_token
            )
            return True, payload
        except Exception as e:
            # (Fixed: was a copy-pasted "Read OIDC file failed" message with
            # bogus positional logger arguments, plus a debug print().)
            current_app.logger.error(
                f"Verify token failed for provider={provider}, redirect_uri={redirect_uri}",
                exc_info=e
            )
            return False, str(e)

    def GetProviderConfiguration(self, provider_name):
        """Fetch the provider's OpenID discovery document. Returns (ok, config_or_None)."""
        if not all([provider_name]):
            return False, None
        provider = self.providers.get(provider_name)
        try:
            oidc_config = requests.get(
                f"{provider.get('issuer').strip('/')}/.well-known/openid-configuration",
                verify=certifi.where()
            ).json()
        except Exception as e:
            current_app.logger.error("Failed to get OpenID Configuration of " + provider.get('issuer'), exc_info=e)
            return False, None
        return True, oidc_config

    def ReadFile(self):
        """
        Load provider definitions for the current mode and register every
        provider whose discovery document can be fetched.
        """
        decoder = json.JSONDecoder()
        try:
            # Context manager so the config file handle is closed promptly
            # (the original open(...).read() leaked it).
            with open(DashboardOIDC.ConfigurationFilePath, 'r') as f:
                providers = decoder.decode(f.read())
            providers = providers[self.mode]
            for k in providers.keys():
                # Only register fully-configured providers.
                if all([providers[k]['client_id'], providers[k]['client_secret'], providers[k]['issuer']]):
                    try:
                        oidc_config = requests.get(
                            f"{providers[k]['issuer'].strip('/')}/.well-known/openid-configuration",
                            timeout=3,
                            verify=certifi.where()
                        ).json()
                        self.providers[k] = {
                            'client_id': providers[k]['client_id'],
                            'issuer': providers[k]['issuer'].strip('/'),
                            'openid_configuration': oidc_config
                        }
                        # Secrets are kept separately so GetProviders() never exposes them.
                        self.provider_secret[k] = providers[k]['client_secret']
                        current_app.logger.info(f"Registered OIDC Provider: {k}")
                    except Exception as e:
                        current_app.logger.error(f"Failed to register OIDC config for {k}", exc_info=e)
        except Exception as e:
            current_app.logger.error('Read OIDC file failed. Reason: ' + str(e))
            return False
|
||||
117
src/modules/DashboardPlugins.py
Normal file
117
src/modules/DashboardPlugins.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import os
|
||||
import sys
|
||||
import importlib.util
|
||||
from pathlib import Path
|
||||
from typing import Dict, Callable, List, Optional
|
||||
import threading
|
||||
|
||||
|
||||
class DashboardPlugins:
|
||||
|
||||
def __init__(self, app, WireguardConfigurations, directory: str = 'plugins'):
|
||||
self.directory = Path('plugins')
|
||||
self.loadedPlugins: dict[str, Callable] = {}
|
||||
self.errorPlugins: List[str] = []
|
||||
self.logger = app.logger
|
||||
self.WireguardConfigurations = WireguardConfigurations
|
||||
|
||||
def startThreads(self):
|
||||
self.loadAllPlugins()
|
||||
self.executeAllPlugins()
|
||||
|
||||
def preparePlugins(self) -> list[Path]:
|
||||
|
||||
readyPlugins = []
|
||||
|
||||
if not self.directory.exists():
|
||||
os.mkdir(self.directory)
|
||||
return []
|
||||
|
||||
for plugin in self.directory.iterdir():
|
||||
if plugin.is_dir():
|
||||
codeFile = plugin / "main.py"
|
||||
if codeFile.exists():
|
||||
self.logger.info(f"Prepared plugin: {plugin.name}")
|
||||
readyPlugins.append(plugin)
|
||||
|
||||
return readyPlugins
|
||||
|
||||
def loadPlugin(self, path: Path) -> Optional[Callable]:
|
||||
pluginName = path.name
|
||||
codeFile = path / "main.py"
|
||||
|
||||
try:
|
||||
spec = importlib.util.spec_from_file_location(
|
||||
f"WGDashboardPlugin_{pluginName}",
|
||||
codeFile
|
||||
)
|
||||
|
||||
if spec is None or spec.loader is None:
|
||||
raise ImportError(f"Failed to create spec for {pluginName}")
|
||||
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
|
||||
plugin_dir_str = str(path)
|
||||
if plugin_dir_str not in sys.path:
|
||||
sys.path.insert(0, plugin_dir_str)
|
||||
|
||||
try:
|
||||
spec.loader.exec_module(module)
|
||||
finally:
|
||||
if plugin_dir_str in sys.path:
|
||||
sys.path.remove(plugin_dir_str)
|
||||
|
||||
if hasattr(module, 'main'):
|
||||
main_func = getattr(module, 'main')
|
||||
if callable(main_func):
|
||||
self.logger.info(f"Successfully loaded plugin [{pluginName}]")
|
||||
return main_func
|
||||
else:
|
||||
raise AttributeError(f"'main' in {pluginName} is not callable")
|
||||
else:
|
||||
raise AttributeError(f"Plugin {pluginName} does not have a 'main' function")
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Failed to load the plugin [{pluginName}]. Reason: {str(e)}")
|
||||
self.errorPlugins.append(pluginName)
|
||||
return None
|
||||
|
||||
def loadAllPlugins(self):
|
||||
self.loadedPlugins.clear()
|
||||
self.errorPlugins.clear()
|
||||
|
||||
preparedPlugins = self.preparePlugins()
|
||||
|
||||
for plugin in preparedPlugins:
|
||||
pluginName = plugin.name
|
||||
mainFunction = self.loadPlugin(plugin)
|
||||
|
||||
if mainFunction:
|
||||
self.loadedPlugins[pluginName] = mainFunction
|
||||
if self.errorPlugins:
|
||||
self.logger.warning(f"Failed to load {len(self.errorPlugins)} plugin(s): {self.errorPlugins}")
|
||||
|
||||
def executePlugin(self, pluginName: str):
    """Run a loaded plugin's entry point in a daemon thread.

    Returns True when the thread was started, False when the plugin is not
    loaded or starting the thread raised.
    """
    if pluginName not in self.loadedPlugins.keys():
        self.logger.error(f"Failed to execute plugin [{pluginName}]. Reason: Not loaded")
        return False

    entryPoint = self.loadedPlugins.get(pluginName)

    try:
        worker = threading.Thread(
            target=entryPoint,
            args=(self.WireguardConfigurations,),
            daemon=True,
        )
        worker.name = f'WGDashboardPlugin_{pluginName}'
        worker.start()
        if worker.is_alive():
            self.logger.info(f"Execute plugin [{pluginName}] success. PID: {worker.native_id}")
    except Exception as e:
        self.logger.error(f"Failed to execute plugin [{pluginName}]. Reason: {str(e)}")
        return False

    return True
|
||||
|
||||
def executeAllPlugins(self):
    """Kick off every loaded plugin through executePlugin()."""
    for pluginName in self.loadedPlugins.keys():
        self.executePlugin(pluginName)
|
||||
287
src/modules/DashboardWebHooks.py
Normal file
287
src/modules/DashboardWebHooks.py
Normal file
@@ -0,0 +1,287 @@
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
import urllib.parse
|
||||
import uuid
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import requests
|
||||
from pydantic import BaseModel, field_serializer
|
||||
import sqlalchemy as db
|
||||
from .ConnectionString import ConnectionString
|
||||
from flask import current_app
|
||||
|
||||
# Events a webhook may subscribe to.
WebHookActions = ['peer_created', 'peer_deleted', 'peer_updated']


class WebHook(BaseModel):
    """A configured outbound webhook endpoint and its delivery settings."""
    WebHookID: str = ''
    PayloadURL: str = ''
    ContentType: str = 'application/json'
    # Mapping of arbitrary row ids -> {"key": headerName, "value": headerValue}.
    Headers: dict[str, dict[str, str]] = {}
    VerifySSL: bool = True
    SubscribedActions: list[str] = WebHookActions
    IsActive: bool = True
    # NOTE(review): '' placeholder until the row is persisted — pydantic only
    # validates this field when a value is explicitly supplied; confirm.
    CreationDate: datetime = ''
    Notes: str = ''
|
||||
|
||||
class WebHookSessionLog(BaseModel):
    """A single timestamped log entry produced during a webhook delivery."""
    LogTime: datetime
    Status: int
    Message: str = ''

    @field_serializer('LogTime')
    def logTimeSerializer(self, LogTime: datetime):
        # Serialize as a human-readable timestamp rather than ISO-8601.
        return LogTime.strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
class WebHookSessionLogs(BaseModel):
    """Ordered collection of log entries for one webhook delivery session."""
    Logs: list[WebHookSessionLog] = []

    def addLog(self, status: int, message: str):
        """Append a log entry stamped with the current time."""
        self.Logs.append(
            WebHookSessionLog(LogTime=datetime.now(), Status=status, Message=message)
        )
|
||||
|
||||
class DashboardWebHooks:
    """CRUD and dispatch layer for dashboard webhooks.

    Webhook definitions live in `DashboardWebHooks`; every delivery attempt is
    tracked in `DashboardWebHookSessions`. Subscribed, active webhooks are
    invoked on background threads whenever RunWebHook() fires an action.
    """

    def __init__(self, DashboardConfig):
        self.engine = db.create_engine(ConnectionString("wgdashboard"))
        self.metadata = db.MetaData()
        # SQLite has no TIMESTAMP type; use DATETIME there, TIMESTAMP elsewhere.
        # (Previously this conditional was duplicated inline three times.)
        timeColumnType = (
            db.DATETIME if DashboardConfig.GetConfig("Database", "type")[1] == 'sqlite'
            else db.TIMESTAMP
        )
        self.webHooksTable = db.Table(
            'DashboardWebHooks', self.metadata,
            db.Column('WebHookID', db.String(255), nullable=False, primary_key=True),
            db.Column('PayloadURL', db.Text, nullable=False),
            db.Column('ContentType', db.String(255), nullable=False),
            db.Column('Headers', db.JSON),
            db.Column('VerifySSL', db.Boolean, nullable=False),
            db.Column('SubscribedActions', db.JSON),
            db.Column('IsActive', db.Boolean, nullable=False),
            db.Column('CreationDate', timeColumnType,
                      server_default=db.func.now(), nullable=False),
            db.Column('Notes', db.Text),
            extend_existing=True
        )
        self.webHookSessionsTable = db.Table(
            'DashboardWebHookSessions', self.metadata,
            db.Column('WebHookSessionID', db.String(255), nullable=False, primary_key=True),
            db.Column('WebHookID', db.String(255), nullable=False),
            db.Column('StartDate', timeColumnType,
                      server_default=db.func.now(), nullable=False),
            db.Column('EndDate', timeColumnType),
            db.Column('Data', db.JSON),
            db.Column('Status', db.INTEGER),
            db.Column('Logs', db.JSON)
        )

        self.metadata.create_all(self.engine)
        self.WebHooks: list[WebHook] = []

        # Sessions still flagged "running" (-1) were orphaned by a previous
        # process; close them out with status 2 (terminated).
        with self.engine.begin() as conn:
            conn.execute(
                self.webHookSessionsTable.update().values({
                    "EndDate": datetime.now(),
                    "Status": 2
                }).where(
                    self.webHookSessionsTable.c.Status == -1
                )
            )

        self.__getWebHooks()

    def __getWebHooks(self):
        """Refresh the in-memory webhook cache from the database."""
        with self.engine.connect() as conn:
            webhooks = conn.execute(
                self.webHooksTable.select().order_by(
                    self.webHooksTable.c.CreationDate
                )
            ).mappings().fetchall()
            self.WebHooks.clear()
            self.WebHooks = [WebHook(**webhook) for webhook in webhooks]

    def GetWebHooks(self):
        """Return all webhooks as plain dicts (refreshes the cache first)."""
        self.__getWebHooks()
        return list(map(lambda x: x.model_dump(), self.WebHooks))

    def GetWebHookSessions(self, webHook: WebHook):
        """Return all delivery sessions of a webhook, newest first."""
        with self.engine.connect() as conn:
            sessions = conn.execute(
                self.webHookSessionsTable.select().where(
                    self.webHookSessionsTable.c.WebHookID == webHook.WebHookID
                ).order_by(
                    db.desc(self.webHookSessionsTable.c.StartDate)
                )
            ).mappings().fetchall()
            return sessions

    def CreateWebHook(self) -> WebHook:
        """Build a fresh, unsaved webhook with a random ID."""
        return WebHook(WebHookID=str(uuid.uuid4()))

    def SearchWebHook(self, webHook: WebHook) -> WebHook | None:
        """Find a cached webhook matching the given webhook's ID."""
        # Delegate to SearchWebHookByID instead of duplicating the scan.
        return self.SearchWebHookByID(webHook.WebHookID)

    def SearchWebHookByID(self, webHookID: str) -> WebHook | None:
        """Find a cached webhook by ID; None when absent."""
        try:
            first = next(filter(lambda x: x.WebHookID == webHookID, self.WebHooks))
        except StopIteration:
            return None
        return first

    def UpdateWebHook(self, webHook: dict[str, str]) -> tuple[bool, str] | tuple[bool, None]:
        """Validate and upsert a webhook from its dict representation.

        Returns (True, None) on success, otherwise (False, reason).
        """
        try:
            webHook = WebHook(**webHook)

            if len(webHook.PayloadURL) == 0:
                return False, "Payload URL cannot be empty"

            if len(webHook.ContentType) == 0 or webHook.ContentType not in [
                'application/json', 'application/x-www-form-urlencoded'
            ]:
                return False, "Content Type is invalid"

            with self.engine.begin() as conn:
                if self.SearchWebHook(webHook):
                    conn.execute(
                        self.webHooksTable.update().values(
                            webHook.model_dump(exclude={'WebHookID'})
                        ).where(
                            self.webHooksTable.c.WebHookID == webHook.WebHookID
                        )
                    )
                else:
                    # New row: stamp the creation time ourselves so the cache
                    # ordering matches the persisted value.
                    webHook.CreationDate = datetime.now()
                    conn.execute(
                        self.webHooksTable.insert().values(
                            webHook.model_dump()
                        )
                    )
            self.__getWebHooks()
        except Exception as e:
            return False, str(e)
        return True, None

    def DeleteWebHook(self, webHook) -> tuple[bool, str] | tuple[bool, None]:
        """Delete the webhook row matching the given dict's WebHookID."""
        try:
            webHook = WebHook(**webHook)
            with self.engine.begin() as conn:
                conn.execute(
                    self.webHooksTable.delete().where(
                        self.webHooksTable.c.WebHookID == webHook.WebHookID
                    )
                )
            self.__getWebHooks()
        except Exception as e:
            return False, str(e)
        return True, None

    def RunWebHook(self, action: str, data):
        """Fire `action` with `data` to every active, subscribed webhook.

        Each delivery runs in its own daemon thread via WebHookSession.
        Returns False (only) when the action name is unknown.
        """
        try:
            if action not in WebHookActions:
                return False
            self.__getWebHooks()
            subscribedWebHooks = filter(
                lambda webhook: action in webhook.SubscribedActions and webhook.IsActive,
                self.WebHooks)
            data['action'] = action
            for i in subscribedWebHooks:
                try:
                    ws = WebHookSession(i, data)
                    t = threading.Thread(target=ws.Execute, daemon=True)
                    t.start()
                    current_app.logger.info(f"Requesting {i.PayloadURL}")
                except Exception as e:
                    # Fix: logger.error(msg, e) silently discarded `e` because
                    # the message has no %s placeholder — embed the reason.
                    current_app.logger.error(f"Requesting {i.PayloadURL} error: {str(e)}")
        except Exception as e:
            # Fix: previously logged a generic message with no detail at all.
            current_app.logger.error(f"Error when running WebHook: {str(e)}")
|
||||
|
||||
class WebHookSession:
    """The lifecycle of one webhook delivery: persisted session row, per-step
    logs, and the HTTP POST itself with retries."""

    def __init__(self, webHook: WebHook, data: dict[str, str]):
        # Each session uses its own engine because Execute() runs on a
        # background thread.
        self.engine = db.create_engine(ConnectionString("wgdashboard"))
        self.metadata = db.MetaData()
        self.webHookSessionsTable = db.Table(
            'DashboardWebHookSessions', self.metadata, autoload_with=self.engine)
        self.webHook = webHook
        self.sessionID = str(uuid.uuid4())
        self.webHookSessionLogs: WebHookSessionLogs = WebHookSessionLogs()
        self.time = datetime.now()
        # Enrich the payload with delivery metadata before sending.
        data['time'] = self.time.strftime("%Y-%m-%d %H:%M:%S")
        data['webhook_id'] = webHook.WebHookID
        data['webhook_session'] = self.sessionID
        self.data = data
        self.Prepare()

    def Prepare(self):
        """Insert the session row with status -1 (in progress)."""
        with self.engine.begin() as conn:
            conn.execute(
                self.webHookSessionsTable.insert().values({
                    "WebHookSessionID": self.sessionID,
                    "WebHookID": self.webHook.WebHookID,
                    "Data": self.data,
                    "StartDate": self.time,
                    "Status": -1,
                    "Logs": self.webHookSessionLogs.model_dump()
                })
            )
        self.UpdateSessionLog(-1, "Preparing webhook session")

    def UpdateSessionLog(self, status, message):
        """Append a log entry and persist the complete log list."""
        self.webHookSessionLogs.addLog(status, message)
        with self.engine.begin() as conn:
            conn.execute(
                self.webHookSessionsTable.update().values({
                    "Logs": self.webHookSessionLogs.model_dump()
                }).where(
                    self.webHookSessionsTable.c.WebHookSessionID == self.sessionID
                )
            )

    def UpdateStatus(self, status: int):
        """Finalize the session with `status` and an end timestamp."""
        with self.engine.begin() as conn:
            conn.execute(
                self.webHookSessionsTable.update().values({
                    "Status": status,
                    "EndDate": datetime.now()
                }).where(
                    self.webHookSessionsTable.c.WebHookSessionID == self.sessionID
                )
            )

    def Execute(self):
        """POST the payload to the webhook URL, retrying up to 5 times.

        Status codes persisted: 0 = success, 1 = failed after all retries.
        """
        maxAttempts = 5
        success = False

        for attempt in range(maxAttempts):
            headerDictionary = {
                'Content-Type': self.webHook.ContentType
            }
            # Custom headers may not override the negotiated Content-Type.
            for header in self.webHook.Headers.values():
                if header['key'] not in ['Content-Type']:
                    headerDictionary[header['key']] = header['value']

            if self.webHook.ContentType == "application/json":
                reqData = json.dumps(self.data)
            else:
                # Form encoding cannot represent nested values; JSON-encode them.
                for (key, val) in self.data.items():
                    if type(self.data[key]) not in [str, int]:
                        self.data[key] = json.dumps(self.data[key])
                reqData = urllib.parse.urlencode(self.data)
            try:
                req = requests.post(
                    self.webHook.PayloadURL, headers=headerDictionary,
                    timeout=10, data=reqData, verify=self.webHook.VerifySSL
                )
                req.raise_for_status()
                success = True
                self.UpdateSessionLog(0, "Webhook request finished")
                self.UpdateSessionLog(0, json.dumps({"returned_data": req.text}))
                self.UpdateStatus(0)
                break
            except requests.exceptions.RequestException as e:
                self.UpdateSessionLog(1, f"Attempt #{attempt + 1}/5. Request errored. Reason: " + str(e))
                # Fix: only wait when another attempt will follow; previously
                # this slept 10s even after the final failure.
                if attempt < maxAttempts - 1:
                    time.sleep(10)

        if not success:
            self.UpdateSessionLog(1, "Webhook request failed & terminated.")
            self.UpdateStatus(1)
|
||||
76
src/modules/Email.py
Normal file
76
src/modules/Email.py
Normal file
@@ -0,0 +1,76 @@
|
||||
import os.path
|
||||
import smtplib
|
||||
from email import encoders
|
||||
from email.header import Header
|
||||
from email.mime.base import MIMEBase
|
||||
from email.mime.multipart import MIMEMultipart
|
||||
from email.mime.text import MIMEText
|
||||
from email.utils import formataddr
|
||||
|
||||
class EmailSender:
|
||||
def __init__(self, DashboardConfig):
|
||||
self.smtp = None
|
||||
self.DashboardConfig = DashboardConfig
|
||||
if not os.path.exists('./attachments'):
|
||||
os.mkdir('./attachments')
|
||||
|
||||
def Server(self):
|
||||
return self.DashboardConfig.GetConfig("Email", "server")[1]
|
||||
|
||||
def Port(self):
|
||||
return self.DashboardConfig.GetConfig("Email", "port")[1]
|
||||
|
||||
def Encryption(self):
|
||||
return self.DashboardConfig.GetConfig("Email", "encryption")[1]
|
||||
|
||||
def Username(self):
|
||||
return self.DashboardConfig.GetConfig("Email", "username")[1]
|
||||
|
||||
def Password(self):
|
||||
return self.DashboardConfig.GetConfig("Email", "email_password")[1]
|
||||
|
||||
def SendFrom(self):
|
||||
return self.DashboardConfig.GetConfig("Email", "send_from")[1]
|
||||
|
||||
# Thank you, @gdeeble from GitHub
|
||||
def AuthenticationRequired(self):
|
||||
return self.DashboardConfig.GetConfig("Email", "authentication_required")[1]
|
||||
|
||||
def ready(self):
|
||||
if self.AuthenticationRequired():
|
||||
return all([self.Server(), self.Port(), self.Encryption(), self.Username(), self.Password(), self.SendFrom()])
|
||||
return all([self.Server(), self.Port(), self.Encryption(), self.SendFrom()])
|
||||
|
||||
def send(self, receiver, subject, body, includeAttachment = False, attachmentName = "") -> tuple[bool, str] | tuple[bool, None]:
|
||||
if self.ready():
|
||||
try:
|
||||
self.smtp = smtplib.SMTP(self.Server(), port=int(self.Port()))
|
||||
self.smtp.ehlo()
|
||||
if self.Encryption() == "STARTTLS":
|
||||
self.smtp.starttls()
|
||||
if self.AuthenticationRequired():
|
||||
self.smtp.login(self.Username(), self.Password())
|
||||
message = MIMEMultipart()
|
||||
message['Subject'] = subject
|
||||
message['From'] = self.SendFrom()
|
||||
message["To"] = receiver
|
||||
message.attach(MIMEText(body, "plain"))
|
||||
|
||||
if includeAttachment and len(attachmentName) > 0:
|
||||
attachmentPath = os.path.join('./attachments', attachmentName)
|
||||
if os.path.exists(attachmentPath):
|
||||
attachment = MIMEBase("application", "octet-stream")
|
||||
with open(os.path.join('./attachments', attachmentName), 'rb') as f:
|
||||
attachment.set_payload(f.read())
|
||||
encoders.encode_base64(attachment)
|
||||
attachment.add_header("Content-Disposition", f"attachment; filename= {attachmentName}",)
|
||||
message.attach(attachment)
|
||||
else:
|
||||
self.smtp.close()
|
||||
return False, "Attachment does not exist"
|
||||
self.smtp.sendmail(self.SendFrom(), receiver, message.as_string())
|
||||
self.smtp.close()
|
||||
return True, None
|
||||
except Exception as e:
|
||||
return False, f"Send failed | Reason: {e}"
|
||||
return False, "SMTP not configured"
|
||||
22
src/modules/Log.py
Normal file
22
src/modules/Log.py
Normal file
@@ -0,0 +1,22 @@
|
||||
"""
|
||||
Log Class
|
||||
"""
|
||||
class Log:
    """A single job-execution log record."""

    def __init__(self, LogID: str, JobID: str, LogDate: str, Status: str, Message: str):
        self.LogID = LogID
        self.JobID = JobID
        self.LogDate = LogDate
        self.Status = Status
        self.Message = Message

    def toJson(self):
        """Return the record as a plain dict for JSON serialization."""
        return {
            "LogID": self.LogID,
            "JobID": self.JobID,
            "LogDate": self.LogDate,
            "Status": self.Status,
            "Message": self.Message
        }

    # NOTE(review): defining __dict__ as a method shadows the implicit
    # instance __dict__ descriptor; kept as-is to preserve existing behavior.
    def __dict__(self):
        return self.toJson()
|
||||
88
src/modules/NewConfigurationTemplates.py
Normal file
88
src/modules/NewConfigurationTemplates.py
Normal file
@@ -0,0 +1,88 @@
|
||||
import uuid
|
||||
|
||||
from pydantic import BaseModel, field_serializer
|
||||
import sqlalchemy as db
|
||||
from .ConnectionString import ConnectionString
|
||||
|
||||
|
||||
class NewConfigurationTemplate(BaseModel):
    """Defaults applied when creating a new WireGuard configuration."""
    TemplateID: str = ''
    Subnet: str = ''
    # Inclusive port range to pick a listen port from; 0 means unset.
    ListenPortStart: int = 0
    ListenPortEnd: int = 0
    Notes: str = ""
|
||||
|
||||
class NewConfigurationTemplates:
    """Persistence for new-configuration templates (CRUD over one table)."""

    def __init__(self):
        self.engine = db.create_engine(ConnectionString("wgdashboard"))
        self.metadata = db.MetaData()
        self.templatesTable = db.Table(
            'NewConfigurationTemplates', self.metadata,
            db.Column('TemplateID', db.String(255), primary_key=True),
            db.Column('Subnet', db.String(255)),
            db.Column('ListenPortStart', db.Integer),
            db.Column('ListenPortEnd', db.Integer),
            db.Column('Notes', db.Text),
        )
        self.metadata.create_all(self.engine)
        self.Templates: list[NewConfigurationTemplate] = []
        self.__getTemplates()

    def GetTemplates(self):
        """Return every template as a plain dict (refreshes the cache)."""
        self.__getTemplates()
        return [template.model_dump() for template in self.Templates]

    def __getTemplates(self):
        """Refresh the in-memory template cache from the database."""
        with self.engine.connect() as conn:
            rows = conn.execute(
                self.templatesTable.select()
            ).mappings().fetchall()
            self.Templates.clear()
            self.Templates = [NewConfigurationTemplate(**row) for row in rows]

    def CreateTemplate(self) -> NewConfigurationTemplate:
        """Build a fresh, unsaved template with a random ID."""
        return NewConfigurationTemplate(TemplateID=str(uuid.uuid4()))

    def SearchTemplate(self, template: NewConfigurationTemplate):
        """Return the cached template with a matching ID, or None."""
        for candidate in self.Templates:
            if candidate.TemplateID == template.TemplateID:
                return candidate
        return None

    def UpdateTemplate(self, template: dict[str, str]) -> tuple[bool, str] | tuple[bool, None]:
        """Upsert a template from its dict representation.

        Returns (True, None) on success, otherwise (False, reason).
        """
        try:
            template = NewConfigurationTemplate(**template)
            with self.engine.begin() as conn:
                if self.SearchTemplate(template):
                    conn.execute(
                        self.templatesTable.update().values(
                            template.model_dump(exclude={'TemplateID'})
                        ).where(
                            self.templatesTable.c.TemplateID == template.TemplateID
                        )
                    )
                else:
                    conn.execute(
                        self.templatesTable.insert().values(template.model_dump())
                    )
            self.__getTemplates()
        except Exception as e:
            return False, str(e)
        return True, None

    def DeleteTemplate(self, template: dict[str, str]) -> tuple[bool, str] | tuple[bool, None]:
        """Delete the row whose TemplateID matches the given dict."""
        try:
            template = NewConfigurationTemplate(**template)
            with self.engine.begin() as conn:
                conn.execute(
                    self.templatesTable.delete().where(
                        self.templatesTable.c.TemplateID == template.TemplateID
                    )
                )
            self.__getTemplates()
        except Exception as e:
            return False, str(e)
        return True, None
|
||||
354
src/modules/Peer.py
Normal file
354
src/modules/Peer.py
Normal file
@@ -0,0 +1,354 @@
|
||||
"""
|
||||
Peer
|
||||
"""
|
||||
import base64
|
||||
import datetime
|
||||
import json
|
||||
import os, subprocess, uuid, random, re
|
||||
from datetime import timedelta
|
||||
|
||||
import jinja2
|
||||
import sqlalchemy as db
|
||||
from .PeerJob import PeerJob
|
||||
from .PeerShareLink import PeerShareLink
|
||||
from .Utilities import GenerateWireguardPublicKey, ValidateIPAddressesWithRange, ValidateDNSAddress
|
||||
|
||||
|
||||
class Peer:
    """One WireGuard / AmneziaWG peer, backed by a row in the peers table."""

    def __init__(self, tableData, configuration):
        """Populate peer fields from a DB row mapping and attach jobs/links.

        Parameters:
            tableData: Row mapping from the configuration's peers table.
            configuration: Owning WireguardConfiguration-like object.
        """
        self.configuration = configuration
        self.id = tableData["id"]
        self.private_key = tableData["private_key"]
        self.DNS = tableData["DNS"]
        self.endpoint_allowed_ip = tableData["endpoint_allowed_ip"]
        self.name = tableData["name"]
        self.total_receive = tableData["total_receive"]
        self.total_sent = tableData["total_sent"]
        self.total_data = tableData["total_data"]
        self.endpoint = tableData["endpoint"]
        self.status = tableData["status"]
        self.latest_handshake = tableData["latest_handshake"]
        self.allowed_ip = tableData["allowed_ip"]
        self.cumu_receive = tableData["cumu_receive"]
        self.cumu_sent = tableData["cumu_sent"]
        self.cumu_data = tableData["cumu_data"]
        self.mtu = tableData["mtu"]
        self.keepalive = tableData["keepalive"]
        self.remote_endpoint = tableData["remote_endpoint"]
        self.preshared_key = tableData["preshared_key"]
        self.jobs: list[PeerJob] = []
        self.ShareLink: list[PeerShareLink] = []
        self.getJobs()
        self.getShareLink()

    def toJson(self):
        """Return the peer's attribute dict for JSON serialization."""
        return self.__dict__

    def __repr__(self):
        return str(self.toJson())

    def updatePeer(self, name: str, private_key: str,
                   preshared_key: str,
                   dns_addresses: str, allowed_ip: str, endpoint_allowed_ip: str, mtu: int,
                   keepalive: int) -> tuple[bool, str] | tuple[bool, None]:
        """Validate and apply new settings to this peer via `wg set`,
        then persist them to the database.

        Returns (True, None) on success, otherwise (False, reason).
        """
        if not self.configuration.getStatus():
            self.configuration.toggleConfiguration()

        # Flatten the comma-separated allowed-IP lists of every OTHER peer so
        # we can reject an address already in use.
        existingAllowedIps = [item for row in list(
            map(lambda x: [q.strip() for q in x.split(',')],
                map(lambda y: y.allowed_ip,
                    list(filter(lambda k: k.id != self.id, self.configuration.getPeersList()))))) for item in row]

        if allowed_ip in existingAllowedIps:
            return False, "Allowed IP already taken by another peer"

        if not ValidateIPAddressesWithRange(endpoint_allowed_ip):
            return False, f"Endpoint Allowed IPs format is incorrect"

        if len(dns_addresses) > 0 and not ValidateDNSAddress(dns_addresses):
            return False, f"DNS format is incorrect"

        # Normalize non-numeric MTU / keepalive to 0 (= unset).
        if type(mtu) is str or mtu is None:
            mtu = 0
        if mtu < 0 or mtu > 1460:
            return False, "MTU format is not correct"

        if type(keepalive) is str or keepalive is None:
            keepalive = 0
        if keepalive < 0:
            return False, "Persistent Keepalive format is not correct"

        if len(private_key) > 0:
            pubKey = GenerateWireguardPublicKey(private_key)
            if not pubKey[0] or pubKey[1] != self.id:
                return False, "Private key does not match with the public key"
        try:
            rd = random.Random()
            uid = str(uuid.UUID(int=rd.getrandbits(128), version=4))
            pskExist = len(preshared_key) > 0

            if pskExist:
                # `wg set ... preshared-key` only reads from a file: stage the
                # key in a uniquely-named temp file and delete it right after.
                with open(uid, "w+") as f:
                    f.write(preshared_key)
            newAllowedIPs = allowed_ip.replace(" ", "")
            # NOTE(review): shell=True with interpolated values — inputs come
            # from the dashboard UI; confirm they are validated upstream.
            updateAllowedIp = subprocess.check_output(
                f"{self.configuration.Protocol} set {self.configuration.Name} peer {self.id} allowed-ips {newAllowedIPs} {f'preshared-key {uid}' if pskExist else 'preshared-key /dev/null'}",
                shell=True, stderr=subprocess.STDOUT)

            if pskExist: os.remove(uid)
            if len(updateAllowedIp.decode().strip("\n")) != 0:
                return False, "Update peer failed when updating Allowed IPs"
            saveConfig = subprocess.check_output(f"{self.configuration.Protocol}-quick save {self.configuration.Name}",
                                                shell=True, stderr=subprocess.STDOUT)
            if f"wg showconf {self.configuration.Name}" not in saveConfig.decode().strip('\n'):
                return False, "Update peer failed when saving the configuration"
            with self.configuration.engine.begin() as conn:
                conn.execute(
                    self.configuration.peersTable.update().values({
                        "name": name,
                        "private_key": private_key,
                        "DNS": dns_addresses,
                        "endpoint_allowed_ip": endpoint_allowed_ip,
                        "mtu": mtu,
                        "keepalive": keepalive,
                        "preshared_key": preshared_key
                    }).where(
                        self.configuration.peersTable.c.id == self.id
                    )
                )
            return True, None
        except subprocess.CalledProcessError as exc:
            return False, exc.output.decode("UTF-8").strip()

    def downloadPeer(self) -> dict[str, str]:
        """Render this peer's client configuration file.

        Returns a dict with `fileName` (sanitized) and `file` (INI content);
        for AmneziaWG an `amneziaVPN` JSON export is added as well.
        """
        final = {
            "fileName": "",
            "file": ""
        }
        filename = self.name
        if len(filename) == 0:
            filename = "UntitledPeer"
        filename = "".join(filename.split(' '))
        # Suffix the configuration name so downloads are unambiguous.
        filename = f"{filename}_{self.configuration.Name}"
        # Strip characters & reserved device names illegal in file names.
        # Fix: '|' and '"' were previously fused into one element ('|"') by a
        # missing comma, so only that two-character sequence was removed.
        illegal_filename = [".", ",", "/", "?", "<", ">", "\\", ":", "*", "|", "\"", "com1", "com2", "com3",
                            "com4", "com5", "com6", "com7", "com8", "com9", "lpt1", "lpt2", "lpt3", "lpt4",
                            "lpt5", "lpt6", "lpt7", "lpt8", "lpt9", "con", "nul", "prn"]
        for i in illegal_filename:
            filename = filename.replace(i, "")

        # Keep only a conservative character set for the final file name.
        for i in filename:
            if re.match("^[a-zA-Z0-9_=+.-]$", i):
                final["fileName"] += i

        interfaceSection = {
            "PrivateKey": self.private_key,
            "Address": self.allowed_ip,
            # Per-configuration overrides win over the peer's stored values.
            "MTU": (
                self.configuration.configurationInfo.OverridePeerSettings.MTU
                if self.configuration.configurationInfo.OverridePeerSettings.MTU else self.mtu
            ),
            "DNS": (
                self.configuration.configurationInfo.OverridePeerSettings.DNS
                if self.configuration.configurationInfo.OverridePeerSettings.DNS else self.DNS
            )
        }

        if self.configuration.Protocol == "awg":
            # AmneziaWG obfuscation parameters.
            interfaceSection.update({
                "Jc": self.configuration.Jc,
                "Jmin": self.configuration.Jmin,
                "Jmax": self.configuration.Jmax,
                "S1": self.configuration.S1,
                "S2": self.configuration.S2,
                "H1": self.configuration.H1,
                "H2": self.configuration.H2,
                "H3": self.configuration.H3,
                "H4": self.configuration.H4
            })

        peerSection = {
            "PublicKey": self.configuration.PublicKey,
            "AllowedIPs": (
                self.configuration.configurationInfo.OverridePeerSettings.EndpointAllowedIPs
                if self.configuration.configurationInfo.OverridePeerSettings.EndpointAllowedIPs else self.endpoint_allowed_ip
            ),
            "Endpoint": f'{(self.configuration.configurationInfo.OverridePeerSettings.PeerRemoteEndpoint if self.configuration.configurationInfo.OverridePeerSettings.PeerRemoteEndpoint else self.configuration.DashboardConfig.GetConfig("Peers", "remote_endpoint")[1])}:{(self.configuration.configurationInfo.OverridePeerSettings.ListenPort if self.configuration.configurationInfo.OverridePeerSettings.ListenPort else self.configuration.ListenPort)}',
            "PersistentKeepalive": (
                self.configuration.configurationInfo.OverridePeerSettings.PersistentKeepalive
                if self.configuration.configurationInfo.OverridePeerSettings.PersistentKeepalive
                else self.keepalive
            ),
            "PresharedKey": self.preshared_key
        }

        # Emit [Interface] first, then [Peer]; skip empty / zero values.
        combine = [interfaceSection.items(), peerSection.items()]
        for s in range(len(combine)):
            if s == 0:
                final["file"] += "[Interface]\n"
            else:
                final["file"] += "\n[Peer]\n"
            for (key, val) in combine[s]:
                if val is not None and ((type(val) is str and len(val) > 0) or (type(val) is int and val > 0)):
                    final["file"] += f"{key} = {val}\n"

        # Allow jinja templating inside stored values (e.g. dynamic endpoints).
        final["file"] = jinja2.Template(final["file"]).render(configuration=self.configuration)

        if self.configuration.Protocol == "awg":
            final["amneziaVPN"] = json.dumps({
                "containers": [{
                    "awg": {
                        "isThirdPartyConfig": True,
                        "last_config": final['file'],
                        "port": self.configuration.ListenPort,
                        "transport_proto": "udp"
                    },
                    "container": "amnezia-awg"
                }],
                "defaultContainer": "amnezia-awg",
                "description": self.name,
                "hostName": (
                    self.configuration.configurationInfo.OverridePeerSettings.PeerRemoteEndpoint
                    if self.configuration.configurationInfo.OverridePeerSettings.PeerRemoteEndpoint
                    else self.configuration.DashboardConfig.GetConfig("Peers", "remote_endpoint")[1])
            })
        return final

    def getJobs(self):
        """Refresh this peer's scheduled jobs from the job registry."""
        self.jobs = self.configuration.AllPeerJobs.searchJob(self.configuration.Name, self.id)

    def getShareLink(self):
        """Refresh this peer's share links from the share-link registry."""
        self.ShareLink = self.configuration.AllPeerShareLinks.getLink(self.configuration.Name, self.id)

    def resetDataUsage(self, mode: str):
        """Zero the peer's traffic counters.

        mode: "total" (everything), "receive", or "sent".
        Returns True on success, False on unknown mode or DB error.
        """
        try:
            with self.configuration.engine.begin() as conn:
                if mode == "total":
                    conn.execute(
                        self.configuration.peersTable.update().values({
                            "total_data": 0,
                            "cumu_data": 0,
                            "total_receive": 0,
                            "cumu_receive": 0,
                            "total_sent": 0,
                            "cumu_sent": 0
                        }).where(
                            self.configuration.peersTable.c.id == self.id
                        )
                    )
                    self.total_data = 0
                    self.total_receive = 0
                    self.total_sent = 0
                    self.cumu_data = 0
                    self.cumu_sent = 0
                    self.cumu_receive = 0
                elif mode == "receive":
                    conn.execute(
                        self.configuration.peersTable.update().values({
                            "total_receive": 0,
                            "cumu_receive": 0
                        }).where(
                            self.configuration.peersTable.c.id == self.id
                        )
                    )
                    self.cumu_receive = 0
                    self.total_receive = 0
                elif mode == "sent":
                    conn.execute(
                        self.configuration.peersTable.update().values({
                            "total_sent": 0,
                            "cumu_sent": 0
                        }).where(
                            self.configuration.peersTable.c.id == self.id
                        )
                    )
                    self.cumu_sent = 0
                    self.total_sent = 0
                else:
                    return False
        except Exception as e:
            print(e)
            return False
        return True

    def getEndpoints(self):
        """Return the distinct endpoints this peer has connected from."""
        result = []
        with self.configuration.engine.connect() as conn:
            result = conn.execute(
                db.select(
                    self.configuration.peersHistoryEndpointTable.c.endpoint
                ).group_by(
                    self.configuration.peersHistoryEndpointTable.c.endpoint
                ).where(
                    self.configuration.peersHistoryEndpointTable.c.id == self.id
                )
            ).mappings().fetchall()
        return list(result)

    def getTraffics(self, interval: int = 30, startDate: datetime.datetime = None, endDate: datetime.datetime = None):
        """Return transfer samples, either for the last `interval` minutes
        (when no dates are given) or for the whole days spanning the dates."""
        if startDate is None and endDate is None:
            endDate = datetime.datetime.now()
            startDate = endDate - timedelta(minutes=interval)
        else:
            # Expand to full calendar days.
            endDate = endDate.replace(hour=23, minute=59, second=59, microsecond=999999)
            startDate = startDate.replace(hour=0, minute=0, second=0, microsecond=0)

        with self.configuration.engine.connect() as conn:
            result = conn.execute(
                db.select(
                    self.configuration.peersTransferTable.c.cumu_data,
                    self.configuration.peersTransferTable.c.total_data,
                    self.configuration.peersTransferTable.c.cumu_receive,
                    self.configuration.peersTransferTable.c.total_receive,
                    self.configuration.peersTransferTable.c.cumu_sent,
                    self.configuration.peersTransferTable.c.total_sent,
                    self.configuration.peersTransferTable.c.time
                ).where(
                    db.and_(
                        self.configuration.peersTransferTable.c.id == self.id,
                        self.configuration.peersTransferTable.c.time <= endDate,
                        self.configuration.peersTransferTable.c.time >= startDate,
                    )
                ).order_by(
                    self.configuration.peersTransferTable.c.time
                )
            ).mappings().fetchall()
        return list(result)

    def getSessions(self, startDate: datetime.datetime = None, endDate: datetime.datetime = None):
        """Return the sample timestamps for this peer across whole days."""
        if endDate is None:
            endDate = datetime.datetime.now()
        if startDate is None:
            startDate = endDate

        endDate = endDate.replace(hour=23, minute=59, second=59, microsecond=999999)
        startDate = startDate.replace(hour=0, minute=0, second=0, microsecond=0)

        with self.configuration.engine.connect() as conn:
            result = conn.execute(
                db.select(
                    self.configuration.peersTransferTable.c.time
                ).where(
                    db.and_(
                        self.configuration.peersTransferTable.c.id == self.id,
                        self.configuration.peersTransferTable.c.time <= endDate,
                        self.configuration.peersTransferTable.c.time >= startDate,
                    )
                ).order_by(
                    self.configuration.peersTransferTable.c.time
                )
            ).fetchall()
        time = list(map(lambda x: x[0], result))
        return time

    def __duration(self, t1: datetime.datetime, t2: datetime.datetime):
        """Format the difference t1 - t2 as HH:MM:SS."""
        delta = t1 - t2
        hours, remainder = divmod(delta.total_seconds(), 3600)
        minutes, seconds = divmod(remainder, 60)
        return f"{int(hours):02}:{int(minutes):02}:{int(seconds):02}"
|
||||
32
src/modules/PeerJob.py
Normal file
32
src/modules/PeerJob.py
Normal file
@@ -0,0 +1,32 @@
|
||||
"""
|
||||
Peer Job
|
||||
"""
|
||||
from datetime import datetime
|
||||
class PeerJob:
    """A scheduled conditional action attached to one peer."""

    def __init__(self, JobID: str, Configuration: str, Peer: str,
                 Field: str, Operator: str, Value: str, CreationDate: datetime, ExpireDate: datetime, Action: str):
        self.JobID = JobID
        self.Configuration = Configuration
        self.Peer = Peer
        self.Field = Field
        self.Operator = Operator
        self.Value = Value
        self.CreationDate = CreationDate
        self.ExpireDate = ExpireDate
        self.Action = Action

    def toJson(self):
        """Serialize to a dict; dates become 'YYYY-MM-DD HH:MM:SS' strings."""
        expire = (
            self.ExpireDate.strftime("%Y-%m-%d %H:%M:%S")
            if self.ExpireDate is not None else None
        )
        return {
            "JobID": self.JobID,
            "Configuration": self.Configuration,
            "Peer": self.Peer,
            "Field": self.Field,
            "Operator": self.Operator,
            "Value": self.Value,
            "CreationDate": self.CreationDate.strftime("%Y-%m-%d %H:%M:%S"),
            "ExpireDate": expire,
            "Action": self.Action
        }

    # NOTE(review): defining __dict__ as a method shadows the implicit
    # instance __dict__ descriptor; kept as-is to preserve existing behavior.
    def __dict__(self):
        return self.toJson()
|
||||
80
src/modules/PeerJobLogger.py
Normal file
80
src/modules/PeerJobLogger.py
Normal file
@@ -0,0 +1,80 @@
|
||||
"""
|
||||
Peer Job Logger
|
||||
"""
|
||||
import uuid
|
||||
from typing import Sequence
|
||||
|
||||
import sqlalchemy as db
|
||||
from flask import current_app
|
||||
from sqlalchemy import RowMapping
|
||||
|
||||
from .ConnectionString import ConnectionString
|
||||
from .Log import Log
|
||||
|
||||
class PeerJobLogger:
    """
    Persists and retrieves execution logs for peer jobs, backed by the
    'JobLog' table in the 'wgdashboard_log' database.
    """
    def __init__(self, AllPeerJobs, DashboardConfig):
        self.engine = db.create_engine(ConnectionString("wgdashboard_log"))
        self.metadata = db.MetaData()
        # One row per logged job event.
        self.jobLogTable = db.Table('JobLog', self.metadata,
                                    db.Column('LogID', db.String(255), nullable=False, primary_key=True),
                                    db.Column('JobID', db.String(255), nullable=False),
                                    # SQLite has no TIMESTAMP type; pick the column type per backend.
                                    db.Column('LogDate', (db.DATETIME if DashboardConfig.GetConfig("Database", "type")[1] == 'sqlite' else db.TIMESTAMP),
                                              server_default=db.func.now()),
                                    db.Column('Status', db.String(255), nullable=False),
                                    db.Column('Message', db.Text)
                                    )
        # NOTE(review): this cache is never written to by this class -- possibly vestigial.
        self.logs: list[Log] = []
        self.metadata.create_all(self.engine)
        self.AllPeerJobs = AllPeerJobs
    def log(self, JobID: str, Status: bool = True, Message: str = "") -> bool:
        """
        Insert one log row for JobID. Returns True on success, False on any failure.
        Note: Status (a bool) is stored into a String(255) column; the driver
        performs the conversion.
        """
        try:
            with self.engine.begin() as conn:
                conn.execute(
                    self.jobLogTable.insert().values(
                        {
                            # Random primary key per entry; LogDate comes from the server default.
                            "LogID": str(uuid.uuid4()),
                            "JobID": JobID,
                            "Status": Status,
                            "Message": Message
                        }
                    )
                )
        except Exception as e:
            # NOTE(review): logger.error(msg, e) passes the exception as a %-format
            # argument without a placeholder -- likely intended exc_info=e.
            current_app.logger.error(f"Peer Job Log Error", e)
            return False
        return True

    def getLogs(self, configName = None) -> list[Log]:
        """
        Return Log entries for all jobs of configName. Since getAllJobs(None)
        yields no jobs, passing None returns an empty list.
        """
        logs: list[Log] = []
        try:
            allJobs = self.AllPeerJobs.getAllJobs(configName)
            allJobsID = [x.JobID for x in allJobs]
            stmt = self.jobLogTable.select().where(self.jobLogTable.columns.JobID.in_(
                allJobsID
            ))
            with self.engine.connect() as conn:
                table = conn.execute(stmt).fetchall()
                for l in table:
                    logs.append(
                        Log(l.LogID, l.JobID, l.LogDate.strftime("%Y-%m-%d %H:%M:%S"), l.Status, l.Message))
        except Exception as e:
            # Best-effort: return whatever was collected before the failure.
            current_app.logger.error(f"Getting Peer Job Log Error", e)
            return logs
        return logs

    def getFailingJobs(self) -> Sequence[RowMapping]:
        """
        Return JobIDs that have more than 10 log rows whose Status is the
        string 'false' (i.e. repeatedly failing jobs).
        """
        with self.engine.connect() as conn:
            table = conn.execute(
                db.select(
                    self.jobLogTable.c.JobID
                ).where(
                    self.jobLogTable.c.Status == 'false'
                ).group_by(
                    self.jobLogTable.c.JobID
                ).having(
                    db.func.count(
                        self.jobLogTable.c.JobID
                    ) > 10
                )
            ).mappings().fetchall()
            return table
|
||||
214
src/modules/PeerJobs.py
Normal file
214
src/modules/PeerJobs.py
Normal file
@@ -0,0 +1,214 @@
|
||||
"""
|
||||
Peer Jobs
|
||||
"""
|
||||
import sqlalchemy
|
||||
|
||||
from .ConnectionString import ConnectionString
|
||||
from .PeerJob import PeerJob
|
||||
from .PeerJobLogger import PeerJobLogger
|
||||
import sqlalchemy as db
|
||||
from datetime import datetime
|
||||
from flask import current_app
|
||||
|
||||
class PeerJobs:
    """
    Registry and scheduler for peer jobs ("if <Field> <Operator> <Value> then
    <Action>"), persisted in the 'PeerJobs' table of the 'wgdashboard_job'
    database. A job with a non-NULL ExpireDate is considered finished/deleted;
    only NULL-ExpireDate rows are loaded into the in-memory cache.
    """
    def __init__(self, DashboardConfig, WireguardConfigurations, AllPeerShareLinks):
        # In-memory cache of active jobs, refreshed by __getJobs().
        self.Jobs: list[PeerJob] = []
        self.engine = db.create_engine(ConnectionString('wgdashboard_job'))
        self.metadata = db.MetaData()
        self.peerJobTable = db.Table('PeerJobs', self.metadata,
                                     db.Column('JobID', db.String(255), nullable=False, primary_key=True),
                                     db.Column('Configuration', db.String(255), nullable=False),
                                     db.Column('Peer', db.String(255), nullable=False),
                                     db.Column('Field', db.String(255), nullable=False),
                                     db.Column('Operator', db.String(255), nullable=False),
                                     db.Column('Value', db.String(255), nullable=False),
                                     # SQLite has no TIMESTAMP type; pick the column type per backend.
                                     db.Column('CreationDate', (db.DATETIME if DashboardConfig.GetConfig("Database", "type")[1] == 'sqlite' else db.TIMESTAMP), nullable=False),
                                     db.Column('ExpireDate', (db.DATETIME if DashboardConfig.GetConfig("Database", "type")[1] == 'sqlite' else db.TIMESTAMP)),
                                     db.Column('Action', db.String(255), nullable=False),
                                     )
        self.metadata.create_all(self.engine)
        self.__getJobs()
        self.JobLogger: PeerJobLogger = PeerJobLogger(self, DashboardConfig)
        self.WireguardConfigurations = WireguardConfigurations
        self.AllPeerShareLinks = AllPeerShareLinks

    def __getJobs(self):
        """Reload self.Jobs with every job whose ExpireDate is NULL (still active)."""
        self.Jobs.clear()
        with self.engine.connect() as conn:
            jobs = conn.execute(self.peerJobTable.select().where(
                self.peerJobTable.columns.ExpireDate.is_(None)
            )).mappings().fetchall()
            for job in jobs:
                self.Jobs.append(PeerJob(
                    job['JobID'], job['Configuration'], job['Peer'], job['Field'], job['Operator'], job['Value'],
                    job['CreationDate'], job['ExpireDate'], job['Action']))

    def getAllJobs(self, configuration: str = None):
        """
        Return every job (active or expired) for one configuration, straight
        from the database. Returns [] when configuration is None.
        """
        if configuration is not None:
            with self.engine.connect() as conn:
                jobs = conn.execute(self.peerJobTable.select().where(
                    self.peerJobTable.columns.Configuration == configuration
                )).mappings().fetchall()
                j = []
                for job in jobs:
                    j.append(PeerJob(
                        job['JobID'], job['Configuration'], job['Peer'], job['Field'], job['Operator'], job['Value'],
                        job['CreationDate'], job['ExpireDate'], job['Action']))
                return j
        return []

    def toJson(self):
        """Serialize all cached (active) jobs."""
        return [x.toJson() for x in self.Jobs]

    def searchJob(self, Configuration: str, Peer: str):
        """Active jobs for a specific peer of a specific configuration."""
        return list(filter(lambda x: x.Configuration == Configuration and x.Peer == Peer, self.Jobs))

    def searchJobById(self, JobID):
        """Active jobs matching JobID (0 or 1 element in practice)."""
        return list(filter(lambda x: x.JobID == JobID, self.Jobs))

    def saveJob(self, Job: PeerJob) -> tuple[bool, list] | tuple[bool, str]:
        """
        Insert the job if its JobID is unknown, otherwise update the rule
        fields of the existing row. Refreshes the cache and the peer's own
        job list on success.
        @return: (True, [matching jobs]) or (False, error string)
        """
        import traceback
        try:
            with self.engine.begin() as conn:
                currentJob = self.searchJobById(Job.JobID)
                if len(currentJob) == 0:
                    conn.execute(
                        self.peerJobTable.insert().values(
                            {
                                "JobID": Job.JobID,
                                "Configuration": Job.Configuration,
                                "Peer": Job.Peer,
                                "Field": Job.Field,
                                "Operator": Job.Operator,
                                "Value": Job.Value,
                                # CreationDate is set server-side here, not taken from Job.
                                "CreationDate": datetime.now(),
                                "ExpireDate": None,
                                "Action": Job.Action
                            }
                        )
                    )
                    self.JobLogger.log(Job.JobID, Message=f"Job is created if {Job.Field} {Job.Operator} {Job.Value} then {Job.Action}")
                else:
                    conn.execute(
                        self.peerJobTable.update().values({
                            "Field": Job.Field,
                            "Operator": Job.Operator,
                            "Value": Job.Value,
                            "Action": Job.Action
                        }).where(self.peerJobTable.columns.JobID == Job.JobID)
                    )
                    self.JobLogger.log(Job.JobID, Message=f"Job is updated from if {currentJob[0].Field} {currentJob[0].Operator} {currentJob[0].Value} then {currentJob[0].Action}; to if {Job.Field} {Job.Operator} {Job.Value} then {Job.Action}")
            self.__getJobs()
            # Let the peer object re-read its job list.
            self.WireguardConfigurations.get(Job.Configuration).searchPeer(Job.Peer)[1].getJobs()
            return True, list(
                filter(lambda x: x.Configuration == Job.Configuration and x.Peer == Job.Peer and x.JobID == Job.JobID,
                       self.Jobs))
        except Exception as e:
            traceback.print_exc()
            return False, str(e)

    def deleteJob(self, Job: PeerJob) -> tuple[bool, None] | tuple[bool, str]:
        """
        Soft-delete a job by stamping its ExpireDate (rows are never removed).
        @return: (True, None) or (False, error string)
        """
        try:
            if len(self.searchJobById(Job.JobID)) == 0:
                return False, "Job does not exist"
            with self.engine.begin() as conn:
                conn.execute(
                    self.peerJobTable.update().values(
                        {
                            "ExpireDate": datetime.now()
                        }
                    ).where(self.peerJobTable.columns.JobID == Job.JobID)
                )
            self.JobLogger.log(Job.JobID, Message=f"Job is removed due to being deleted or finished.")
            self.__getJobs()
            self.WireguardConfigurations.get(Job.Configuration).searchPeer(Job.Peer)[1].getJobs()
            return True, None
        except Exception as e:
            return False, str(e)

    def updateJobConfigurationName(self, ConfigurationName: str, NewConfigurationName: str) -> tuple[bool, str] | tuple[bool, None]:
        """Re-point all jobs of a renamed configuration to its new name."""
        try:
            with self.engine.begin() as conn:
                conn.execute(
                    self.peerJobTable.update().values({
                        "Configuration": NewConfigurationName
                    }).where(self.peerJobTable.columns.Configuration == ConfigurationName)
                )
            self.__getJobs()
            return True, None
        except Exception as e:
            return False, str(e)

    def getPeerJobLogs(self, configurationName):
        """Delegate to the job logger for one configuration's logs."""
        return self.JobLogger.getLogs(configurationName)

    def runJob(self):
        """
        Evaluate every active job once: compare the peer's current value
        against the job's threshold and fire the job's action when the
        condition holds. Fired jobs are soft-deleted afterwards.
        """
        self.cleanJob()
        needToDelete = []
        self.__getJobs()
        for job in self.Jobs:
            c = self.WireguardConfigurations.get(job.Configuration)
            if c is not None:
                f, fp = c.searchPeer(job.Peer)
                if f:
                    if job.Field in ["total_receive", "total_sent", "total_data"]:
                        # Compare numeric usage: current + cumulative vs. threshold.
                        # Assumes the peer exposes total_/cumu_ attributes for
                        # receive, sent and data -- TODO confirm on the Peer class.
                        s = job.Field.split("_")[1]
                        x: float = getattr(fp, f"total_{s}") + getattr(fp, f"cumu_{s}")
                        y: float = float(job.Value)
                    else:
                        # Date-based job: compare "now" against the stored datetime.
                        x: datetime = datetime.now()
                        y: datetime = datetime.strptime(job.Value, "%Y-%m-%d %H:%M:%S")
                    runAction: bool = self.__runJob_Compare(x, y, job.Operator)
                    if runAction:
                        s = False
                        if job.Action == "restrict":
                            s, msg = c.restrictPeers([fp.id])
                        elif job.Action == "delete":
                            s, msg = c.deletePeers([fp.id], self, self.AllPeerShareLinks)
                        elif job.Action == "reset_total_data_usage":
                            s = fp.resetDataUsage("total")
                            # Bounce the peer so the reset takes effect.
                            c.restrictPeers([fp.id])
                            c.allowAccessPeers([fp.id])
                        if s is True:
                            self.JobLogger.log(job.JobID, s,
                                               f"Peer {fp.id} from {c.Name} is successfully {job.Action}ed."
                                               )
                            needToDelete.append(job)
                        else:
                            self.JobLogger.log(job.JobID, s,
                                               f"Peer {fp.id} from {c.Name} failed {job.Action}ed."
                                               )
                else:
                    self.JobLogger.log(job.JobID, False,
                                       f"Somehow can't find this peer {job.Peer} from {c.Name} failed {job.Action}ed."
                                       )
            else:
                self.JobLogger.log(job.JobID, False,
                                   f"Somehow can't find this peer {job.Peer} from {job.Configuration} failed {job.Action}ed."
                                   )
        # Delete outside the iteration loop to avoid mutating self.Jobs mid-scan.
        for j in needToDelete:
            self.deleteJob(j)

    def cleanJob(self):
        """Soft-delete jobs flagged as repeatedly failing by the job logger."""
        failingJobs = self.JobLogger.getFailingJobs()
        with self.engine.begin() as conn:
            for job in failingJobs:
                conn.execute(
                    self.peerJobTable.update().values(
                        {
                            "ExpireDate": datetime.now()
                        }
                    ).where(self.peerJobTable.columns.JobID == job.get('JobID'))
                )
                self.JobLogger.log(job.get('JobID'), Message=f"Job is removed due to being stale.")

    def __runJob_Compare(self, x: float | datetime, y: float | datetime, operator: str):
        """
        Evaluate x <operator> y. Operators: eq, neq, lgt (greater than),
        lst (less than). NOTE(review): an unknown operator falls through and
        returns None, which callers treat as falsy.
        """
        if operator == "eq":
            return x == y
        if operator == "neq":
            return x != y
        if operator == "lgt":
            return x > y
        if operator == "lst":
            return x < y
|
||||
22
src/modules/PeerShareLink.py
Normal file
22
src/modules/PeerShareLink.py
Normal file
@@ -0,0 +1,22 @@
|
||||
from datetime import datetime
|
||||
"""
|
||||
Peer Share Link
|
||||
"""
|
||||
class PeerShareLink:
    """
    A shareable link for one peer of one configuration. A missing expiry is
    normalized to the far-future sentinel 2199-12-31 ("effectively never").
    """
    def __init__(self, ShareID:str, Configuration: str, Peer: str, ExpireDate: datetime, SharedDate: datetime):
        self.ShareID = ShareID
        self.Configuration = Configuration
        self.Peer = Peer
        self.SharedDate = SharedDate
        # Normalize falsy expiry (None) to the sentinel date.
        self.ExpireDate = ExpireDate if ExpireDate else datetime.strptime("2199-12-31","%Y-%m-%d")

    def toJson(self):
        """Serialize to a plain dict; dates become 'YYYY-MM-DD HH:MM:SS' strings."""
        fmt = "%Y-%m-%d %H:%M:%S"
        return {
            "ShareID": self.ShareID,
            "Peer": self.Peer,
            "Configuration": self.Configuration,
            "ExpireDate": self.ExpireDate.strftime(fmt),
            "SharedDate": self.SharedDate.strftime(fmt),
        }
|
||||
89
src/modules/PeerShareLinks.py
Normal file
89
src/modules/PeerShareLinks.py
Normal file
@@ -0,0 +1,89 @@
|
||||
from .ConnectionString import ConnectionString
|
||||
from .PeerShareLink import PeerShareLink
|
||||
import sqlalchemy as db
|
||||
from datetime import datetime
|
||||
import uuid
|
||||
|
||||
"""
|
||||
Peer Share Links
|
||||
"""
|
||||
class PeerShareLinks:
    """
    Manages share links for peers, stored in the 'PeerShareLinks' table of the
    'wgdashboard' database, with an in-memory cache of non-expired links.
    """
    def __init__(self, DashboardConfig, WireguardConfigurations):
        self.Links: list[PeerShareLink] = []
        self.engine = db.create_engine(ConnectionString("wgdashboard"))
        self.metadata = db.MetaData()
        self.peerShareLinksTable = db.Table(
            'PeerShareLinks', self.metadata,
            db.Column('ShareID', db.String(255), nullable=False, primary_key=True),
            db.Column('Configuration', db.String(255), nullable=False),
            db.Column('Peer', db.String(255), nullable=False),
            # SQLite has no TIMESTAMP type; pick the column type per backend.
            db.Column('ExpireDate', (db.DATETIME if DashboardConfig.GetConfig("Database", "type")[1] == 'sqlite' else db.TIMESTAMP)),
            db.Column('SharedDate', (db.DATETIME if DashboardConfig.GetConfig("Database", "type")[1] == 'sqlite' else db.TIMESTAMP),
                      server_default=db.func.now()),
        )
        self.metadata.create_all(self.engine)
        self.__getSharedLinks()
        self.wireguardConfigurations = WireguardConfigurations
    def __getSharedLinks(self):
        """Reload the cache with links that are unexpired (NULL or future ExpireDate)."""
        self.Links.clear()
        with self.engine.connect() as conn:
            allLinks = conn.execute(
                self.peerShareLinksTable.select().where(
                    db.or_(self.peerShareLinksTable.columns.ExpireDate.is_(None), self.peerShareLinksTable.columns.ExpireDate > datetime.now())
                )
            ).mappings().fetchall()
            for link in allLinks:
                # Row column names match PeerShareLink's constructor parameters.
                self.Links.append(PeerShareLink(**link))

    def getLink(self, Configuration: str, Peer: str) -> list[PeerShareLink]:
        """Active links for one peer of one configuration (refreshes cache first)."""
        self.__getSharedLinks()
        return list(filter(lambda x : x.Configuration == Configuration and x.Peer == Peer, self.Links))

    def getLinkByID(self, ShareID: str) -> list[PeerShareLink]:
        """Active link(s) matching ShareID (refreshes cache first)."""
        self.__getSharedLinks()
        return list(filter(lambda x : x.ShareID == ShareID, self.Links))

    def addLink(self, Configuration: str, Peer: str, ExpireDate: datetime = None) -> tuple[bool, str]:
        """
        Create a new share link for the peer. Any currently active link for the
        same peer is expired first, so a peer keeps at most one active link.
        @return: (True, new ShareID) or (False, error string)
        """
        try:
            newShareID = str(uuid.uuid4())
            with self.engine.begin() as conn:
                if len(self.getLink(Configuration, Peer)) > 0:
                    # Expire previous link(s) for this peer immediately.
                    conn.execute(
                        self.peerShareLinksTable.update().values(
                            {
                                "ExpireDate": datetime.now()
                            }
                        ).where(db.and_(self.peerShareLinksTable.columns.Configuration == Configuration, self.peerShareLinksTable.columns.Peer == Peer))
                    )

                conn.execute(
                    self.peerShareLinksTable.insert().values(
                        {
                            "ShareID": newShareID,
                            "Configuration": Configuration,
                            "Peer": Peer,
                            # None means the link never expires (see __getSharedLinks).
                            "ExpireDate": ExpireDate
                        }
                    )
                )
            self.__getSharedLinks()
            # Let the peer object re-read its share link.
            self.wireguardConfigurations.get(Configuration).searchPeer(Peer)[1].getShareLink()
        except Exception as e:
            return False, str(e)
        return True, newShareID

    def updateLinkExpireDate(self, ShareID, ExpireDate: datetime = None) -> tuple[bool, str]:
        """
        Set (or clear, with None) the expiry date of an existing link.
        NOTE(review): if ShareID does not match any row, fetchone() returns
        None and the attribute access below raises -- confirm callers always
        pass a valid ID.
        """
        with self.engine.begin() as conn:
            updated = conn.execute(
                self.peerShareLinksTable.update().values(
                    {
                        "ExpireDate": ExpireDate
                    }
                ).returning(self.peerShareLinksTable.c.Configuration, self.peerShareLinksTable.c.Peer)
                .where(self.peerShareLinksTable.columns.ShareID == ShareID)
            ).mappings().fetchone()
        self.__getSharedLinks()
        self.wireguardConfigurations.get(updated.Configuration).searchPeer(updated.Peer)[1].getShareLink()
        return True, ""
|
||||
204
src/modules/SystemStatus.py
Normal file
204
src/modules/SystemStatus.py
Normal file
@@ -0,0 +1,204 @@
|
||||
import shutil, subprocess, time, threading, psutil
|
||||
from flask import current_app
|
||||
|
||||
class SystemStatus:
    """Aggregates host metrics (CPU, memory, disks, network, processes) for the dashboard."""
    def __init__(self):
        self.CPU = CPU()
        self.MemoryVirtual = Memory('virtual')
        self.MemorySwap = Memory('swap')
        self.Disks = Disks()
        self.NetworkInterfaces = NetworkInterfaces()
        self.Processes = Processes()
    def toJson(self):
        # CPU sampling and the network delta sample each block for ~1 second,
        # so run them on parallel threads to keep total latency near 1s.
        process = [
            threading.Thread(target=self.CPU.getCPUPercent),
            threading.Thread(target=self.CPU.getPerCPUPercent),
            threading.Thread(target=self.NetworkInterfaces.getData)
        ]
        for p in process:
            p.start()
        for p in process:
            p.join()

        # NOTE(review): values are objects exposing their own toJson(); this
        # relies on the app's JSON encoder knowing how to serialize them -- confirm.
        return {
            "CPU": self.CPU,
            "Memory": {
                "VirtualMemory": self.MemoryVirtual,
                "SwapMemory": self.MemorySwap
            },
            "Disks": self.Disks,
            "NetworkInterfaces": self.NetworkInterfaces,
            "NetworkInterfacesPriority": self.NetworkInterfaces.getInterfacePriorities(),
            "Processes": self.Processes
        }
|
||||
|
||||
|
||||
class CPU:
    """Samples overall and per-core CPU utilisation via psutil."""
    def __init__(self):
        self.cpu_percent: float = 0                 # overall utilisation, last sample
        self.cpu_percent_per_cpu: list[float] = []  # one value per logical CPU

    def getCPUPercent(self):
        # Blocks for ~1 second: psutil measures over the given interval.
        try:
            self.cpu_percent = psutil.cpu_percent(interval=1)
        except Exception as e:
            # NOTE(review): logger.error(msg, e) passes the exception as a
            # %-format argument without a placeholder -- likely intended exc_info=e.
            current_app.logger.error("Get CPU Percent error", e)

    def getPerCPUPercent(self):
        # Blocks for ~1 second; returns one percentage per logical CPU.
        try:
            self.cpu_percent_per_cpu = psutil.cpu_percent(interval=1, percpu=True)
        except Exception as e:
            current_app.logger.error("Get Per CPU Percent error", e)

    def toJson(self):
        # Serializes the stored samples; does not trigger a new measurement.
        return self.__dict__
|
||||
|
||||
class Memory:
    """Snapshot of memory usage; memoryType selects 'virtual' or swap memory."""
    def __init__(self, memoryType: str):
        self.__memoryType__ = memoryType  # which psutil source getData() samples
        self.total = 0
        self.available = 0
        self.percent = 0
    def getData(self):
        try:
            if self.__memoryType__ == "virtual":
                memory = psutil.virtual_memory()
                self.available = memory.available
            else:
                # Swap has no 'available' field; 'free' is the closest equivalent.
                memory = psutil.swap_memory()
                self.available = memory.free
            self.total = memory.total

            self.percent = memory.percent
        except Exception as e:
            current_app.logger.error("Get Memory percent error", e)
    def toJson(self):
        # Refresh before serializing so callers always see current numbers.
        self.getData()
        return self.__dict__
|
||||
|
||||
class Disks:
    """Usage snapshots for every mounted partition."""
    def __init__(self):
        self.disks : list[Disk] = []
    def getData(self):
        # Rebuild one Disk per mount point reported by psutil.
        try:
            self.disks = list(map(lambda x : Disk(x.mountpoint), psutil.disk_partitions()))
        except Exception as e:
            current_app.logger.error("Get Disk percent error", e)
    def toJson(self):
        # Refresh the partition list before serializing.
        self.getData()
        return self.disks
|
||||
|
||||
class Disk:
    """Usage numbers for a single mount point."""
    def __init__(self, mountPoint: str):
        self.total = 0
        self.used = 0
        self.free = 0
        self.percent = 0
        self.mountPoint = mountPoint
    def getData(self):
        try:
            disk = psutil.disk_usage(self.mountPoint)
            self.total = disk.total
            self.free = disk.free
            self.used = disk.used
            self.percent = disk.percent
        except Exception as e:
            # e.g. permission denied on some mounts; numbers stay at their defaults.
            current_app.logger.error("Get Disk percent error", e)
    def toJson(self):
        # Refresh before serializing.
        self.getData()
        return self.__dict__
|
||||
|
||||
class NetworkInterfaces:
    """Per-NIC I/O counters plus a 1-second realtime throughput sample."""
    def __init__(self):
        self.interfaces = {}

    def getInterfacePriorities(self):
        """
        Map device name -> route metric parsed from `ip route show`.
        Returns {} when the `ip` tool is not installed.
        """
        if shutil.which("ip"):
            result = subprocess.check_output(["ip", "route", "show"]).decode()
            priorities = {}
            for line in result.splitlines():
                if "metric" in line and "dev" in line:
                    parts = line.split()
                    dev = parts[parts.index("dev")+1]
                    metric = int(parts[parts.index("metric")+1])
                    if dev not in priorities:
                        # Keep only the first metric seen per device.
                        priorities[dev] = metric
            return priorities
        return {}

    def getData(self):
        # Take two counter snapshots one second apart; 'realtime' holds the
        # delta converted to MB transferred during that second.
        self.interfaces.clear()
        try:
            network = psutil.net_io_counters(pernic=True, nowrap=True)
            for i in network.keys():
                self.interfaces[i] = network[i]._asdict()
            time.sleep(1)
            network = psutil.net_io_counters(pernic=True, nowrap=True)
            # NOTE(review): a NIC appearing between the two snapshots raises
            # KeyError here, silently caught by the broad except below.
            for i in network.keys():
                self.interfaces[i]['realtime'] = {
                    'sent': round((network[i].bytes_sent - self.interfaces[i]['bytes_sent']) / 1024 / 1024, 4),
                    'recv': round((network[i].bytes_recv - self.interfaces[i]['bytes_recv']) / 1024 / 1024, 4)
                }
        except Exception as e:
            current_app.logger.error("Get network error", e)

    def toJson(self):
        # Serializes the last sample; does not trigger a new measurement.
        return self.interfaces
|
||||
|
||||
class Process:
    """Lightweight record of one OS process (name, command line, pid, usage %)."""
    def __init__(self, name, command, pid, percent):
        # Assign in a fixed order so the serialized dict lists name first.
        for attribute, value in (("name", name), ("command", command),
                                 ("pid", pid), ("percent", percent)):
            setattr(self, attribute, value)

    def toJson(self):
        """Serialize as a plain dict of the instance attributes."""
        return self.__dict__
|
||||
|
||||
class Processes:
    """Top processes ranked by CPU and by memory usage, sampled via psutil."""
    def __init__(self):
        # NOTE(review): attribute names say "Top_10" but getData() keeps 20
        # entries each -- naming/slice inconsistency.
        self.CPU_Top_10_Processes: list[Process] = []
        self.Memory_Top_10_Processes: list[Process] = []
    def getData(self):
        """Scan all processes and rank them by CPU and memory percentage."""
        try:
            processes = list(psutil.process_iter())

            cpu_processes = []
            memory_processes = []

            for proc in processes:
                try:
                    name = proc.name()
                    cmdline = " ".join(proc.cmdline())
                    pid = proc.pid
                    # NOTE(review): cpu_percent() without a prior baseline call
                    # may report 0.0 for every process -- confirm ranking intent.
                    cpu_percent = proc.cpu_percent()
                    mem_percent = proc.memory_percent()

                    # Create Process object for CPU and memory tracking
                    cpu_process = Process(name, cmdline, pid, cpu_percent)
                    mem_process = Process(name, cmdline, pid, mem_percent)

                    cpu_processes.append(cpu_process)
                    memory_processes.append(mem_process)

                except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                    # Skip processes we can’t access or that no longer exist
                    continue

            # Sort by CPU and memory usage (descending order)
            cpu_sorted = sorted(cpu_processes, key=lambda p: p.percent, reverse=True)
            mem_sorted = sorted(memory_processes, key=lambda p: p.percent, reverse=True)

            # Get top 20 processes for each
            self.CPU_Top_10_Processes = cpu_sorted[:20]
            self.Memory_Top_10_Processes = mem_sorted[:20]

        except Exception as e:
            current_app.logger.error("Get processes error", e)

    def toJson(self):
        # Refresh before serializing; a full scan may take noticeable time.
        self.getData()
        return {
            "cpu_top_10": self.CPU_Top_10_Processes,
            "memory_top_10": self.Memory_Top_10_Processes
        }
|
||||
104
src/modules/Utilities.py
Normal file
104
src/modules/Utilities.py
Normal file
@@ -0,0 +1,104 @@
|
||||
import re, ipaddress
|
||||
import subprocess
|
||||
|
||||
|
||||
def RegexMatch(regex, text) -> bool:
    """
    Regex Match
    @param regex: Regex pattern
    @param text: Text to match
    @return: True when the pattern matches anywhere in the text
    """
    return re.compile(regex).search(text) is not None
|
||||
|
||||
def GetRemoteEndpoint() -> str:
    """
    Using socket to determine default interface IP address. Thanks, @NOXICS
    A UDP connect() sends no packet; it only makes the OS pick the outgoing
    interface, whose address is then read back via getsockname().
    @return: the local IP address of the default-route interface, as a string
    """
    import socket
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("1.1.1.1", 80))  # Connecting to a public IP
        wgd_remote_endpoint = s.getsockname()[0]
    return str(wgd_remote_endpoint)
|
||||
|
||||
|
||||
def StringToBoolean(value: str):
    """
    Convert a configuration-file string into a boolean.
    @param value: Boolean value in string form from the configuration file
    @return: True for "yes"/"true"/"t"/"1" (case-insensitive, whitespace ignored)
    """
    normalized = value.strip().replace(" ", "").lower()
    return normalized in ("yes", "true", "t", "1", 1)
|
||||
|
||||
def ValidateIPAddressesWithRange(ips: str) -> bool:
    """
    Validate a comma-separated list of CIDR networks.
    Uses strict parsing, so entries with host bits set are rejected.
    @return: False as soon as one entry fails to parse, else True
    """
    for candidate in ips.replace(" ", "").split(","):
        try:
            ipaddress.ip_network(candidate)
        except ValueError:
            return False
    return True
|
||||
|
||||
def ValidateIPAddresses(ips) -> bool:
    """
    Validate a comma-separated list of plain IP addresses (v4 or v6, no CIDR).
    @return: False as soon as one entry fails to parse, else True
    """
    for candidate in ips.replace(" ", "").split(","):
        try:
            ipaddress.ip_address(candidate)
        except ValueError:
            return False
    return True
|
||||
|
||||
def ValidateDNSAddress(addresses) -> tuple[bool, str]:
    """
    Validate a comma-separated list of DNS servers; each entry may be an IP
    address or a lowercase hostname.
    @param addresses: comma-separated DNS entries
    @return: (True, "") when every entry is valid, otherwise
             (False, message naming the first invalid entry)
    """
    s = addresses.replace(" ", "").split(",")
    for address in s:
        if not ValidateIPAddresses(address) and not RegexMatch(
                r"(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z][a-z]{0,61}[a-z]", address):
            # Fixed grammar in the user-facing message ("an valid" -> "a valid").
            return False, f"{address} does not appear to be a valid DNS address"
    return True, ""
|
||||
|
||||
def ValidateEndpointAllowedIPs(IPs) -> tuple[bool, str] | tuple[bool, None]:
|
||||
ips = IPs.replace(" ", "").split(",")
|
||||
for ip in ips:
|
||||
try:
|
||||
ipaddress.ip_network(ip, strict=False)
|
||||
except ValueError as e:
|
||||
return False, str(e)
|
||||
return True, None
|
||||
|
||||
def GenerateWireguardPublicKey(privateKey: str) -> tuple[bool, str] | tuple[bool, None]:
    """
    Derive the WireGuard public key for a private key via `wg pubkey`.
    @param privateKey: base64 WireGuard private key
    @return: (True, public key) on success, (False, None) when `wg` rejects
             the input or the binary is unavailable
    """
    try:
        # Pass argv as a list instead of shell=True: the command is static,
        # so no shell is needed (avoids an extra /bin/sh invocation).
        publicKey = subprocess.check_output(["wg", "pubkey"], input=privateKey.encode(),
                                            stderr=subprocess.STDOUT)
        return True, publicKey.decode().strip('\n')
    except (subprocess.CalledProcessError, OSError):
        # OSError covers a missing `wg` binary, which shell=True previously
        # surfaced as a CalledProcessError (exit 127).
        return False, None
|
||||
|
||||
def GenerateWireguardPrivateKey() -> tuple[bool, str] | tuple[bool, None]:
    """
    Generate a fresh WireGuard private key via `wg genkey`.
    @return: (True, private key) on success, (False, None) when the `wg`
             binary fails or is unavailable
    """
    try:
        # List argv instead of shell=True (static command, no shell features
        # needed); local renamed from the misleading `publicKey`.
        privateKey = subprocess.check_output(["wg", "genkey"],
                                             stderr=subprocess.STDOUT)
        return True, privateKey.decode().strip('\n')
    except (subprocess.CalledProcessError, OSError):
        # OSError covers a missing `wg` binary, which shell=True previously
        # surfaced as a CalledProcessError (exit 127).
        return False, None
|
||||
|
||||
def ValidatePasswordStrength(password: str) -> tuple[bool, str] | tuple[bool, None]:
|
||||
# Rules:
|
||||
# - Must be over 8 characters & numbers
|
||||
# - Must contain at least 1 Uppercase & Lowercase letters
|
||||
# - Must contain at least 1 Numbers (0-9)
|
||||
# - Must contain at least 1 special characters from $&+,:;=?@#|'<>.-^*()%!~_-
|
||||
if len(password) < 8:
|
||||
return False, "Password must be 8 characters or more"
|
||||
if not re.search(r'[a-z]', password):
|
||||
return False, "Password must contain at least 1 lowercase character"
|
||||
if not re.search(r'[A-Z]', password):
|
||||
return False, "Password must contain at least 1 uppercase character"
|
||||
if not re.search(r'\d', password):
|
||||
return False, "Password must contain at least 1 number"
|
||||
if not re.search(r'[$&+,:;=?@#|\'<>.\-^*()%!~_-]', password):
|
||||
return False, "Password must contain at least 1 special character from $&+,:;=?@#|'<>.-^*()%!~_-"
|
||||
|
||||
return True, None
|
||||
1236
src/modules/WireguardConfiguration.py
Normal file
1236
src/modules/WireguardConfiguration.py
Normal file
File diff suppressed because it is too large
Load Diff
21
src/modules/WireguardConfigurationInfo.py
Normal file
21
src/modules/WireguardConfigurationInfo.py
Normal file
@@ -0,0 +1,21 @@
|
||||
from pydantic import BaseModel
|
||||
|
||||
class OverridePeerSettingsClass(BaseModel):
    # Per-configuration overrides applied to peers; an empty string appears to
    # mean "no override, use the peer's own value" -- TODO confirm in consumers.
    DNS: str = ''
    EndpointAllowedIPs: str = ''
    MTU: str | int = ''
    PersistentKeepalive: int | str = ''
    PeerRemoteEndpoint: str = ''
    ListenPort: int | str = ''
|
||||
|
||||
class PeerGroupsClass(BaseModel):
    # A named group of peers for dashboard organisation/display.
    GroupName: str = ''
    Description: str = ''
    BackgroundColor: str = ''
    Icon: str = ''
    Peers: list[str] = []  # pydantic deep-copies mutable defaults per instance
|
||||
|
||||
class WireguardConfigurationInfo(BaseModel):
    # Dashboard-side metadata attached to one WireGuard configuration.
    Description: str = ''
    OverridePeerSettings: OverridePeerSettingsClass = OverridePeerSettingsClass(**{})
    # Keyed by a group identifier string -- TODO confirm key semantics at call sites.
    PeerGroups: dict[str, PeerGroupsClass] = {}
|
||||
@@ -1,9 +1,17 @@
|
||||
bcrypt
|
||||
ifcfg
|
||||
psutil
|
||||
pyotp
|
||||
Flask
|
||||
flask-cors
|
||||
icmplib
|
||||
gunicorn
|
||||
requests
|
||||
bcrypt==5.0.0
|
||||
ifcfg==0.24
|
||||
psutil==7.1.3
|
||||
pyotp==2.9.0
|
||||
Flask==3.1.2
|
||||
flask-cors==6.0.1
|
||||
icmplib==3.0.4
|
||||
gunicorn==23.0.0
|
||||
requests==2.32.5
|
||||
tcconfig==0.30.1
|
||||
sqlalchemy==2.0.44
|
||||
sqlalchemy_utils==0.42.0
|
||||
psycopg==3.2.12
|
||||
PyMySQL==1.1.2
|
||||
tzlocal==5.3.1
|
||||
python-jose==3.5.0
|
||||
pydantic==2.12.3
|
||||
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
import{_ as r,c as i,d as o,w as e,j as l,a as t,T as _,i as a,l as d,S as u}from"./index-DeLT-ag4.js";const m={name:"configuration"},p={class:"mt-md-5 mt-3 text-body"};function f(x,h,k,w,$,v){const n=l("RouterView");return t(),i("div",p,[o(n,null,{default:e(({Component:s,route:c})=>[o(_,{name:"fade2",mode:"out-in"},{default:e(()=>[(t(),a(u,null,{default:e(()=>[(t(),a(d(s),{key:c.path}))]),_:2},1024))]),_:2},1024)]),_:1})])}const B=r(m,[["render",f]]);export{B as default};
|
||||
@@ -1 +0,0 @@
|
||||
.confirmationContainer[data-v-a575be12]{background-color:#00000087;z-index:9999;backdrop-filter:blur(1px);-webkit-backdrop-filter:blur(1px)}.list1-enter-active[data-v-a575be12]{transition-delay:var(--6919ade8)!important}.card[data-v-1f718118],.title[data-v-1f718118]{width:100%}@media screen and (min-width: 700px){.card[data-v-1f718118],.title[data-v-1f718118]{width:700px}}.animate__fadeInUp[data-v-1f718118]{animation-timing-function:cubic-bezier(.42,0,.22,1)}.list1-move[data-v-1f718118],.list1-enter-active[data-v-1f718118],.list1-leave-active[data-v-1f718118]{transition:all .5s cubic-bezier(.42,0,.22,1)}.list1-enter-from[data-v-1f718118],.list1-leave-to[data-v-1f718118]{opacity:0;transform:translateY(30px)}.list1-leave-active[data-v-1f718118]{width:100%;position:absolute}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
.fade-enter-active[data-v-a85a04a5]{transition-delay:var(--1d5189b2)!important}.configurationListTitle{.btn[data-v-16b5ab33]{border-radius:50%!important}}
|
||||
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
import{$ as w,r as c,H as x,D as B,o as _,a as l,c as b,b as t,d as o,n as D,m as $,s as N,B as m,i as v,q as M,g as T}from"./index-DeLT-ag4.js";import{L as s}from"./localeText-Bm68I_nB.js";const I={class:"peerSettingContainer w-100 h-100 position-absolute top-0 start-0 overflow-y-scroll"},R={class:"container d-flex h-100 w-100"},S={class:"m-auto modal-dialog-centered dashboardModal",style:{width:"700px"}},V={class:"card rounded-3 shadow flex-grow-1 bg-danger-subtle border-danger-subtle",id:"deleteConfigurationContainer"},A={class:"card-header bg-transparent d-flex align-items-center gap-2 border-0 p-4 pb-0"},L={class:"mb-0"},P={class:"card-body px-4 text-muted"},W={class:"mb-0"},q={key:0},z={key:1},E={key:2,class:"d-flex align-items-center gap-2"},G=["placeholder"],H=["disabled"],J={__name:"deleteConfiguration",emits:["backup"],setup(O,{emit:k}){const i=w().params.id,g=c(""),h=x(),p=B(),r=c(!1),y=()=>{clearInterval(p.Peers.RefreshInterval),r.value=!0,M("/api/deleteWireguardConfiguration",{Name:i},n=>{n.status?(h.push("/"),p.newMessage("Server","Configuration deleted","success")):r.value=!1})},u=c(!0),d=c([]),f=()=>{u.value=!0,T("/api/getWireguardConfigurationBackup",{configurationName:i},n=>{d.value=n.data,u.value=!1})};_(()=>{f()});const C=k;return(n,e)=>(l(),b("div",I,[t("div",R,[t("div",S,[t("div",V,[t("div",A,[t("h5",L,[o(s,{t:"Are you sure to delete this configuration?"})]),t("button",{type:"button",class:"btn-close ms-auto",onClick:e[0]||(e[0]=a=>n.$emit("close"))})]),t("div",P,[t("p",W,[o(s,{t:"Once you deleted this configuration:"})]),t("ul",null,[t("li",null,[o(s,{t:"All connected peers will get disconnected"})]),t("li",null,[o(s,{t:"Both configuration file (.conf) and database table related to this configuration will get deleted"})])]),t("div",{class:D(["alert",[u.value?"alert-secondary":d.value.length>0?"alert-success":"alert-danger"]])},[u.value?(l(),b("div",q,[e[5]||(e[5]=t("i",{class:"bi bi-search me-2"},null,-1)),o(s,{t:"Checking 
backups..."})])):d.value.length>0?(l(),b("div",z,[e[6]||(e[6]=t("i",{class:"bi bi-check-circle-fill me-2"},null,-1)),o(s,{t:"This configuration have "+d.value.length+" backups"},null,8,["t"])])):(l(),b("div",E,[e[9]||(e[9]=t("i",{class:"bi bi-x-circle-fill me-2"},null,-1)),o(s,{t:"This configuration have no backup"}),t("a",{role:"button",onClick:e[1]||(e[1]=a=>C("backup")),class:"ms-auto btn btn-sm btn-primary rounded-3"},[e[7]||(e[7]=t("i",{class:"bi bi-clock-history me-2"},null,-1)),o(s,{t:"Backup"})]),t("a",{role:"button",onClick:e[2]||(e[2]=a=>f()),class:"btn btn-sm btn-primary rounded-3"},e[8]||(e[8]=[t("i",{class:"bi bi-arrow-clockwise"},null,-1)]))]))],2),e[11]||(e[11]=t("hr",null,null,-1)),t("p",null,[o(s,{t:"If you're sure, please type in the configuration name below and click Delete"})]),$(t("input",{class:"form-control rounded-3 mb-3",placeholder:m(i),"onUpdate:modelValue":e[3]||(e[3]=a=>g.value=a),type:"text"},null,8,G),[[N,g.value]]),t("button",{class:"btn btn-danger w-100",onClick:e[4]||(e[4]=a=>y()),disabled:g.value!==m(i)||r.value},[e[10]||(e[10]=t("i",{class:"bi bi-trash-fill me-2 rounded-3"},null,-1)),r.value?(l(),v(s,{key:1,t:"Deleting..."})):(l(),v(s,{key:0,t:"Delete"}))],8,H)])])])])]))}};export{J as default};
|
||||
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
@media screen and (max-width: 567px){.inputGroup{&[data-v-4be4f48a]{flex-direction:column}h3[data-v-4be4f48a]{transform:rotate(90deg)}}}
|
||||
@@ -1 +0,0 @@
|
||||
@media screen and (max-width: 768px){.navbar-container[data-v-83a7789f]{position:absolute;z-index:1000;animation-duration:.4s;animation-fill-mode:both;display:none;animation-timing-function:cubic-bezier(.82,.58,.17,.9)}.navbar-container.active[data-v-83a7789f]{animation-direction:normal;display:block!important;animation-name:zoomInFade-83a7789f}}.navbar-container[data-v-83a7789f]{height:100vh}@supports (height: 100dvh){@media screen and (max-width: 768px){.navbar-container[data-v-83a7789f]{height:calc(100dvh - 50px)}}}@keyframes zoomInFade-83a7789f{0%{opacity:0;transform:translateY(60px);filter:blur(3px)}to{opacity:1;transform:translateY(0);filter:blur(0px)}}.messageCentre[data-v-ce114a8b]{top:1rem;right:1rem;width:calc(100% - 2rem)}main[data-v-ce114a8b]{height:100vh}@supports (height: 100dvh){@media screen and (max-width: 768px){main[data-v-ce114a8b]{height:calc(100dvh - 50px)}}}
|
||||
1
src/static/app/dist/assets/index-ClszkkhL.js
vendored
1
src/static/app/dist/assets/index-ClszkkhL.js
vendored
File diff suppressed because one or more lines are too long
15
src/static/app/dist/assets/index-D2eeEsuX.css
vendored
15
src/static/app/dist/assets/index-D2eeEsuX.css
vendored
File diff suppressed because one or more lines are too long
44
src/static/app/dist/assets/index-DeLT-ag4.js
vendored
44
src/static/app/dist/assets/index-DeLT-ag4.js
vendored
File diff suppressed because one or more lines are too long
1
src/static/app/dist/assets/index-L60y6kc9.js
vendored
1
src/static/app/dist/assets/index-L60y6kc9.js
vendored
@@ -1 +0,0 @@
|
||||
function f(e){return e.includes(":")?6:e.includes(".")?4:0}function b(e){const i=f(e);if(!i)throw new Error(`Invalid IP address: ${e}`);let n=0n,o=0n;const r=Object.create(null);if(i===4)for(const s of e.split(".").map(BigInt).reverse())n+=s*2n**o,o+=8n;else{if(e.includes(".")&&(r.ipv4mapped=!0,e=e.split(":").map(t=>{if(t.includes(".")){const[c,l,d,a]=t.split(".").map($=>Number($).toString(16).padStart(2,"0"));return`${c}${l}:${d}${a}`}else return t}).join(":")),e.includes("%")){let t;[,e,t]=/(.+)%(.+)/.exec(e)||[],r.scopeid=t}const s=e.split(":"),u=s.indexOf("");if(u!==-1)for(;s.length<8;)s.splice(u,0,"");for(const t of s.map(c=>BigInt(parseInt(c||"0",16))).reverse())n+=t*2n**o,o+=16n}return r.number=n,r.version=i,r}const p={4:32,6:128},I=e=>e.includes("/")?f(e):0;function m(e){const i=I(e),n=Object.create(null);if(i)n.cidr=e,n.version=i;else{const a=f(e);if(a)n.cidr=`${e}/${p[a]}`,n.version=a;else throw new Error(`Network is not a CIDR or IP: ${e}`)}const[o,r]=n.cidr.split("/");if(!/^[0-9]+$/.test(r))throw new Error(`Network is not a CIDR or IP: ${e}`);n.prefix=r,n.single=r===String(p[n.version]);const{number:s,version:u}=b(o),t=p[u],c=s.toString(2).padStart(t,"0"),l=Number(t-r),d=c.substring(0,t-l);return n.start=BigInt(`0b${d}${"0".repeat(l)}`),n.end=BigInt(`0b${d}${"1".repeat(l)}`),n}export{m as p};
|
||||
@@ -1 +0,0 @@
|
||||
import{_ as t,G as e,t as o}from"./index-DeLT-ag4.js";const s={name:"localeText",props:{t:""},computed:{getLocaleText(){return e(this.t)}}};function a(c,r,n,p,_,i){return o(this.getLocaleText)}const x=t(s,[["render",a]]);export{x as L};
|
||||
BIN
src/static/app/dist/assets/logo-XE_HdY0J.png
vendored
BIN
src/static/app/dist/assets/logo-XE_HdY0J.png
vendored
Binary file not shown.
|
Before Width: | Height: | Size: 49 KiB |
@@ -1 +0,0 @@
|
||||
import{L as c}from"./localeText-Bm68I_nB.js";import{d as n}from"./dayjs.min-BXJG8oxU.js";import{_ as d,a as r,c as m,b as s,d as i,f as t,t as e,n as l,j as _}from"./index-DeLT-ag4.js";const p={name:"message",methods:{dayjs:n},components:{LocaleText:c},props:{message:Object},mounted(){setTimeout(()=>{this.message.show=!1},5e3)}},g=["id"],h={class:"card-body"},f={class:"d-flex"},x={class:"fw-bold d-block",style:{"text-transform":"uppercase"}},u={class:"ms-auto"};function b(y,v,w,T,j,a){const o=_("LocaleText");return r(),m("div",{class:l(["card shadow rounded-3 position-relative message ms-auto",{"text-bg-danger":this.message.type==="danger","text-bg-success":this.message.type==="success","text-bg-warning":this.message.type==="warning"}]),id:this.message.id},[s("div",h,[s("div",f,[s("small",x,[i(o,{t:"FROM "}),t(" "+e(this.message.from),1)]),s("small",u,e(a.dayjs().format("hh:mm A")),1)]),t(" "+e(this.message.content),1)])],10,g)}const M=d(p,[["render",b],["__scopeId","data-v-f50b8f0c"]]);export{M};
|
||||
@@ -1 +0,0 @@
|
||||
.message[data-v-f50b8f0c]{width:100%}@media screen and (min-width: 576px){.message[data-v-f50b8f0c]{width:400px}}
|
||||
File diff suppressed because one or more lines are too long
10
src/static/app/dist/assets/osmap-BpsUKiQ8.js
vendored
10
src/static/app/dist/assets/osmap-BpsUKiQ8.js
vendored
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
.slide-up-enter-active[data-v-fcd3ae95],.slide-up-leave-active[data-v-fcd3ae95]{transition:all .2s cubic-bezier(.42,0,.22,1)}.slide-up-enter-from[data-v-fcd3ae95],.slide-up-leave-to[data-v-fcd3ae95]{opacity:0;transform:scale(.9)}@keyframes spin-fcd3ae95{0%{transform:rotate(0)}to{transform:rotate(360deg)}}#check[data-v-fcd3ae95]{animation:cubic-bezier(.42,0,.22,1.3) .7s spin-fcd3ae95}
|
||||
@@ -1 +0,0 @@
|
||||
import{_ as f,D as m,r as _,a as s,c as a,b as e,d as l,w as g,T as h}from"./index-DeLT-ag4.js";import{L as v}from"./localeText-Bm68I_nB.js";const y={class:"peerSettingContainer w-100 h-100 position-absolute top-0 start-0"},x={class:"container d-flex h-100 w-100"},w={class:"m-auto modal-dialog-centered dashboardModal justify-content-center"},C={class:"card rounded-3 shadow w-100"},k={class:"card-header bg-transparent d-flex align-items-center gap-2 border-0 p-4 pb-0"},F={class:"mb-0"},T={class:"card-body p-4"},D={class:"d-flex"},S=["disabled"],B={key:0,class:"d-block"},M={key:1,class:"d-block",id:"check"},G=["value"],L={__name:"peerConfigurationFile",props:{configurationFile:String},emits:["close"],setup(i,{emit:r}){const c=r,d=i,n=m(),o=_(!1),u=async()=>{navigator.clipboard&&navigator.clipboard.writeText?navigator.clipboard.writeText(d.configurationFile).then(()=>{o.value=!0,setTimeout(()=>{o.value=!1},3e3)}).catch(()=>{n.newMessage("WGDashboard","Failed to copy","danger")}):(document.querySelector("#peerConfigurationFile").select(),document.execCommand("copy")?(o.value=!0,setTimeout(()=>{o.value=!1},3e3)):n.newMessage("WGDashboard","Failed to copy","danger"))};return(p,t)=>(s(),a("div",y,[e("div",x,[e("div",w,[e("div",C,[e("div",k,[e("h4",F,[l(v,{t:"Peer Configuration File"})]),e("button",{type:"button",class:"btn-close ms-auto",onClick:t[0]||(t[0]=b=>c("close"))})]),e("div",T,[e("div",D,[e("button",{onClick:t[1]||(t[1]=b=>u()),disabled:o.value,class:"ms-auto btn bg-primary-subtle border-primary-subtle text-primary-emphasis rounded-3 position-relative"},[l(h,{name:"slide-up",mode:"out-in"},{default:g(()=>[o.value?(s(),a("span",M,t[3]||(t[3]=[e("i",{class:"bi bi-check-circle-fill"},null,-1)]))):(s(),a("span",B,t[2]||(t[2]=[e("i",{class:"bi bi-clipboard-fill"},null,-1)])))]),_:1})],8,S)]),e("textarea",{style:{height:"300px"},class:"form-control w-100 rounded-3 
mt-2",id:"peerConfigurationFile",value:i.configurationFile},null,8,G)])])])])]))}},W=f(L,[["__scopeId","data-v-fcd3ae95"]]);export{W as default};
|
||||
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
.list-move[data-v-6d5fc831],.list-enter-active[data-v-6d5fc831],.list-leave-active[data-v-6d5fc831]{transition:all .3s ease}.list-enter-from[data-v-6d5fc831],.list-leave-to[data-v-6d5fc831]{opacity:0;transform:translateY(10px)}.list-leave-active[data-v-6d5fc831]{position:absolute}.peerSettingContainer[data-v-ddffd6ec]{background-color:#00000060;z-index:9998}div[data-v-ddffd6ec]{transition:.2s ease-in-out}.inactiveField[data-v-ddffd6ec]{opacity:.4}.card[data-v-ddffd6ec]{max-height:100%}
|
||||
@@ -1 +0,0 @@
|
||||
import{S as p,a as b}from"./schedulePeerJob-B4AuR_O4.js";import{_ as h,W as u,p as m,j as i,a as o,c as a,b as e,d as r,w as _,F as v,h as f,i as J,e as x,k as g}from"./index-DeLT-ag4.js";import{L as w}from"./localeText-Bm68I_nB.js";import"./vue-datepicker-BYU1fpEw.js";import"./dayjs.min-BXJG8oxU.js";const P={name:"peerJobs",setup(){return{store:u()}},props:{selectedPeer:Object},components:{LocaleText:w,SchedulePeerJob:p,ScheduleDropdown:b},data(){return{}},methods:{deleteJob(d){this.selectedPeer.jobs=this.selectedPeer.jobs.filter(t=>t.JobID!==d.JobID)},addJob(){this.selectedPeer.jobs.unshift(JSON.parse(JSON.stringify({JobID:m().toString(),Configuration:this.selectedPeer.configuration.Name,Peer:this.selectedPeer.id,Field:this.store.PeerScheduleJobs.dropdowns.Field[0].value,Operator:this.store.PeerScheduleJobs.dropdowns.Operator[0].value,Value:"",CreationDate:"",ExpireDate:"",Action:this.store.PeerScheduleJobs.dropdowns.Action[0].value})))}}},S={class:"peerSettingContainer w-100 h-100 position-absolute top-0 start-0 overflow-y-scroll"},y={class:"container d-flex h-100 w-100"},$={class:"m-auto modal-dialog-centered dashboardModal"},C={class:"card rounded-3 shadow",style:{width:"700px"}},D={class:"card-header bg-transparent d-flex align-items-center gap-2 border-0 p-4 pb-2"},j={class:"mb-0 fw-normal"},k={class:"card-body px-4 pb-4 pt-2 position-relative"},N={class:"d-flex align-items-center mb-3"},T={class:"card shadow-sm",key:"none",style:{height:"153px"}},I={class:"card-body text-muted text-center d-flex"},L={class:"m-auto"};function O(d,t,B,F,V,A){const n=i("LocaleText"),l=i("SchedulePeerJob");return o(),a("div",S,[e("div",y,[e("div",$,[e("div",C,[e("div",D,[e("h4",j,[r(n,{t:"Schedule Jobs"})]),e("button",{type:"button",class:"btn-close ms-auto",onClick:t[0]||(t[0]=s=>this.$emit("close"))})]),e("div",k,[e("div",N,[e("button",{class:"btn bg-primary-subtle border-1 border-primary-subtle text-primary-emphasis rounded-3 
shadow",onClick:t[1]||(t[1]=s=>this.addJob())},[t[3]||(t[3]=e("i",{class:"bi bi-plus-lg me-2"},null,-1)),r(n,{t:"Job"})])]),r(g,{name:"schedulePeerJobTransition",tag:"div",class:"position-relative"},{default:_(()=>[(o(!0),a(v,null,f(this.selectedPeer.jobs,(s,E)=>(o(),J(l,{onRefresh:t[2]||(t[2]=c=>this.$emit("refresh")),onDelete:c=>this.deleteJob(s),dropdowns:this.store.PeerScheduleJobs.dropdowns,key:s.JobID,pjob:s},null,8,["onDelete","dropdowns","pjob"]))),128)),this.selectedPeer.jobs.length===0?(o(),a("div",T,[e("div",I,[e("h6",L,[r(n,{t:"This peer does not have any job yet."})])])])):x("",!0)]),_:1})])])])])])}const z=h(P,[["render",O],["__scopeId","data-v-5bbdd42b"]]);export{z as default};
|
||||
@@ -1 +0,0 @@
|
||||
import{S as b}from"./schedulePeerJob-B4AuR_O4.js";import{_ as g,W as v,p as f,j as l,a as o,c as t,b as e,d as i,F as p,h,t as _,e as y,i as x}from"./index-DeLT-ag4.js";import{L as J}from"./localeText-Bm68I_nB.js";import"./vue-datepicker-BYU1fpEw.js";import"./dayjs.min-BXJG8oxU.js";const w={name:"peerJobsAllModal",setup(){return{store:v()}},components:{LocaleText:J,SchedulePeerJob:b},props:{configurationPeers:Array[Object]},methods:{getuuid(){return f()}},computed:{getAllJobs(){return this.configurationPeers.filter(r=>r.jobs.length>0)}}},A={class:"peerSettingContainer w-100 h-100 position-absolute top-0 start-0 overflow-y-scroll"},$={class:"container d-flex h-100 w-100"},k={class:"m-auto modal-dialog-centered dashboardModal"},S={class:"card rounded-3 shadow",style:{width:"700px"}},L={class:"card-header bg-transparent d-flex align-items-center gap-2 border-0 p-4 pb-2"},j={class:"mb-0 fw-normal"},P={class:"card-body px-4 pb-4 pt-2"},C={key:0,class:"accordion",id:"peerJobsLogsModalAccordion"},M={class:"accordion-header"},B=["data-bs-target"],N={key:0},D={class:"text-muted"},T=["id"],V={class:"accordion-body"},F={key:1,class:"card shadow-sm",style:{height:"153px"}},O={class:"card-body text-muted text-center d-flex"},W={class:"m-auto"};function E(r,s,I,R,q,z){const n=l("LocaleText"),u=l("SchedulePeerJob");return o(),t("div",A,[e("div",$,[e("div",k,[e("div",S,[e("div",L,[e("h4",j,[i(n,{t:"All Active Jobs"})]),e("button",{type:"button",class:"btn-close ms-auto",onClick:s[0]||(s[0]=a=>this.$emit("close"))})]),e("div",P,[this.getAllJobs.length>0?(o(),t("div",C,[(o(!0),t(p,null,h(this.getAllJobs,(a,d)=>(o(),t("div",{class:"accordion-item",key:a.id},[e("h2",M,[e("button",{class:"accordion-button collapsed",type:"button","data-bs-toggle":"collapse","data-bs-target":"#collapse_"+d},[e("small",null,[e("strong",null,[a.name?(o(),t("span",N,_(a.name)+" • ",1)):y("",!0),e("samp",D,_(a.id),1)])])],8,B)]),e("div",{id:"collapse_"+d,class:"accordion-collapse 
collapse","data-bs-parent":"#peerJobsLogsModalAccordion"},[e("div",V,[(o(!0),t(p,null,h(a.jobs,c=>(o(),x(u,{onDelete:s[1]||(s[1]=m=>this.$emit("refresh")),onRefresh:s[2]||(s[2]=m=>this.$emit("refresh")),dropdowns:this.store.PeerScheduleJobs.dropdowns,viewOnly:!0,key:c.JobID,pjob:c},null,8,["dropdowns","pjob"]))),128))])],8,T)]))),128))])):(o(),t("div",F,[e("div",O,[e("span",W,[i(n,{t:"No active job at the moment."})])])]))])])])])])}const X=g(w,[["render",E]]);export{X as default};
|
||||
File diff suppressed because one or more lines are too long
19
src/static/app/dist/assets/peerList-BnWkSiVa.js
vendored
19
src/static/app/dist/assets/peerList-BnWkSiVa.js
vendored
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
import{b as a}from"./browser-CjSdxGTc.js";import{L as n}from"./localeText-Bm68I_nB.js";import{_ as c,j as r,a as d,c as i,b as e,d as l}from"./index-DeLT-ag4.js";const p={name:"peerQRCode",components:{LocaleText:n},props:{peerConfigData:String},mounted(){a.toCanvas(document.querySelector("#qrcode"),this.peerConfigData,o=>{o&&console.error(o)})}},_={class:"peerSettingContainer w-100 h-100 position-absolute top-0 start-0"},m={class:"container d-flex h-100 w-100"},h={class:"m-auto modal-dialog-centered dashboardModal justify-content-center"},u={class:"card rounded-3 shadow"},f={class:"card-header bg-transparent d-flex align-items-center gap-2 border-0 p-4 pb-0"},b={class:"mb-0"},v={class:"card-body"},C={id:"qrcode",class:"rounded-3 shadow",ref:"qrcode"};function g(o,t,x,$,w,q){const s=r("LocaleText");return d(),i("div",_,[e("div",m,[e("div",h,[e("div",u,[e("div",f,[e("h4",b,[l(s,{t:"QR Code"})]),e("button",{type:"button",class:"btn-close ms-auto",onClick:t[0]||(t[0]=y=>this.$emit("close"))})]),e("div",v,[e("canvas",C,null,512)])])])])])}const Q=c(p,[["render",g]]);export{Q as default};
|
||||
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
.toggleShowKey[data-v-a63ae8cb]{position:absolute;top:35px;right:12px}
|
||||
@@ -1 +0,0 @@
|
||||
import{_ as g,D as f,q as h,j as p,a as s,c as r,b as t,d as o,n as m,i as n,t as _,e as b}from"./index-DeLT-ag4.js";import{d}from"./dayjs.min-BXJG8oxU.js";import{V as y}from"./vue-datepicker-BYU1fpEw.js";import{L as S}from"./localeText-Bm68I_nB.js";const k={name:"peerShareLinkModal",props:{peer:Object},components:{LocaleText:S,VueDatePicker:y},data(){return{dataCopy:void 0,loading:!1}},setup(){return{store:f()}},mounted(){this.dataCopy=JSON.parse(JSON.stringify(this.peer.ShareLink)).at(0)},watch:{"peer.ShareLink":{deep:!0,handler(e,a){a.length!==e.length&&(this.dataCopy=JSON.parse(JSON.stringify(this.peer.ShareLink)).at(0))}}},methods:{startSharing(){this.loading=!0,h("/api/sharePeer/create",{Configuration:this.peer.configuration.Name,Peer:this.peer.id,ExpireDate:d().add(7,"d").format("YYYY-MM-DD HH:mm:ss")},e=>{e.status?(this.peer.ShareLink=e.data,this.dataCopy=e.data.at(0)):this.store.newMessage("Server","Share link failed to create. Reason: "+e.message,"danger"),this.loading=!1})},updateLinkExpireDate(){h("/api/sharePeer/update",this.dataCopy,e=>{e.status?(this.dataCopy=e.data.at(0),this.peer.ShareLink=e.data,this.store.newMessage("Server","Link expire date updated","success")):this.store.newMessage("Server","Link expire date failed to update. 
Reason: "+e.message,"danger"),this.loading=!1})},stopSharing(){this.loading=!0,this.dataCopy.ExpireDate=d().format("YYYY-MM-DD HH:mm:ss"),this.updateLinkExpireDate()},parseTime(e){e?this.dataCopy.ExpireDate=d(e).format("YYYY-MM-DD HH:mm:ss"):this.dataCopy.ExpireDate=void 0,this.updateLinkExpireDate()}},computed:{getUrl(){const e=this.store.getActiveCrossServer();return e?`${e.host}/${this.$router.resolve({path:"/share",query:{ShareID:this.dataCopy.ShareID}}).href}`:window.location.origin+window.location.pathname+this.$router.resolve({path:"/share",query:{ShareID:this.dataCopy.ShareID}}).href}}},x={class:"peerSettingContainer w-100 h-100 position-absolute top-0 start-0 overflow-y-scroll"},v={class:"container d-flex h-100 w-100"},C={class:"m-auto modal-dialog-centered dashboardModal",style:{width:"500px"}},D={class:"card rounded-3 shadow flex-grow-1"},w={class:"card-header bg-transparent d-flex align-items-center gap-2 border-0 p-4"},L={class:"mb-0"},M={key:0,class:"card-body px-4 pb-4"},Y={key:0},$={class:"mb-3 text-muted"},E=["disabled"],H={key:1},V={class:"d-flex gap-2 mb-4"},N=["href"],P={class:"d-flex flex-column gap-2 mb-3"},O=["disabled"];function T(e,a,U,B,I,c){const i=p("LocaleText"),u=p("VueDatePicker");return s(),r("div",x,[t("div",v,[t("div",C,[t("div",D,[t("div",w,[t("h4",L,[o(i,{t:"Share Peer"})]),t("button",{type:"button",class:"btn-close ms-auto",onClick:a[0]||(a[0]=l=>this.$emit("close"))})]),this.peer.ShareLink?(s(),r("div",M,[this.dataCopy?(s(),r("div",H,[t("div",V,[a[4]||(a[4]=t("i",{class:"bi bi-link-45deg"},null,-1)),t("a",{href:this.getUrl,class:"text-decoration-none",target:"_blank"},_(c.getUrl),9,N)]),t("div",P,[t("small",null,[a[5]||(a[5]=t("i",{class:"bi bi-calendar me-2"},null,-1)),o(i,{t:"Expire At"})]),o(u,{is24:!0,"min-date":new Date,"model-value":this.dataCopy.ExpireDate,"onUpdate:modelValue":this.parseTime,"time-picker-inline":"",format:"yyyy-MM-dd HH:mm:ss","preview-format":"yyyy-MM-dd 
HH:mm:ss",dark:this.store.Configuration.Server.dashboard_theme==="dark"},null,8,["min-date","model-value","onUpdate:modelValue","dark"])]),t("button",{onClick:a[2]||(a[2]=l=>this.stopSharing()),disabled:this.loading,class:"w-100 btn bg-danger-subtle text-danger-emphasis border-1 border-danger-subtle rounded-3 shadow-sm"},[t("span",{class:m({"animate__animated animate__flash animate__infinite animate__slower":this.loading})},a[6]||(a[6]=[t("i",{class:"bi bi-send-slash-fill me-2"},null,-1)]),2),this.loading?(s(),n(i,{key:0,t:"Stop Sharing..."})):(s(),n(i,{key:1,t:"Stop Sharing"}))],8,O)])):(s(),r("div",Y,[t("h6",$,[o(i,{t:"Currently the peer is not sharing"})]),t("button",{onClick:a[1]||(a[1]=l=>this.startSharing()),disabled:this.loading,class:"w-100 btn bg-success-subtle text-success-emphasis border-1 border-success-subtle rounded-3 shadow-sm"},[t("span",{class:m({"animate__animated animate__flash animate__infinite animate__slower":this.loading})},a[3]||(a[3]=[t("i",{class:"bi bi-send-fill me-2"},null,-1)]),2),this.loading?(s(),n(i,{key:0,t:"Sharing..."})):(s(),n(i,{key:1,t:"Start Sharing"}))],8,E)]))])):b("",!0)])])])])}const R=g(k,[["render",T]]);export{R as default};
|
||||
1
src/static/app/dist/assets/ping-mYQpHqaI.js
vendored
1
src/static/app/dist/assets/ping-mYQpHqaI.js
vendored
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
.dropdownIcon[data-v-626f1988]{transition:all .2s ease-in-out}.dropdownIcon.active[data-v-626f1988]{transform:rotate(180deg)}.steps{&[data-v-f0245d51]{transition:all .3s ease-in-out;opacity:.3}&.active[data-v-f0245d51]{opacity:1}}
|
||||
@@ -1 +0,0 @@
|
||||
.btn.disabled[data-v-6a5aba2a]{opacity:1;background-color:#0d6efd17;border-color:transparent}[data-v-8f3f1b93]{font-size:.875rem}input[data-v-8f3f1b93]{padding:.1rem .4rem}input[data-v-8f3f1b93]:disabled{border-color:transparent;background-color:#0d6efd17;color:#0d6efd}.dp__main[data-v-8f3f1b93]{width:auto;flex-grow:1;--dp-input-padding: 2.5px 30px 2.5px 12px;--dp-border-radius: .5rem}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
@media screen and (max-width: 992px){.apiKey-card-body{&[data-v-a76253c8]{flex-direction:column!important;align-items:start!important}div.ms-auto[data-v-a76253c8]{margin-left:0!important}div[data-v-a76253c8]{width:100%;align-items:start!important}small[data-v-a76253c8]{margin-right:auto}}}.apiKey-move[data-v-100ee9f9],.apiKey-enter-active[data-v-100ee9f9],.apiKey-leave-active[data-v-100ee9f9]{transition:all .5s ease}.apiKey-enter-from[data-v-100ee9f9],.apiKey-leave-to[data-v-100ee9f9]{opacity:0;transform:translateY(30px) scale(.9)}.apiKey-leave-active[data-v-100ee9f9]{position:absolute;width:100%}.dropdown-menu[data-v-0f26916d]{width:100%}.list-group{&[data-v-4aa2aed9]:first-child{border-top-left-radius:var(--bs-border-radius-lg);border-top-right-radius:var(--bs-border-radius-lg)}&[data-v-4aa2aed9]:last-child{border-bottom-left-radius:var(--bs-border-radius-lg);border-bottom-right-radius:var(--bs-border-radius-lg)}}
|
||||
1
src/static/app/dist/assets/setup-CwMP0Me6.js
vendored
1
src/static/app/dist/assets/setup-CwMP0Me6.js
vendored
@@ -1 +0,0 @@
|
||||
import{_ as u,D as m,q as p,c as r,b as e,d as o,f as c,t as h,e as f,m as l,s as d,a as i,j as w}from"./index-DeLT-ag4.js";import{L as g}from"./localeText-Bm68I_nB.js";const b={name:"setup",components:{LocaleText:g},setup(){return{store:m()}},data(){return{setup:{username:"",newPassword:"",repeatNewPassword:"",enable_totp:!0},loading:!1,errorMessage:"",done:!1}},computed:{goodToSubmit(){return this.setup.username&&this.setup.newPassword.length>=8&&this.setup.repeatNewPassword.length>=8&&this.setup.newPassword===this.setup.repeatNewPassword}},methods:{submit(){this.loading=!0,p("/api/Welcome_Finish",this.setup,n=>{n.status?(this.done=!0,this.$router.push("/2FASetup")):(document.querySelectorAll("#createAccount input").forEach(s=>s.classList.add("is-invalid")),this.errorMessage=n.message,document.querySelector(".login-container-fluid").scrollTo({top:0,left:0,behavior:"smooth"})),this.loading=!1})}}},_=["data-bs-theme"],x={class:"m-auto text-body",style:{width:"500px"}},v={class:"dashboardLogo display-4"},y={class:"mb-5"},P={key:0,class:"alert alert-danger"},N={class:"d-flex flex-column gap-3"},k={id:"createAccount",class:"d-flex flex-column gap-2"},S={class:"form-group text-body"},T={for:"username",class:"mb-1 text-muted"},C={class:"form-group text-body"},L={for:"password",class:"mb-1 text-muted"},V={class:"form-group text-body"},q={for:"confirmPassword",class:"mb-1 text-muted"},$=["disabled"],A={key:0,class:"d-flex align-items-center w-100"},M={key:1,class:"d-flex align-items-center w-100"};function B(n,s,D,E,U,F){const t=w("LocaleText");return i(),r("div",{class:"container-fluid login-container-fluid d-flex main pt-5 overflow-scroll","data-bs-theme":this.store.Configuration.Server.dashboard_theme},[e("div",x,[e("span",v,[o(t,{t:"Nice to meet you!"})]),e("p",y,[o(t,{t:"Please fill in the following fields to finish setup"}),s[4]||(s[4]=c(" 😊"))]),e("div",null,[e("h3",null,[o(t,{t:"Create an 
account"})]),this.errorMessage?(i(),r("div",P,h(this.errorMessage),1)):f("",!0),e("div",N,[e("form",k,[e("div",S,[e("label",T,[e("small",null,[o(t,{t:"Enter an username you like"})])]),l(e("input",{type:"text",autocomplete:"username","onUpdate:modelValue":s[0]||(s[0]=a=>this.setup.username=a),class:"form-control",id:"username",name:"username",required:""},null,512),[[d,this.setup.username]])]),e("div",C,[e("label",L,[e("small",null,[o(t,{t:"Enter a password"}),e("code",null,[o(t,{t:"(At least 8 characters and make sure is strong enough!)"})])])]),l(e("input",{type:"password",autocomplete:"new-password","onUpdate:modelValue":s[1]||(s[1]=a=>this.setup.newPassword=a),class:"form-control",id:"password",name:"password",required:""},null,512),[[d,this.setup.newPassword]])]),e("div",V,[e("label",q,[e("small",null,[o(t,{t:"Confirm password"})])]),l(e("input",{type:"password",autocomplete:"confirm-new-password","onUpdate:modelValue":s[2]||(s[2]=a=>this.setup.repeatNewPassword=a),class:"form-control",id:"confirmPassword",name:"confirmPassword",required:""},null,512),[[d,this.setup.repeatNewPassword]])])]),e("button",{class:"btn btn-dark btn-lg mb-5 d-flex btn-brand shadow align-items-center",ref:"signInBtn",disabled:!this.goodToSubmit||this.loading||this.done,onClick:s[3]||(s[3]=a=>this.submit())},[!this.loading&&!this.done?(i(),r("span",A,[o(t,{t:"Next"}),s[5]||(s[5]=e("i",{class:"bi bi-chevron-right ms-auto"},null,-1))])):(i(),r("span",M,[o(t,{t:"Saving..."}),s[6]||(s[6]=e("span",{class:"spinner-border ms-auto spinner-border-sm",role:"status"},[e("span",{class:"visually-hidden"},"Loading...")],-1))]))],8,$)])])])],8,_)}const W=u(b,[["render",B]]);export{W as default};
|
||||
1
src/static/app/dist/assets/share-w3jPIecW.js
vendored
1
src/static/app/dist/assets/share-w3jPIecW.js
vendored
@@ -1 +0,0 @@
|
||||
import{_,r,D as p,g as u,c as m,b as t,d as c,$ as h,a as f,j as b}from"./index-DeLT-ag4.js";import{b as v}from"./browser-CjSdxGTc.js";import{L as y}from"./localeText-Bm68I_nB.js";const g={name:"share",components:{LocaleText:y},async setup(){const o=h(),e=r(!1),i=p(),n=r(""),s=r(void 0),l=r(new Blob);await u("/api/getDashboardTheme",{},d=>{n.value=d.data});const a=o.query.ShareID;return a===void 0||a.length===0?(s.value=void 0,e.value=!0):await u("/api/sharePeer/get",{ShareID:a},d=>{d.status?(s.value=d.data,l.value=new Blob([s.value.file],{type:"text/plain"})):s.value=void 0,e.value=!0}),{store:i,theme:n,peerConfiguration:s,blob:l}},mounted(){this.peerConfiguration&&v.toCanvas(document.querySelector("#qrcode"),this.peerConfiguration.file,o=>{o&&console.error(o)})},methods:{download(){const o=new Blob([this.peerConfiguration.file],{type:"text/plain"}),e=URL.createObjectURL(o),i=`${this.peerConfiguration.fileName}.conf`,n=document.createElement("a");n.href=e,n.download=i,n.click()}},computed:{getBlob(){return URL.createObjectURL(this.blob)}}},w=["data-bs-theme"],x={class:"m-auto text-body",style:{width:"500px"}},C={key:0,class:"text-center position-relative",style:{}},U={class:"position-absolute w-100 h-100 top-0 start-0 d-flex animate__animated animate__fadeInUp",style:{"animation-delay":"0.1s"}},I={class:"m-auto"},L={key:1,class:"d-flex align-items-center flex-column gap-3"},B={class:"h1 dashboardLogo text-center animate__animated animate__fadeInUp"},k={id:"qrcode",class:"rounded-3 shadow animate__animated animate__fadeInUp mb-3",ref:"qrcode"},D={class:"text-muted animate__animated animate__fadeInUp mb-1",style:{"animation-delay":"0.2s"}},R=["download","href"];function j(o,e,i,n,s,l){const a=b("LocaleText");return f(),m("div",{class:"container-fluid login-container-fluid d-flex main pt-5 overflow-scroll","data-bs-theme":this.theme},[t("div",x,[this.peerConfiguration?(f(),m("div",L,[t("div",B,[e[1]||(e[1]=t("h6",null,"WGDashboard",-1)),c(a,{t:"Scan QR Code with the 
WireGuard App to add peer"})]),t("canvas",k,null,512),t("p",D,[c(a,{t:"or click the button below to download the "}),e[2]||(e[2]=t("samp",null,".conf",-1)),c(a,{t:" file"})]),t("a",{download:this.peerConfiguration.fileName+".conf",href:l.getBlob,class:"btn btn-lg bg-primary-subtle text-primary-emphasis border-1 border-primary-subtle animate__animated animate__fadeInUp shadow-sm",style:{"animation-delay":"0.25s"}},e[3]||(e[3]=[t("i",{class:"bi bi-download"},null,-1)]),8,R)])):(f(),m("div",C,[e[0]||(e[0]=t("div",{class:"animate__animated animate__fadeInUp"},[t("h1",{style:{"font-size":"20rem",filter:"blur(1rem)","animation-duration":"7s"},class:"animate__animated animate__flash animate__infinite"},[t("i",{class:"bi bi-file-binary"})])],-1)),t("div",U,[t("h3",I,[c(a,{t:"Oh no... This link is either expired or invalid."})])])]))])],8,w)}const $=_(g,[["render",j],["__scopeId","data-v-1b44aacd"]]);export{$ as default};
|
||||
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
.dot.inactive[data-v-ed7817c7]{background-color:#dc3545;box-shadow:0 0 0 .2rem #dc354545}.spin[data-v-ed7817c7]{animation:spin-ed7817c7 1s infinite cubic-bezier(.82,.58,.17,.9)}@keyframes spin-ed7817c7{0%{transform:rotate(0)}to{transform:rotate(360deg)}}@media screen and (max-width: 768px){.remoteServerContainer[data-v-ed7817c7]{flex-direction:column}.remoteServerContainer .button-group button[data-v-ed7817c7]{width:100%}}@media screen and (max-width: 768px){.login-box[data-v-eca07c7a]{width:100%!important}.login-box div[data-v-eca07c7a]{width:auto!important}}.navbar[data-v-eca07c7a]{display:none!important}
|
||||
1
src/static/app/dist/assets/totp-azFEowlZ.js
vendored
1
src/static/app/dist/assets/totp-azFEowlZ.js
vendored
@@ -1 +0,0 @@
|
||||
import{_ as h,D as m,g as p,q as f,c as b,b as t,d as i,t as _,m as v,s as g,i as d,w as r,j as c,a as n}from"./index-DeLT-ag4.js";import{b as x}from"./browser-CjSdxGTc.js";import{L as y}from"./localeText-Bm68I_nB.js";const T={name:"totp",components:{LocaleText:y},async setup(){const s=m();let e="";return await p("/api/Welcome_GetTotpLink",{},a=>{a.status&&(e=a.data)}),{l:e,store:s}},mounted(){this.l&&x.toCanvas(document.getElementById("qrcode"),this.l,function(s){})},data(){return{totp:"",totpInvalidMessage:"",verified:!1}},methods:{validateTotp(){}},watch:{totp(s){const e=document.querySelector("#totp");e.classList.remove("is-invalid","is-valid"),s.length===6&&(console.log(s),/[0-9]{6}/.test(s)?f("/api/Welcome_VerifyTotpLink",{totp:s},a=>{a.status?(this.verified=!0,e.classList.add("is-valid"),this.$emit("verified")):(e.classList.add("is-invalid"),this.totpInvalidMessage="TOTP does not match.")}):(e.classList.add("is-invalid"),this.totpInvalidMessage="TOTP can only contain numbers"))}}},k=["data-bs-theme"],w={class:"m-auto text-body",style:{width:"500px"}},L={class:"d-flex flex-column"},M={class:"dashboardLogo display-4"},C={class:"mb-2"},P={class:"text-muted"},I={class:"p-3 bg-body-secondary rounded-3 border mb-3"},O={class:"text-muted mb-0"},B=["href"],$={style:{"line-break":"anywhere"}},q={for:"totp",class:"mb-2"},D={class:"text-muted"},S={class:"form-group mb-2"},A=["disabled"],E={class:"invalid-feedback"},F={class:"valid-feedback"},R={class:"d-flex gap-3 mt-5 flex-column"};function G(s,e,a,N,W,j){const o=c("LocaleText"),l=c("RouterLink");return n(),b("div",{class:"container-fluid login-container-fluid d-flex main pt-5 overflow-scroll","data-bs-theme":this.store.Configuration.Server.dashboard_theme},[t("div",w,[t("div",L,[t("div",null,[t("h1",M,[i(o,{t:"Multi-Factor Authentication (MFA)"})]),t("p",C,[t("small",P,[i(o,{t:"1. 
Please scan the following QR Code to generate TOTP with your choice of authenticator"})])]),e[1]||(e[1]=t("canvas",{id:"qrcode",class:"rounded-3 mb-2"},null,-1)),t("div",I,[t("p",O,[t("small",null,[i(o,{t:"Or you can click the link below:"})])]),t("a",{href:this.l},[t("code",$,_(this.l),1)],8,B)]),t("label",q,[t("small",D,[i(o,{t:"2. Enter the TOTP generated by your authenticator to verify"})])]),t("div",S,[v(t("input",{class:"form-control text-center totp",id:"totp",maxlength:"6",type:"text",inputmode:"numeric",autocomplete:"one-time-code","onUpdate:modelValue":e[0]||(e[0]=u=>this.totp=u),disabled:this.verified},null,8,A),[[g,this.totp]]),t("div",E,[i(o,{t:this.totpInvalidMessage},null,8,["t"])]),t("div",F,[i(o,{t:"TOTP verified!"})])])]),e[4]||(e[4]=t("hr",null,null,-1)),t("div",R,[this.verified?(n(),d(l,{key:1,to:"/",class:"btn btn-dark btn-lg d-flex btn-brand shadow align-items-center flex-grow-1 rounded-3"},{default:r(()=>[i(o,{t:"Complete"}),e[3]||(e[3]=t("i",{class:"bi bi-chevron-right ms-auto"},null,-1))]),_:1})):(n(),d(l,{key:0,to:"/",class:"btn bg-secondary-subtle text-secondary-emphasis rounded-3 flex-grow-1 btn-lg border-1 border-secondary-subtle shadow d-flex"},{default:r(()=>[i(o,{t:"I don't need MFA"}),e[2]||(e[2]=t("i",{class:"bi bi-chevron-right ms-auto"},null,-1))]),_:1}))])])])],8,k)}const z=h(T,[["render",G]]);export{z as default};
|
||||
@@ -1 +0,0 @@
|
||||
import{_ as h,W as b,g,c as o,b as t,d as n,m as y,s as f,A as v,w as r,T as c,a,f as x,F as u,h as m,n as T,z as k,t as i,e as A,j as _}from"./index-DeLT-ag4.js";import{O as w}from"./osmap-BpsUKiQ8.js";import{L as R}from"./localeText-Bm68I_nB.js";const M={name:"traceroute",components:{LocaleText:R,OSMap:w},data(){return{tracing:!1,ipAddress:void 0,tracerouteResult:void 0}},setup(){return{store:b()}},methods:{execute(){this.ipAddress&&(this.tracing=!0,this.tracerouteResult=void 0,g("/api/traceroute/execute",{ipAddress:this.ipAddress},d=>{d.status?this.tracerouteResult=d.data:this.store.newMessage("Server",d.message,"danger"),this.tracing=!1}))}}},S={class:"mt-md-5 mt-3 text-body"},$={class:"container-md"},C={class:"mb-3 text-body"},L={class:"d-flex gap-2 flex-column mb-5"},P={class:"mb-1 text-muted",for:"ipAddress"},V=["disabled"],N=["disabled"],O={key:0,class:"d-block"},z={key:1,class:"d-block"},B={class:"position-relative"},I={key:"pingPlaceholder"},D={key:1},E={key:"table",class:"w-100 mt-2"},F={class:"table table-sm rounded-3 w-100"},G={scope:"col"},H={scope:"col"},K={scope:"col"},W={scope:"col"},j={scope:"col"},U={scope:"col"},q={key:0};function J(d,s,Q,X,Y,Z){const l=_("LocaleText"),p=_("OSMap");return a(),o("div",S,[t("div",$,[t("h3",C,[n(l,{t:"Traceroute"})]),t("div",L,[t("div",null,[t("label",P,[t("small",null,[n(l,{t:"Enter IP Address / Hostname"})])]),y(t("input",{disabled:this.tracing,id:"ipAddress",class:"form-control","onUpdate:modelValue":s[0]||(s[0]=e=>this.ipAddress=e),onKeyup:s[1]||(s[1]=v(e=>this.execute(),["enter"])),type:"text"},null,40,V),[[f,this.ipAddress]])]),t("button",{class:"btn btn-primary rounded-3 mt-3 position-relative",disabled:this.tracing||!this.ipAddress,onClick:s[2]||(s[2]=e=>this.execute())},[n(c,{name:"slide"},{default:r(()=>[this.tracing?(a(),o("span",z,s[4]||(s[4]=[t("span",{class:"spinner-border 
spinner-border-sm","aria-hidden":"true"},null,-1),t("span",{class:"visually-hidden",role:"status"},"Loading...",-1)]))):(a(),o("span",O,s[3]||(s[3]=[t("i",{class:"bi bi-person-walking me-2"},null,-1),x("Trace! ")])))]),_:1})],8,N)]),t("div",B,[n(c,{name:"ping"},{default:r(()=>[this.tracerouteResult?(a(),o("div",D,[n(p,{d:this.tracerouteResult,type:"traceroute"},null,8,["d"]),t("div",E,[t("table",F,[t("thead",null,[t("tr",null,[t("th",G,[n(l,{t:"Hop"})]),t("th",H,[n(l,{t:"IP Address"})]),t("th",K,[n(l,{t:"Average RTT (ms)"})]),t("th",W,[n(l,{t:"Min RTT (ms)"})]),t("th",j,[n(l,{t:"Max RTT (ms)"})]),t("th",U,[n(l,{t:"Geolocation"})])])]),t("tbody",null,[(a(!0),o(u,null,m(this.tracerouteResult,(e,tt)=>(a(),o("tr",null,[t("td",null,[t("small",null,i(e.hop),1)]),t("td",null,[t("small",null,i(e.ip),1)]),t("td",null,[t("small",null,i(e.avg_rtt),1)]),t("td",null,[t("small",null,i(e.min_rtt),1)]),t("td",null,[t("small",null,i(e.max_rtt),1)]),t("td",null,[e.geo.city&&e.geo.country?(a(),o("span",q,[t("small",null,i(e.geo.city)+", "+i(e.geo.country),1)])):A("",!0)])]))),256))])])])])):(a(),o("div",I,[s[5]||(s[5]=t("div",{class:"pingPlaceholder bg-body-secondary rounded-3 mb-3",style:{height:"300px !important"}},null,-1)),(a(),o(u,null,m(5,e=>t("div",{class:T(["pingPlaceholder bg-body-secondary rounded-3 mb-3",{"animate__animated animate__flash animate__slower animate__infinite":this.tracing}]),style:k({"animation-delay":`${e*.05}s`})},null,6)),64))]))]),_:1})])])])}const ot=h(M,[["render",J],["__scopeId","data-v-549eb223"]]);export{ot as default};
|
||||
@@ -1 +0,0 @@
|
||||
.pingPlaceholder[data-v-549eb223]{width:100%;height:40px}.ping-leave-active[data-v-549eb223]{position:absolute}table th[data-v-549eb223],table td[data-v-549eb223]{padding:.5rem}.table[data-v-549eb223]>:not(caption)>*>*{background-color:transparent!important}.ping-move[data-v-549eb223],.ping-enter-active[data-v-549eb223],.ping-leave-active[data-v-549eb223]{transition:all .4s cubic-bezier(.82,.58,.17,.9)}.ping-leave-active[data-v-549eb223]{position:absolute;width:100%}.ping-enter-from[data-v-549eb223],.ping-leave-to[data-v-549eb223]{opacity:0;filter:blur(3px)}
|
||||
File diff suppressed because one or more lines are too long
BIN
src/static/app/dist/favicon.png
vendored
BIN
src/static/app/dist/favicon.png
vendored
Binary file not shown.
|
Before Width: | Height: | Size: 180 KiB |
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user