Compare commits
1319 Commits
.github/workflows/ci.yml (vendored, 74 changed lines)

@@ -13,20 +13,24 @@ on:
schedule:
- cron: '0 5 * * 4'

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

jobs:
build_linux:

runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-18.04, ubuntu-20.04 ]
os: [ ubuntu-18.04, ubuntu-20.04, ubuntu-22.04 ]
python-version: ["3.8", "3.9", "3.10"]

steps:
- uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v3
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}

@@ -62,15 +66,15 @@ jobs:
- name: Tests
run: |
pytest --random-order --cov=freqtrade --cov-config=.coveragerc
if: matrix.python-version != '3.9'
if: matrix.python-version != '3.9' || matrix.os != 'ubuntu-22.04'

- name: Tests incl. ccxt compatibility tests
run: |
pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun
if: matrix.python-version == '3.9'
if: matrix.python-version == '3.9' && matrix.os == 'ubuntu-22.04'

- name: Coveralls
if: (runner.os == 'Linux' && matrix.python-version == '3.8')
if: (runner.os == 'Linux' && matrix.python-version == '3.9')
env:
# Coveralls token. Not used as secret due to github not providing secrets to forked repositories
COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu

@@ -78,11 +82,13 @@ jobs:
# Allow failure for coveralls
coveralls || true

- name: Backtesting
- name: Backtesting (multi)
run: |
cp config_examples/config_bittrex.example.json config.json
freqtrade create-userdir --userdir user_data
freqtrade backtesting --datadir tests/testdata --strategy SampleStrategy
freqtrade new-strategy -s AwesomeStrategy
freqtrade new-strategy -s AwesomeStrategyMin --template minimal
freqtrade backtesting --datadir tests/testdata --strategy-list AwesomeStrategy AwesomeStrategyMin -i 5m

- name: Hyperopt
run: |

@@ -121,7 +127,7 @@ jobs:
- uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v3
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}

@@ -157,29 +163,15 @@ jobs:
pip install -e .

- name: Tests
if: (runner.os != 'Linux' || matrix.python-version != '3.8')
run: |
pytest --random-order

- name: Tests (with cov)
if: (runner.os == 'Linux' && matrix.python-version == '3.8')
run: |
pytest --random-order --cov=freqtrade --cov-config=.coveragerc

- name: Coveralls
if: (runner.os == 'Linux' && matrix.python-version == '3.8')
env:
# Coveralls token. Not used as secret due to github not providing secrets to forked repositories
COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu
run: |
# Allow failure for coveralls
coveralls -v || true

- name: Backtesting
run: |
cp config_examples/config_bittrex.example.json config.json
freqtrade create-userdir --userdir user_data
freqtrade backtesting --datadir tests/testdata --strategy SampleStrategy
freqtrade new-strategy -s AwesomeStrategyAdv --template advanced
freqtrade backtesting --datadir tests/testdata --strategy AwesomeStrategyAdv

- name: Hyperopt
run: |

@@ -219,7 +211,7 @@ jobs:
- uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v3
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}

@@ -271,9 +263,9 @@ jobs:
- uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v3
uses: actions/setup-python@v4
with:
python-version: 3.9
python-version: "3.10"

- name: pre-commit dependencies
run: |

@@ -290,9 +282,9 @@ jobs:
./tests/test_docs.sh

- name: Set up Python
uses: actions/setup-python@v3
uses: actions/setup-python@v4
with:
python-version: 3.9
python-version: "3.10"

- name: Documentation build
run: |

@@ -308,18 +300,6 @@ jobs:
details: Freqtrade doc test failed!
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

cleanup-prior-runs:
permissions:
actions: write # for rokroskar/workflow-run-cleanup-action to obtain workflow name & cancel it
contents: read # for rokroskar/workflow-run-cleanup-action to obtain branch
runs-on: ubuntu-20.04
steps:
- name: Cleanup previous runs on this branch
uses: rokroskar/workflow-run-cleanup-action@v0.3.3
if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/stable' && github.repository == 'freqtrade/freqtrade'"
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

# Notify only once - when CI completes (and after deploy) in case it's successfull
notify-complete:
needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check ]

@@ -327,7 +307,7 @@ jobs:
# Discord notification can't handle schedule events
if: (github.event_name != 'schedule')
permissions:
repository-projects: read
repository-projects: read
steps:

- name: Check user permission

@@ -356,9 +336,9 @@ jobs:
- uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v3
uses: actions/setup-python@v4
with:
python-version: 3.8
python-version: "3.9"

- name: Extract branch name
shell: bash

@@ -371,7 +351,7 @@ jobs:
python setup.py sdist bdist_wheel

- name: Publish to PyPI (Test)
uses: pypa/gh-action-pypi-publish@master
uses: pypa/gh-action-pypi-publish@v1.5.1
if: (github.event_name == 'release')
with:
user: __token__

@@ -379,7 +359,7 @@ jobs:
repository_url: https://test.pypi.org/legacy/

- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@master
uses: pypa/gh-action-pypi-publish@v1.5.1
if: (github.event_name == 'release')
with:
user: __token__

@@ -419,7 +399,7 @@ jobs:

- name: Discord notification
uses: rjstone/discord-webhook-notify@v1
if: always() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
if: always() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && (github.event_name != 'schedule')
with:
severity: info
details: Deploy Succeeded!
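The test commands in this workflow can be reproduced locally before pushing; a minimal sketch, assuming a development install of freqtrade with its test dependencies (e.g. `pip install -e . -r requirements-dev.txt`):

```bash
# Same command as the CI "Tests" step: randomised order, with coverage
pytest --random-order --cov=freqtrade --cov-config=.coveragerc

# The "Tests incl. ccxt compatibility tests" step additionally passes --longrun
pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun
```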
.gitignore (vendored, 8 changed lines)

@@ -7,10 +7,15 @@ logfile.txt
user_data/*
!user_data/strategy/sample_strategy.py
!user_data/notebooks
!user_data/models
!user_data/freqaimodels
user_data/freqaimodels/*
user_data/models/*
user_data/notebooks/*
freqtrade-plot.html
freqtrade-profit-plot.html
freqtrade/rpc/api_server/ui/*
build_helpers/ta-lib/*

# Macos related
.DS_Store

@@ -80,6 +85,8 @@ instance/

# Sphinx documentation
docs/_build/
# Mkdocs documentation
site/

# PyBuilder
target/

@@ -105,3 +112,4 @@ target/
!config_examples/config_ftx.example.json
!config_examples/config_full.example.json
!config_examples/config_kraken.example.json
!config_examples/config_freqai.example.json
.pre-commit-config.yaml

@@ -13,11 +13,11 @@ repos:
- id: mypy
exclude: build_helpers
additional_dependencies:
- types-cachetools==5.0.1
- types-filelock==3.2.5
- types-requests==2.27.20
- types-tabulate==0.8.7
- types-python-dateutil==2.8.12
- types-cachetools==5.2.1
- types-filelock==3.2.7
- types-requests==2.28.9
- types-tabulate==0.8.11
- types-python-dateutil==2.8.19
# stages: [push]

- repo: https://github.com/pycqa/isort
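To exercise the updated mypy hook with these newer pinned type stubs locally, the hooks can be run directly; a sketch assuming `pre-commit` is installed in your environment:

```bash
# Install the git hooks once, then run only the mypy hook against the whole repository
pre-commit install
pre-commit run mypy --all-files
```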
Dockerfile

@@ -1,4 +1,4 @@
FROM python:3.9.9-slim-bullseye as base
FROM python:3.10.6-slim-bullseye as base

# Setup env
ENV LANG C.UTF-8

@@ -11,7 +11,7 @@ ENV FT_APP_ENV="docker"
# Prepare environment
RUN mkdir /freqtrade \
&& apt-get update \
&& apt-get -y install sudo libatlas3-base curl sqlite3 libhdf5-serial-dev \
&& apt-get -y install sudo libatlas3-base curl sqlite3 libhdf5-serial-dev libgomp1 \
&& apt-get clean \
&& useradd -u 1000 -G sudo -U -m -s /bin/bash ftuser \
&& chown ftuser:ftuser /freqtrade \
README.md (11 changed lines)

@@ -9,10 +9,6 @@ Freqtrade is a free and open source crypto trading bot written in Python. It is

## Sponsored promotion

[](https://tokenbot.com/?utm_source=github&utm_medium=freqtrade&utm_campaign=algodevs)

## Disclaimer

This software is for educational purposes only. Do not risk money which

@@ -39,7 +35,7 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even
- [X] [OKX](https://okx.com/) (Former OKEX)
- [ ] [potentially many others](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_

### Experimentally, freqtrade also supports futures on the following exchanges
### Supported Futures Exchanges (experimental)

- [X] [Binance](https://www.binance.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)

@@ -67,6 +63,7 @@ Please find the complete documentation on the [freqtrade website](https://www.fr
- [x] **Dry-run**: Run the bot without paying money.
- [x] **Backtesting**: Run a simulation of your buy/sell strategy.
- [x] **Strategy Optimization by machine learning**: Use machine learning to optimize your buy/sell strategy parameters with real exchange data.
- [X] **Adaptive prediction modeling**: Build a smart strategy with FreqAI that self-trains to the market via adaptive machine learning methods. [Learn more](https://www.freqtrade.io/en/stable/freqai/)
- [x] **Edge position sizing** Calculate your win rate, risk reward ratio, the best stoploss and adjust your position size before taking a position for each specific market. [Learn more](https://www.freqtrade.io/en/stable/edge/).
- [x] **Whitelist crypto-currencies**: Select which crypto-currency you want to trade or use dynamic whitelists.
- [x] **Blacklist crypto-currencies**: Select which crypto-currency you want to avoid.

@@ -133,7 +130,7 @@ Telegram is not mandatory. However, this is a great way to control your bot. Mor

- `/start`: Starts the trader.
- `/stop`: Stops the trader.
- `/stopbuy`: Stop entering new trades.
- `/stopentry`: Stop entering new trades.
- `/status <trade_id>|[table]`: Lists all or specific open trades.
- `/profit [<n>]`: Lists cumulative profit from all finished trades, over the last n days.
- `/forceexit <trade_id>|all`: Instantly exits the given trade (Ignoring `minimum_roi`).

@@ -197,7 +194,7 @@ Issues labeled [good first issue](https://github.com/freqtrade/freqtrade/labels/

The clock must be accurate, synchronized to a NTP server very frequently to avoid problems with communication to the exchanges.

### Min hardware required
### Minimum hardware required

To run this bot we recommend you a cloud instance with a minimum of:
build_helpers/install_ta-lib.sh

@@ -4,7 +4,7 @@ else
INSTALL_LOC=${1}
fi
echo "Installing to ${INSTALL_LOC}"
if [ ! -f "${INSTALL_LOC}/lib/libta_lib.a" ]; then
if [ -n "$2" ] || [ ! -f "${INSTALL_LOC}/lib/libta_lib.a" ]; then
tar zxvf ta-lib-0.4.0-src.tar.gz
cd ta-lib \
&& sed -i.bak "s|0.00000001|0.000000000000000001 |g" src/ta_func/ta_utility.h \

@@ -17,11 +17,17 @@ if [ ! -f "${INSTALL_LOC}/lib/libta_lib.a" ]; then
cd .. && rm -rf ./ta-lib/
exit 1
fi
which sudo && sudo make install || make install
if [ -x "$(command -v apt-get)" ]; then
echo "Updating library path using ldconfig"
sudo ldconfig
if [ -z "$2" ]; then
which sudo && sudo make install || make install
if [ -x "$(command -v apt-get)" ]; then
echo "Updating library path using ldconfig"
sudo ldconfig
fi
else
# Don't install with sudo
make install
fi

cd .. && rm -rf ./ta-lib/
else
echo "TA-lib already installed, skipping installation"
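Judging from the `$1`/`$2` handling above, the script appears to take the install prefix as its first argument and to treat a non-empty second argument as "rebuild, but install without sudo". A hypothetical invocation (paths and the second argument are illustrative, not part of this diff):

```bash
# Install TA-Lib into /usr/local (default behaviour, uses sudo where available)
./build_helpers/install_ta-lib.sh /usr/local

# Rebuild into a user-writable prefix and skip the sudo install step (any non-empty second argument)
./build_helpers/install_ta-lib.sh "${HOME}/.local" nosudo
```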
build_helpers/publish_docker_arm64.sh

@@ -6,10 +6,12 @@ export DOCKER_BUILDKIT=1
# Replace / with _ to create a valid tag
TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
TAG_PLOT=${TAG}_plot
TAG_FREQAI=${TAG}_freqai
TAG_PI="${TAG}_pi"

TAG_ARM=${TAG}_arm
TAG_PLOT_ARM=${TAG_PLOT}_arm
TAG_FREQAI_ARM=${TAG_FREQAI}_arm
CACHE_IMAGE=freqtradeorg/freqtrade_cache

echo "Running for ${TAG}"

@@ -38,8 +40,10 @@ fi
docker tag freqtrade:$TAG_ARM ${CACHE_IMAGE}:$TAG_ARM

docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_PLOT_ARM} -f docker/Dockerfile.plot .
docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_ARM} -f docker/Dockerfile.freqai .

docker tag freqtrade:$TAG_PLOT_ARM ${CACHE_IMAGE}:$TAG_PLOT_ARM
docker tag freqtrade:$TAG_FREQAI_ARM ${CACHE_IMAGE}:$TAG_FREQAI_ARM

# Run backtest
docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG_ARM} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3

@@ -53,6 +57,7 @@ docker images

# docker push ${IMAGE_NAME}
docker push ${CACHE_IMAGE}:$TAG_PLOT_ARM
docker push ${CACHE_IMAGE}:$TAG_FREQAI_ARM
docker push ${CACHE_IMAGE}:$TAG_ARM

# Create multi-arch image

@@ -66,6 +71,9 @@ docker manifest push -p ${IMAGE_NAME}:${TAG}
docker manifest create ${IMAGE_NAME}:${TAG_PLOT} ${CACHE_IMAGE}:${TAG_PLOT_ARM} ${CACHE_IMAGE}:${TAG_PLOT}
docker manifest push -p ${IMAGE_NAME}:${TAG_PLOT}

docker manifest create ${IMAGE_NAME}:${TAG_FREQAI} ${CACHE_IMAGE}:${TAG_FREQAI_ARM} ${CACHE_IMAGE}:${TAG_FREQAI}
docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI}

# Tag as latest for develop builds
if [ "${TAG}" = "develop" ]; then
docker manifest create ${IMAGE_NAME}:latest ${CACHE_IMAGE}:${TAG_ARM} ${IMAGE_NAME}:${TAG_PI} ${CACHE_IMAGE}:${TAG}
build_helpers/publish_docker_multi.sh

@@ -5,6 +5,7 @@
# Replace / with _ to create a valid tag
TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
TAG_PLOT=${TAG}_plot
TAG_FREQAI=${TAG}_freqai
TAG_PI="${TAG}_pi"

PI_PLATFORM="linux/arm/v7"

@@ -49,8 +50,10 @@ fi
docker tag freqtrade:$TAG ${CACHE_IMAGE}:$TAG

docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_PLOT} -f docker/Dockerfile.plot .
docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_FREQAI} -f docker/Dockerfile.freqai .

docker tag freqtrade:$TAG_PLOT ${CACHE_IMAGE}:$TAG_PLOT
docker tag freqtrade:$TAG_FREQAI ${CACHE_IMAGE}:$TAG_FREQAI

# Run backtest
docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3

@@ -64,6 +67,7 @@ docker images

docker push ${CACHE_IMAGE}
docker push ${CACHE_IMAGE}:$TAG_PLOT
docker push ${CACHE_IMAGE}:$TAG_FREQAI
docker push ${CACHE_IMAGE}:$TAG
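Both publish scripts derive their image tags from a `BRANCH_NAME` environment variable (see the `TAG=$(echo "${BRANCH_NAME}" ...)` lines above). They are intended to run in the project's CI; a rough local sketch, assuming you are logged in to the target registries and accept that the script will push images:

```bash
# BRANCH_NAME drives all derived tags (develop, develop_plot, develop_freqai, ...)
export BRANCH_NAME=develop
./build_helpers/publish_docker_multi.sh
```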
config_examples/config_freqai.example.json (new file, 96 lines)

@@ -0,0 +1,96 @@
{
    "trading_mode": "futures",
    "margin_mode": "isolated",
    "max_open_trades": 5,
    "stake_currency": "USDT",
    "stake_amount": 200,
    "tradable_balance_ratio": 1,
    "fiat_display_currency": "USD",
    "dry_run": true,
    "timeframe": "3m",
    "dry_run_wallet": 1000,
    "cancel_open_orders_on_exit": true,
    "unfilledtimeout": {
        "entry": 10,
        "exit": 30
    },
    "exchange": {
        "name": "binance",
        "key": "",
        "secret": "",
        "ccxt_config": {
            "enableRateLimit": true
        },
        "ccxt_async_config": {
            "enableRateLimit": true,
            "rateLimit": 200
        },
        "pair_whitelist": [
            "1INCH/USDT",
            "ALGO/USDT"
        ],
        "pair_blacklist": []
    },
    "entry_pricing": {
        "price_side": "same",
        "use_order_book": true,
        "order_book_top": 1,
        "price_last_balance": 0.0,
        "check_depth_of_market": {
            "enabled": false,
            "bids_to_ask_delta": 1
        }
    },
    "exit_pricing": {
        "price_side": "other",
        "use_order_book": true,
        "order_book_top": 1
    },
    "pairlists": [
        {
            "method": "StaticPairList"
        }
    ],
    "freqai": {
        "enabled": true,
        "startup_candles": 10000,
        "purge_old_models": true,
        "train_period_days": 15,
        "backtest_period_days": 7,
        "live_retrain_hours": 0,
        "identifier": "uniqe-id",
        "feature_parameters": {
            "include_timeframes": [
                "3m",
                "15m",
                "1h"
            ],
            "include_corr_pairlist": [
                "BTC/USDT",
                "ETH/USDT"
            ],
            "label_period_candles": 20,
            "include_shifted_candles": 2,
            "DI_threshold": 0.9,
            "weight_factor": 0.9,
            "principal_component_analysis": false,
            "use_SVM_to_remove_outliers": true,
            "stratify_training_data": 0,
            "indicator_max_period_candles": 20,
            "indicator_periods_candles": [10, 20]
        },
        "data_split_parameters": {
            "test_size": 0.33,
            "random_state": 1
        },
        "model_training_parameters": {
            "n_estimators": 1000
        }
    },
    "bot_name": "",
    "force_entry_enable": true,
    "initial_state": "running",
    "internals": {
        "process_throttle_secs": 5
    }
}
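As a rough usage sketch, a config like this is passed to the bot together with a FreqAI-enabled strategy and a prediction model class; the strategy name below is a placeholder and not part of this changeset:

```bash
# Dry-run trade with the FreqAI example config; MyFreqaiStrategy is hypothetical
freqtrade trade --config config_examples/config_freqai.example.json \
    --strategy MyFreqaiStrategy --freqaimodel LightGBMRegressor
```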
config_examples/config_full.example.json

@@ -5,6 +5,7 @@
"tradable_balance_ratio": 0.99,
"fiat_display_currency": "USD",
"amount_reserve_percent": 0.05,
"available_capital": 1000,
"amend_last_stake_amount": false,
"last_stake_amount_min_ratio": 0.5,
"dry_run": true,

@@ -92,6 +93,7 @@
"secret": "your_exchange_secret",
"password": "",
"log_responses": false,
// "unknown_fee_rate": 1,
"ccxt_config": {},
"ccxt_async_config": {},
"pair_whitelist": [

@@ -155,7 +157,8 @@
"entry_cancel": "on",
"exit_cancel": "on",
"protection_trigger": "off",
"protection_trigger_global": "on"
"protection_trigger_global": "on",
"show_candle": "off"
},
"reload": true,
"balance_dust_level": 0.01
@@ -1,4 +1,4 @@
FROM python:3.9.9-slim-bullseye as base
FROM python:3.9.12-slim-bullseye as base

# Setup env
ENV LANG C.UTF-8
@@ -7,4 +7,5 @@ FROM freqtradeorg/freqtrade:develop
# The below dependency - pyti - serves as an example. Please use whatever you need!
RUN pip install --user pyti

# Switch back to user (only if you required root above)
# USER ftuser
docker/Dockerfile.freqai (new file, 9 lines)

@@ -0,0 +1,9 @@
ARG sourceimage=freqtradeorg/freqtrade
ARG sourcetag=develop
FROM ${sourceimage}:${sourcetag}

# Install dependencies
COPY requirements-freqai.txt /freqtrade/

RUN pip install -r requirements-freqai.txt --user --no-cache-dir
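Matching the `sourceimage`/`sourcetag` build args used by the publish scripts above, the FreqAI image can also be built standalone; a sketch (the local tag name is illustrative):

```bash
# Build the FreqAI layer on top of the published develop image
docker build -f docker/Dockerfile.freqai \
    --build-arg sourceimage=freqtradeorg/freqtrade \
    --build-arg sourcetag=develop \
    -t freqtrade:develop_freqai .
```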
docs/advanced-backtesting.md

@@ -22,50 +22,79 @@ DataFrame of the candles that resulted in buy signals. Depending on how many buy
makes, this file may get quite large, so periodically check your `user_data/backtest_results`
folder to delete old exports.

To analyze the buy tags, we need to use the `buy_reasons.py` script from
[froggleston's repo](https://github.com/froggleston/freqtrade-buyreasons). Follow the instructions
in their README to copy the script into your `freqtrade/scripts/` folder.

Before running your next backtest, make sure you either delete your old backtest results or run
backtesting with the `--cache none` option to make sure no cached results are used.

If all goes well, you should now see a `backtest-result-{timestamp}_signals.pkl` file in the
`user_data/backtest_results` folder.

Now run the `buy_reasons.py` script, supplying a few options:
To analyze the entry/exit tags, we now need to use the `freqtrade backtesting-analysis` command
with `--analysis-groups` option provided with space-separated arguments (default `0 1 2`):

``` bash
python3 scripts/buy_reasons.py -c <config.json> -s <strategy_name> -t <timerange> -g0,1,2,3,4
freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 1 2 3 4
```

The `-g` option is used to specify the various tabular outputs, ranging from the simplest (0)
to the most detailed per pair, per buy and per sell tag (4). More options are available by
running with the `-h` option.
This command will read from the last backtesting results. The `--analysis-groups` option is
used to specify the various tabular outputs showing the profit fo each group or trade,
ranging from the simplest (0) to the most detailed per pair, per buy and per sell tag (4):

* 1: profit summaries grouped by enter_tag
* 2: profit summaries grouped by enter_tag and exit_tag
* 3: profit summaries grouped by pair and enter_tag
* 4: profit summaries grouped by pair, enter_ and exit_tag (this can get quite large)

More options are available by running with the `-h` option.

### Using export-filename

Normally, `backtesting-analysis` uses the latest backtest results, but if you wanted to go
back to a previous backtest output, you need to supply the `--export-filename` option.
You can supply the same parameter to `backtest-analysis` with the name of the final backtest
output file. This allows you to keep historical versions of backtest results and re-analyse
them at a later date:

``` bash
freqtrade backtesting -c <config.json> --timeframe <tf> --strategy <strategy_name> --timerange=<timerange> --export=signals --export-filename=/tmp/mystrat_backtest.json
```

You should see some output similar to below in the logs with the name of the timestamped
filename that was exported:

```
2022-06-14 16:28:32,698 - freqtrade.misc - INFO - dumping json to "/tmp/mystrat_backtest-2022-06-14_16-28-32.json"
```

You can then use that filename in `backtesting-analysis`:

```
freqtrade backtesting-analysis -c <config.json> --export-filename=/tmp/mystrat_backtest-2022-06-14_16-28-32.json
```

### Tuning the buy tags and sell tags to display

To show only certain buy and sell tags in the displayed output, use the following two options:

```
--enter_reason_list : Comma separated list of enter signals to analyse. Default: "all"
--exit_reason_list : Comma separated list of exit signals to analyse. Default: "stop_loss,trailing_stop_loss"
--enter-reason-list : Space-separated list of enter signals to analyse. Default: "all"
--exit-reason-list : Space-separated list of exit signals to analyse. Default: "all"
```

For example:

```bash
python3 scripts/buy_reasons.py -c <config.json> -s <strategy_name> -t <timerange> -g0,1,2,3,4 --enter_reason_list "enter_tag_a,enter_tag_b" --exit_reason_list "roi,custom_exit_tag_a,stop_loss"
freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 2 --enter-reason-list enter_tag_a enter_tag_b --exit-reason-list roi custom_exit_tag_a stop_loss
```

### Outputting signal candle indicators

The real power of the buy_reasons.py script comes from the ability to print out the indicator
The real power of `freqtrade backtesting-analysis` comes from the ability to print out the indicator
values present on signal candles to allow fine-grained investigation and tuning of buy signal
indicators. To print out a column for a given set of indicators, use the `--indicator-list`
option:

```bash
python3 scripts/buy_reasons.py -c <config.json> -s <strategy_name> -t <timerange> -g0,1,2,3,4 --enter_reason_list "enter_tag_a,enter_tag_b" --exit_reason_list "roi,custom_exit_tag_a,stop_loss" --indicator_list "rsi,rsi_1h,bb_lowerband,ema_9,macd,macdsignal"
freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 2 --enter-reason-list enter_tag_a enter_tag_b --exit-reason-list roi custom_exit_tag_a stop_loss --indicator-list rsi rsi_1h bb_lowerband ema_9 macd macdsignal
```

The indicators have to be present in your strategy's main DataFrame (either for your main
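Putting the two steps described in this doc change together, a typical end-to-end flow (config path, strategy name, and timerange are illustrative) is to export signals during backtesting and then analyse them:

```bash
# 1) Backtest with signal export enabled
freqtrade backtesting -c config.json --strategy MyStrategy --timerange 20220101-20220601 --export=signals

# 2) Analyse the exported signals from the latest backtest result
freqtrade backtesting-analysis -c config.json --analysis-groups 0 2 --indicator-list rsi macd
```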
@@ -98,6 +98,23 @@ class MyAwesomeStrategy(IStrategy):
!!! Note
    All overrides are optional and can be mixed/matched as necessary.

### Dynamic parameters

Parameters can also be defined dynamically, but must be available to the instance once the [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.

``` python

class MyAwesomeStrategy(IStrategy):

    def bot_start(self, **kwargs) -> None:
        self.buy_adx = IntParameter(20, 30, default=30, optimize=True)

    # ...
```

!!! Warning
    Parameters created this way will not show up in the `list-strategies` parameter count.

### Overriding Base estimator

You can define your own estimator for Hyperopt by implementing `generate_estimator()` in the Hyperopt subclass.
docs/assets/discord_notification.png: new binary file (48 KiB)
docs/assets/freqai_DI.jpg: new binary file (307 KiB)
docs/assets/freqai_algo.jpg: new binary file (345 KiB)
docs/assets/freqai_dbscan.jpg: new binary file (66 KiB)
docs/assets/freqai_doc_logo.svg: new file (304 lines, 2.0 MiB)
docs/assets/freqai_moving-window.jpg: new binary file (270 KiB)
docs/assets/freqai_weight-factor.jpg: new binary file (191 KiB)
docs/backtesting.md

@@ -287,45 +287,55 @@ A backtesting result will look like that:
| ADA/BTC | 1 | 0.89 | 0.89 | 0.00004434 | 0.44 | 6:00:00 | 1 0 0 100 |
| LTC/BTC | 1 | 0.68 | 0.68 | 0.00003421 | 0.34 | 2:00:00 | 1 0 0 100 |
| TOTAL | 2 | 0.78 | 1.57 | 0.00007855 | 0.78 | 4:00:00 | 2 0 0 100 |
================ SUMMARY METRICS ===============
| Metric | Value |
|------------------------+---------------------|
| Backtesting from | 2019-01-01 00:00:00 |
| Backtesting to | 2019-05-01 00:00:00 |
| Max open trades | 3 |
| | |
| Total/Daily Avg Trades | 429 / 3.575 |
| Starting balance | 0.01000000 BTC |
| Final balance | 0.01762792 BTC |
| Absolute profit | 0.00762792 BTC |
| Total profit % | 76.2% |
| CAGR % | 460.87% |
| Trades per day | 3.575 |
| Avg. stake amount | 0.001 BTC |
| Total trade volume | 0.429 BTC |
| | |
| Best Pair | LSK/BTC 26.26% |
| Worst Pair | ZEC/BTC -10.18% |
| Best Trade | LSK/BTC 4.25% |
| Worst Trade | ZEC/BTC -10.25% |
| Best day | 0.00076 BTC |
| Worst day | -0.00036 BTC |
| Days win/draw/lose | 12 / 82 / 25 |
| Avg. Duration Winners | 4:23:00 |
| Avg. Duration Loser | 6:55:00 |
| Rejected Entry signals | 3089 |
| Entry/Exit Timeouts | 0 / 0 |
| | |
| Min balance | 0.00945123 BTC |
| Max balance | 0.01846651 BTC |
| Drawdown (Account) | 13.33% |
| Drawdown | 0.0015 BTC |
| Drawdown high | 0.0013 BTC |
| Drawdown low | -0.0002 BTC |
| Drawdown Start | 2019-02-15 14:10:00 |
| Drawdown End | 2019-04-11 18:15:00 |
| Market change | -5.88% |
===============================================
================== SUMMARY METRICS ==================
| Metric | Value |
|-----------------------------+---------------------|
| Backtesting from | 2019-01-01 00:00:00 |
| Backtesting to | 2019-05-01 00:00:00 |
| Max open trades | 3 |
| | |
| Total/Daily Avg Trades | 429 / 3.575 |
| Starting balance | 0.01000000 BTC |
| Final balance | 0.01762792 BTC |
| Absolute profit | 0.00762792 BTC |
| Total profit % | 76.2% |
| CAGR % | 460.87% |
| Profit factor | 1.11 |
| Avg. stake amount | 0.001 BTC |
| Total trade volume | 0.429 BTC |
| | |
| Long / Short | 352 / 77 |
| Total profit Long % | 1250.58% |
| Total profit Short % | -15.02% |
| Absolute profit Long | 0.00838792 BTC |
| Absolute profit Short | -0.00076 BTC |
| | |
| Best Pair | LSK/BTC 26.26% |
| Worst Pair | ZEC/BTC -10.18% |
| Best Trade | LSK/BTC 4.25% |
| Worst Trade | ZEC/BTC -10.25% |
| Best day | 0.00076 BTC |
| Worst day | -0.00036 BTC |
| Days win/draw/lose | 12 / 82 / 25 |
| Avg. Duration Winners | 4:23:00 |
| Avg. Duration Loser | 6:55:00 |
| Rejected Entry signals | 3089 |
| Entry/Exit Timeouts | 0 / 0 |
| Canceled Trade Entries | 34 |
| Canceled Entry Orders | 123 |
| Replaced Entry Orders | 89 |
| | |
| Min balance | 0.00945123 BTC |
| Max balance | 0.01846651 BTC |
| Max % of account underwater | 25.19% |
| Absolute Drawdown (Account) | 13.33% |
| Drawdown | 0.0015 BTC |
| Drawdown high | 0.0013 BTC |
| Drawdown low | -0.0002 BTC |
| Drawdown Start | 2019-02-15 14:10:00 |
| Drawdown End | 2019-04-11 18:15:00 |
| Market change | -5.88% |
=====================================================
```

### Backtesting report table
@@ -377,50 +387,55 @@ The last element of the backtest report is the summary metrics table.
It contains some useful key metrics about performance of your strategy on backtesting data.

```
================ SUMMARY METRICS ===============
| Metric | Value |
|------------------------+---------------------|
| Backtesting from | 2019-01-01 00:00:00 |
| Backtesting to | 2019-05-01 00:00:00 |
| Max open trades | 3 |
| | |
| Total/Daily Avg Trades | 429 / 3.575 |
| Starting balance | 0.01000000 BTC |
| Final balance | 0.01762792 BTC |
| Absolute profit | 0.00762792 BTC |
| Total profit % | 76.2% |
| CAGR % | 460.87% |
| Avg. stake amount | 0.001 BTC |
| Total trade volume | 0.429 BTC |
| | |
| Long / Short | 352 / 77 |
| Total profit Long % | 1250.58% |
| Total profit Short % | -15.02% |
| Absolute profit Long | 0.00838792 BTC |
| Absolute profit Short | -0.00076 BTC |
| | |
| Best Pair | LSK/BTC 26.26% |
| Worst Pair | ZEC/BTC -10.18% |
| Best Trade | LSK/BTC 4.25% |
| Worst Trade | ZEC/BTC -10.25% |
| Best day | 0.00076 BTC |
| Worst day | -0.00036 BTC |
| Days win/draw/lose | 12 / 82 / 25 |
| Avg. Duration Winners | 4:23:00 |
| Avg. Duration Loser | 6:55:00 |
| Rejected Entry signals | 3089 |
| Entry/Exit Timeouts | 0 / 0 |
| | |
| Min balance | 0.00945123 BTC |
| Max balance | 0.01846651 BTC |
| Drawdown (Account) | 13.33% |
| Drawdown | 0.0015 BTC |
| Drawdown high | 0.0013 BTC |
| Drawdown low | -0.0002 BTC |
| Drawdown Start | 2019-02-15 14:10:00 |
| Drawdown End | 2019-04-11 18:15:00 |
| Market change | -5.88% |
================================================
================== SUMMARY METRICS ==================
| Metric | Value |
|-----------------------------+---------------------|
| Backtesting from | 2019-01-01 00:00:00 |
| Backtesting to | 2019-05-01 00:00:00 |
| Max open trades | 3 |
| | |
| Total/Daily Avg Trades | 429 / 3.575 |
| Starting balance | 0.01000000 BTC |
| Final balance | 0.01762792 BTC |
| Absolute profit | 0.00762792 BTC |
| Total profit % | 76.2% |
| CAGR % | 460.87% |
| Profit factor | 1.11 |
| Avg. stake amount | 0.001 BTC |
| Total trade volume | 0.429 BTC |
| | |
| Long / Short | 352 / 77 |
| Total profit Long % | 1250.58% |
| Total profit Short % | -15.02% |
| Absolute profit Long | 0.00838792 BTC |
| Absolute profit Short | -0.00076 BTC |
| | |
| Best Pair | LSK/BTC 26.26% |
| Worst Pair | ZEC/BTC -10.18% |
| Best Trade | LSK/BTC 4.25% |
| Worst Trade | ZEC/BTC -10.25% |
| Best day | 0.00076 BTC |
| Worst day | -0.00036 BTC |
| Days win/draw/lose | 12 / 82 / 25 |
| Avg. Duration Winners | 4:23:00 |
| Avg. Duration Loser | 6:55:00 |
| Rejected Entry signals | 3089 |
| Entry/Exit Timeouts | 0 / 0 |
| Canceled Trade Entries | 34 |
| Canceled Entry Orders | 123 |
| Replaced Entry Orders | 89 |
| | |
| Min balance | 0.00945123 BTC |
| Max balance | 0.01846651 BTC |
| Max % of account underwater | 25.19% |
| Absolute Drawdown (Account) | 13.33% |
| Drawdown | 0.0015 BTC |
| Drawdown high | 0.0013 BTC |
| Drawdown low | -0.0002 BTC |
| Drawdown Start | 2019-02-15 14:10:00 |
| Drawdown End | 2019-04-11 18:15:00 |
| Market change | -5.88% |
=====================================================

```
@@ -431,6 +446,8 @@ It contains some useful key metrics about performance of your strategy on backte
|
||||
- `Final balance`: Final balance (starting balance + absolute profit).
|
||||
- `Absolute profit`: Profit made in stake currency.
|
||||
- `Total profit %`: Total profit. Aligned to the `TOTAL` row's `Tot Profit %` from the first table. Calculated as `(End capital − Starting capital) / Starting capital`.
|
||||
- `CAGR %`: Compound annual growth rate.
|
||||
- `Profit factor`: Gross profit divided by gross loss (absolute value); values above 1 mean the strategy won more than it lost.
|
||||
- `Avg. stake amount`: Average stake amount, either `stake_amount` or the average when using dynamic stake amount.
|
||||
- `Total trade volume`: Volume generated on the exchange to reach the above profit.
|
||||
- `Best Pair` / `Worst Pair`: Best and worst performing pair, and its corresponding `Cum Profit %`.
|
||||
@@ -440,8 +457,13 @@ It contains some useful key metrics about performance of your strategy on backte
|
||||
- `Avg. Duration Winners` / `Avg. Duration Loser`: Average durations for winning and losing trades.
|
||||
- `Rejected Entry signals`: Trade entry signals that could not be acted upon due to `max_open_trades` being reached.
|
||||
- `Entry/Exit Timeouts`: Entry/exit orders which did not fill (only applicable if custom pricing is used).
|
||||
- `Canceled Trade Entries`: Number of trades that have been canceled by user request via `adjust_entry_price`.
|
||||
- `Canceled Entry Orders`: Number of entry orders that have been canceled by user request via `adjust_entry_price`.
|
||||
- `Replaced Entry Orders`: Number of entry orders that have been replaced by user request via `adjust_entry_price`.
|
||||
- `Min balance` / `Max balance`: Lowest and Highest Wallet balance during the backtest period.
|
||||
- `Drawdown (Account)`: Maximum Account Drawdown experienced. Calculated as $(Absolute Drawdown) / (DrawdownHigh + startingBalance)$.
|
||||
- `Max % of account underwater`: Maximum percentage your account has decreased from the top since the simulation started.
|
||||
Calculated as the maximum of `(Max Balance - Current Balance) / (Max Balance)`.
|
||||
- `Absolute Drawdown (Account)`: Maximum Account Drawdown experienced. Calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)` (a short calculation sketch follows this list).
|
||||
- `Drawdown`: Maximum, absolute drawdown experienced. Difference between Drawdown High and Subsequent Low point.
|
||||
- `Drawdown high` / `Drawdown low`: Profit at the beginning and end of the largest drawdown period. A negative low value means initial capital lost.
|
||||
- `Drawdown Start` / `Drawdown End`: Start and end datetime for this largest drawdown (can also be visualized via the `plot-dataframe` sub-command).
|
||||
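The drawdown, CAGR and profit-factor figures above can be reproduced from the trade results. The following is a minimal sketch, not freqtrade's implementation; all function and variable names are illustrative, and the example numbers only roughly match the report shown earlier.

``` python
from typing import List

def drawdown_metrics(cum_profits: List[float], starting_balance: float) -> dict:
    """Derive the drawdown figures described above from cumulative profits."""
    peak = peak_at_max = trough_at_max = max_dd = 0.0
    for profit in cum_profits:
        peak = max(peak, profit)
        dd = peak - profit
        if dd > max_dd:
            max_dd, peak_at_max, trough_at_max = dd, peak, profit
    return {
        "Drawdown": max_dd,                       # Drawdown high - subsequent low
        "Drawdown high": peak_at_max,
        "Drawdown low": trough_at_max,            # negative means initial capital was lost
        "Absolute Drawdown (Account)": max_dd / (peak_at_max + starting_balance),
    }

def cagr(starting_balance: float, final_balance: float, days: int) -> float:
    """Compound annual growth rate over a backtest of `days` days."""
    return (final_balance / starting_balance) ** (365 / days) - 1

def profit_factor(trade_profits: List[float]) -> float:
    """Gross profit divided by gross loss (absolute value)."""
    gains = sum(p for p in trade_profits if p > 0)
    losses = abs(sum(p for p in trade_profits if p < 0))
    return gains / losses if losses else float("inf")

print(drawdown_metrics([0.0005, 0.0013, -0.0002, 0.0011], starting_balance=0.01))
print(cagr(0.01, 0.01762792, days=120))        # roughly the ~460% CAGR from the example
print(profit_factor([0.004, 0.006, -0.009]))   # roughly the 1.11 profit factor
```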
@@ -457,7 +479,7 @@ You can get an overview over daily / weekly or monthly results by using the `--b
|
||||
To visualize daily and weekly breakdowns, you can use the following:
|
||||
|
||||
``` bash
|
||||
freqtrade backtesting --strategy MyAwesomeStrategy --breakdown day month
|
||||
freqtrade backtesting --strategy MyAwesomeStrategy --breakdown day week
|
||||
```
|
||||
|
||||
``` output
|
||||
@@ -473,7 +495,7 @@ freqtrade backtesting --strategy MyAwesomeStrategy --breakdown day month
|
||||
|
||||
```
|
||||
|
||||
The output will show a table containing the realized absolute Profit (in stake currency) for the given timeperiod, as well as wins, draws and losses that materialized (closed) on this day.
|
||||
The output will show a table containing the realized absolute Profit (in stake currency) for the given timeperiod, as well as wins, draws and losses that materialized (closed) on this day. Below that there will be a second table for the summarized values of weeks indicated by the date of the closing Sunday. The same would apply to a monthly breakdown indicated by the last day of the month.
|
||||
|
||||
### Backtest result caching
|
||||
|
||||
@@ -492,6 +514,7 @@ You can then load the trades to perform further analysis as shown in the [data a
|
||||
|
||||
Since backtesting lacks some detailed information about what happens within a candle, it needs to make a few assumptions:
|
||||
|
||||
- Exchange [trading limits](#trading-limits-in-backtesting) are respected
|
||||
- Buys happen at open-price
|
||||
- All orders are filled at the requested price (no slippage, no unfilled orders)
|
||||
- Exit-signal exits happen at open-price of the consecutive candle
|
||||
@@ -512,15 +535,41 @@ Since backtesting lacks some detailed information about what happens within a ca
|
||||
- Exit-reason does not explain if a trade was positive or negative, just what triggered the exit (this can look odd if negative ROI values are used)
|
||||
- Evaluation sequence (if multiple signals happen on the same candle)
|
||||
- Exit-signal
|
||||
- ROI (if not stoploss)
|
||||
- Stoploss
|
||||
- ROI
|
||||
- Trailing stoploss
|
||||
|
||||
With these assumptions, backtesting tries to mirror real trading as closely as possible. However, backtesting will **never** replace running a strategy in dry-run mode.
|
||||
Also, keep in mind that past results don't guarantee future success.
|
||||
|
||||
In addition to the above assumptions, strategy authors should carefully read the [Common Mistakes](strategy-customization.md#common-mistakes-when-developing-strategies) section, to avoid using data in backtesting which is not available in real market conditions.
|
||||
|
||||
### Improved backtest accuracy
|
||||
### Trading limits in backtesting
|
||||
|
||||
Exchanges have certain trading limits, like a minimum tradable amount in base currency, or a minimum order value in stake (quote) currency.
|
||||
These limits are usually listed in the exchange documentation as "trading rules" or similar.
|
||||
|
||||
Backtesting (as well as live and dry-run) does honor these limits, and will ensure that a stoploss can be placed below this value - so the value will be slightly higher than what the exchange specifies.
|
||||
Freqtrade has however no information about historic limits.
|
||||
|
||||
This can lead to situations where trading-limits are inflated by using a historic price, resulting in minimum amounts > 50$.
|
||||
|
||||
For example:
|
||||
|
||||
BTC minimum tradable amount is 0.001.
|
||||
BTC trades at 22.000\$ today (so the 0.001 BTC minimum corresponds to roughly 22\$) - but the backtesting period includes prices as high as 50.000\$.
|
||||
Today's minimum would be `0.001 * 22_000` - or 22\$.
|
||||
However, at the historic price peak the same limit corresponds to 50\$ - based on `0.001 * 50_000`.
|
||||
|
||||
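As a rough illustration of this inflation effect (not freqtrade's exact formula; the reserve value only mimics the padding mentioned above), the implied minimum stake scales with whichever price the current limit is applied to:

``` python
MIN_BASE_AMOUNT = 0.001  # minimum tradable BTC amount, taken from today's exchange limits

def min_stake(price_usd: float, reserve: float = 0.05) -> float:
    """Minimum quote-currency stake implied by the base-amount limit,
    padded slightly so a stoploss order can still be placed below it."""
    return MIN_BASE_AMOUNT * price_usd * (1 + reserve)

print(round(min_stake(22_000), 2))  # ~23.1$ at today's price
print(round(min_stake(50_000), 2))  # ~52.5$ when the same limit is applied to the historic high
```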
#### Trading precision limits
|
||||
|
||||
Most exchanges impose precision limits on both price and amounts, so you cannot buy 1.0020401 of a pair, or at a price of 1.24567123123.
|
||||
Instead, these prices and amounts will be rounded or truncated (based on the exchange definition) to the defined trading precision.
|
||||
The above values may for example be rounded to an amount of 1.002, and a price of 1.24567.
|
||||
|
||||
These precision values are based on current exchange limits (as described in the [above section](#trading-limits-in-backtesting)), as historic precision limits are not available.
|
||||
|
||||
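A minimal sketch of the truncation case described above (rounding works analogously); the helper name is illustrative and not part of freqtrade's API:

``` python
import math

def truncate_to_precision(value: float, decimals: int) -> float:
    """Truncate a value to the exchange's allowed number of decimal places."""
    factor = 10 ** decimals
    return math.floor(value * factor) / factor

print(truncate_to_precision(1.0020401, 3))      # amount precision of 3 digits -> 1.002
print(truncate_to_precision(1.24567123123, 5))  # price precision of 5 digits  -> 1.24567
```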
## Improved backtest accuracy
|
||||
|
||||
One big limitation of backtesting is its inability to know how prices moved intra-candle (did the price reach the high before the low, or vice versa?).
|
||||
So assuming you run backtesting with a 1h timeframe, there will be 4 prices for that candle (Open, High, Low, Close).
|
||||
|
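The ambiguity is easy to demonstrate: two very different intra-candle paths can collapse to identical OHLC values, so the candle alone cannot tell which level was touched first. A purely illustrative sketch:

``` python
# Two intra-candle price paths that produce the identical (Open, High, Low, Close) candle.
path_a = [100, 105, 98, 102]   # high reached first, then the low
path_b = [100, 98, 105, 102]   # low reached first, then the high

def to_ohlc(path):
    return path[0], max(path), min(path), path[-1]

assert to_ohlc(path_a) == to_ohlc(path_b) == (100, 105, 98, 102)
```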
@@ -20,7 +20,9 @@ All profit calculations of Freqtrade include fees. For Backtesting / Hyperopt /
|
||||
## Bot execution logic
|
||||
|
||||
Starting freqtrade in dry-run or live mode (using `freqtrade trade`) will start the bot and start the bot iteration loop.
|
||||
By default, loop runs every few seconds (`internals.process_throttle_secs`) and does roughly the following in the following sequence:
|
||||
This will also run the `bot_start()` callback.
|
||||
|
||||
By default, the bot loop runs every few seconds (`internals.process_throttle_secs`) and performs the following actions:
|
||||
|
||||
* Fetch open trades from persistence.
|
||||
* Calculate current list of tradable pairs.
|
||||
@@ -34,6 +36,7 @@ By default, loop runs every few seconds (`internals.process_throttle_secs`) and
|
||||
* Check timeouts for open orders.
|
||||
* Calls `check_entry_timeout()` strategy callback for open entry orders.
|
||||
* Calls `check_exit_timeout()` strategy callback for open exit orders.
|
||||
* Calls `adjust_entry_price()` strategy callback for open entry orders.
|
||||
* Verifies existing positions and eventually places exit orders.
|
||||
* Considers stoploss, ROI and exit-signal, `custom_exit()` and `custom_stoploss()`.
|
||||
* Determine exit-price based on `exit_pricing` configuration setting or by using the `custom_exit_price()` callback.
|
||||
@@ -53,11 +56,13 @@ This loop will be repeated again and again until the bot is stopped.
|
||||
[backtesting](backtesting.md) or [hyperopt](hyperopt.md) do only part of the above logic, since most of the trading operations are fully simulated.
|
||||
|
||||
* Load historic data for configured pairlist.
|
||||
* Calls `bot_start()` once.
|
||||
* Calls `bot_loop_start()` once.
|
||||
* Calculate indicators (calls `populate_indicators()` once per pair).
|
||||
* Calculate entry / exit signals (calls `populate_entry_trend()` and `populate_exit_trend()` once per pair).
|
||||
* Loops per candle simulating entry and exit points.
|
||||
* Check for Order timeouts, either via the `unfilledtimeout` configuration, or via `check_entry_timeout()` / `check_exit_timeout()` strategy callbacks.
|
||||
* Calls `adjust_entry_price()` strategy callback for open entry orders.
|
||||
* Check for trade entry signals (`enter_long` / `enter_short` columns).
|
||||
* Confirm trade entry / exits (calls `confirm_trade_entry()` and `confirm_trade_exit()` if implemented in the strategy).
|
||||
* Call `custom_entry_price()` (if implemented in the strategy) to determine entry price (Prices are moved to be within the opening candle).
|
||||
@@ -65,7 +70,7 @@ This loop will be repeated again and again until the bot is stopped.
|
||||
* Determine stake size by calling the `custom_stake_amount()` callback.
|
||||
* Check position adjustments for open trades if enabled and call `adjust_trade_position()` to determine if an additional order is requested.
|
||||
* Call `custom_stoploss()` and `custom_exit()` to find custom exit points.
|
||||
* For exits based on exit-signal and custom-exit: Call `custom_exit_price()` to determine exit price (Prices are moved to be within the closing candle).
|
||||
* For exits based on exit-signal, custom-exit and partial exits: Call `custom_exit_price()` to determine exit price (Prices are moved to be within the closing candle).
|
||||
* Generate backtest report output
|
||||
|
||||
!!! Note
|
||||
|
@@ -105,7 +105,7 @@ This is similar to using multiple `--config` parameters, but simpler in usage as
|
||||
|
||||
``` json title="Result"
|
||||
{
|
||||
"max_open_trades": 10,
|
||||
"max_open_trades": 3,
|
||||
"stake_currency": "USDT",
|
||||
"stake_amount": "unlimited"
|
||||
}
|
||||
@@ -116,6 +116,9 @@ This is similar to using multiple `--config` parameters, but simpler in usage as
|
||||
The table below lists all configuration parameters available.
|
||||
|
||||
Freqtrade can also load many options via command line (CLI) arguments (check out the command's `--help` output for details).
|
||||
|
||||
### Configuration option prevalence
|
||||
|
||||
The prevalence for all Options is as follows (a short illustration follows the list):
|
||||
|
||||
- CLI arguments override any other option
|
||||
@@ -123,6 +126,8 @@ The prevalence for all Options is as follows:
|
||||
- Configuration files are used in sequence (the last file wins) and override Strategy configurations.
|
||||
- Strategy configurations are only used if they are not set via configuration or command-line arguments. These options are marked with [Strategy Override](#parameters-in-the-strategy) in the below table.
|
||||
|
||||
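As an illustration of this order (values and file names are made up), the rules behave like a simple "later wins" merge, with strategy values acting as defaults:

``` python
# Strategy values act as defaults, config files override them in load order,
# and CLI arguments override everything.
strategy_values = {"max_open_trades": 10, "stake_amount": "unlimited"}
config_file_1 = {"max_open_trades": 5, "stake_currency": "USDT"}
config_file_2 = {"max_open_trades": 3}   # the last config file wins
cli_arguments = {}                        # a value passed on the command line would land here

effective_config = {**strategy_values, **config_file_1, **config_file_2, **cli_arguments}
print(effective_config["max_open_trades"])  # 3
```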
### Parameters table
|
||||
|
||||
Mandatory parameters are marked as **Required**, which means that they are required to be set in one of the possible ways.
|
||||
|
||||
| Parameter | Description |
|
||||
@@ -135,12 +140,12 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
||||
| `amend_last_stake_amount` | Use reduced last stake amount if necessary. [More information below](#configuring-amount-per-trade). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||
| `last_stake_amount_min_ratio` | Defines minimum stake amount that has to be left and executed. Applies only to the last stake amount when it's amended to a reduced value (i.e. if `amend_last_stake_amount` is set to `true`). [More information below](#configuring-amount-per-trade). <br>*Defaults to `0.5`.* <br> **Datatype:** Float (as ratio)
|
||||
| `amount_reserve_percent` | Reserve some amount in min pair stake amount. The bot will reserve `amount_reserve_percent` + stoploss value when calculating min pair stake amount in order to avoid possible trade refusals. <br>*Defaults to `0.05` (5%).* <br> **Datatype:** Positive Float as ratio.
|
||||
| `timeframe` | The timeframe to use (e.g `1m`, `5m`, `15m`, `30m`, `1h` ...). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** String
|
||||
| `timeframe` | The timeframe to use (e.g `1m`, `5m`, `15m`, `30m`, `1h` ...). Usually missing in configuration, and specified in the strategy. [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** String
|
||||
| `fiat_display_currency` | Fiat currency used to show your profits. [More information below](#what-values-can-be-used-for-fiat_display_currency). <br> **Datatype:** String
|
||||
| `dry_run` | **Required.** Define if the bot must be in Dry Run or production mode. <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
||||
| `dry_run_wallet` | Define the starting amount in stake currency for the simulated wallet used by the bot running in Dry Run mode.<br>*Defaults to `1000`.* <br> **Datatype:** Float
|
||||
| `cancel_open_orders_on_exit` | Cancel open orders when the `/stop` RPC command is issued, `Ctrl+C` is pressed or the bot dies unexpectedly. When set to `true`, this allows you to use `/stop` to cancel unfilled and partially filled orders in the event of a market crash. It does not impact open positions. <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||
| `process_only_new_candles` | Enable processing of indicators only when new candles arrive. If false, each loop populates the indicators; this means the same candle is processed many times, creating system load, but can be useful if your strategy depends on tick data and not only on candles. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||
| `process_only_new_candles` | Enable processing of indicators only when new candles arrive. If false, each loop populates the indicators; this means the same candle is processed many times, creating system load, but can be useful if your strategy depends on tick data and not only on candles. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
||||
| `minimal_roi` | **Required.** Set the threshold as ratio the bot will use to exit a trade. [More information below](#understand-minimal_roi). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Dict
|
||||
| `stoploss` | **Required.** Value as ratio of the stoploss used by the bot. More details in the [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Float (as ratio)
|
||||
| `trailing_stop` | Enables trailing stoploss (based on `stoploss` in either configuration or strategy file). More details in the [stoploss documentation](stoploss.md#trailing-stop-loss). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Boolean
|
||||
@@ -148,13 +153,16 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
||||
| `trailing_stop_positive_offset` | Offset on when to apply `trailing_stop_positive`. Percentage value which should be positive. More details in the [stoploss documentation](stoploss.md#trailing-stop-loss-only-once-the-trade-has-reached-a-certain-offset). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0` (no offset).* <br> **Datatype:** Float
|
||||
| `trailing_only_offset_is_reached` | Only apply trailing stoploss when the offset is reached. [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||
| `fee` | Fee used during backtesting / dry-runs. Should normally not be configured, which has freqtrade fall back to the exchange default fee. Set as ratio (e.g. 0.001 = 0.1%). Fee is applied twice for each trade, once when buying, once when selling. <br> **Datatype:** Float (as ratio)
|
||||
| `futures_funding_rate` | User-specified funding rate to be used when historical funding rates are not available from the exchange. This does not overwrite real historical rates. It is recommended that this be set to 0 unless you are testing a specific coin and you understand how the funding rate will affect freqtrade's profit calculations. [More information here](leverage.md#unavailable-funding-rates) <br>*Defaults to None.*<br> **Datatype:** Float
|
||||
| `trading_mode` | Specifies if you want to trade regularly, trade with leverage, or trade contracts whose prices are derived from matching cryptocurrency prices. [leverage documentation](leverage.md). <br>*Defaults to `"spot"`.* <br> **Datatype:** String
|
||||
| `margin_mode` | When trading with leverage, this determines if the collateral owned by the trader will be shared or isolated to each trading pair [leverage documentation](leverage.md). <br> **Datatype:** String
|
||||
| `liquidation_buffer` | A ratio specifying how large of a safety net to place between the liquidation price and the stoploss to prevent a position from reaching the liquidation price [leverage documentation](leverage.md). <br>*Defaults to `0.05`.* <br> **Datatype:** Float
|
||||
| | **Unfilled timeout**
|
||||
| `unfilledtimeout.entry` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled entry order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
|
||||
| `unfilledtimeout.exit` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled exit order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
|
||||
| `unfilledtimeout.unit` | Unit to use in unfilledtimeout setting. Note: If you set unfilledtimeout.unit to "seconds", "internals.process_throttle_secs" must be less than or equal to the timeout [Strategy Override](#parameters-in-the-strategy). <br> *Defaults to `minutes`.* <br> **Datatype:** String
|
||||
| `unfilledtimeout.exit_timeout_count` | How many times can exit orders time out. Once this number of timeouts is reached, an emergency exit is triggered. 0 to disable and allow unlimited order cancels. [Strategy Override](#parameters-in-the-strategy).<br>*Defaults to `0`.* <br> **Datatype:** Integer
|
||||
| | **Pricing**
|
||||
| `entry_pricing.price_side` | Select the side of the spread the bot should look at to get the entry rate. [More information below](#buy-price-side).<br> *Defaults to `same`.* <br> **Datatype:** String (either `ask`, `bid`, `same` or `other`).
|
||||
| `entry_pricing.price_last_balance` | **Required.** Interpolate the bidding price. More information [below](#entry-price-without-orderbook-enabled).
|
||||
| `entry_pricing.use_order_book` | Enable entering using the rates in [Order Book Entry](#entry-price-with-orderbook-enabled). <br> *Defaults to `True`.*<br> **Datatype:** Boolean
|
||||
@@ -165,6 +173,8 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
||||
| `exit_pricing.price_last_balance` | Interpolate the exiting price. More information [below](#exit-price-without-orderbook-enabled).
|
||||
| `exit_pricing.use_order_book` | Enable exiting of open trades using [Order Book Exit](#exit-price-with-orderbook-enabled). <br> *Defaults to `True`.*<br> **Datatype:** Boolean
|
||||
| `exit_pricing.order_book_top` | Bot will use the top N rate in Order Book "price_side" to exit. I.e. a value of 2 will allow the bot to pick the 2nd ask rate in [Order Book Exit](#exit-price-with-orderbook-enabled)<br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
|
||||
| `custom_price_max_distance_ratio` | Configure maximum distance ratio between current and custom entry or exit price. <br>*Defaults to `0.02` (2%).*<br> **Datatype:** Positive float
|
||||
| | **TODO**
|
||||
| `use_exit_signal` | Use exit signals produced by the strategy in addition to the `minimal_roi`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
||||
| `exit_profit_only` | Wait until the bot reaches `exit_profit_offset` before taking an exit decision. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||
| `exit_profit_offset` | Exit-signal is only active above this value. Only active in combination with `exit_profit_only=True`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0`.* <br> **Datatype:** Float (as ratio)
|
||||
@@ -172,8 +182,9 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
||||
| `ignore_buying_expired_candle_after` | Specifies the number of seconds until a buy signal is no longer used. <br> **Datatype:** Integer
|
||||
| `order_types` | Configure order-types depending on the action (`"entry"`, `"exit"`, `"stoploss"`, `"stoploss_on_exchange"`). [More information below](#understand-order_types). [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Dict
|
||||
| `order_time_in_force` | Configure time in force for entry and exit orders. [More information below](#understand-order_time_in_force). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Dict
|
||||
| `custom_price_max_distance_ratio` | Configure maximum distance ratio between current and custom entry or exit price. <br>*Defaults to `0.02` (2%).*<br> **Datatype:** Positive float
|
||||
| `recursive_strategy_search` | Set to `true` to recursively search sub-directories inside `user_data/strategies` for a strategy. <br> **Datatype:** Boolean
|
||||
| `position_adjustment_enable` | Enables the strategy to use position adjustments (additional buys or sells). [More information here](strategy-callbacks.md#adjust-trade-position). <br> [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.*<br> **Datatype:** Boolean
|
||||
| `max_entry_position_adjustment` | Maximum additional order(s) for each open trade on top of the first entry Order. Set it to `-1` for unlimited additional orders. [More information here](strategy-callbacks.md#adjust-trade-position). <br> [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `-1`.*<br> **Datatype:** Positive Integer or -1
|
||||
| | **Exchange**
|
||||
| `exchange.name` | **Required.** Name of the exchange class to use. [List below](#user-content-what-values-for-exchangename). <br> **Datatype:** String
|
||||
| `exchange.sandbox` | Use the 'sandbox' version of the exchange, where the exchange provides a sandbox for risk-free integration. See [here](sandbox-testing.md) in more details.<br> **Datatype:** Boolean
|
||||
| `exchange.key` | API key to use for the exchange. Only required when you are in production mode.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
||||
@@ -190,14 +201,19 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
||||
| `exchange.skip_open_order_update` | Skips open order updates on startup should the exchange cause problems. Only relevant in live conditions.<br>*Defaults to `false`<br> **Datatype:** Boolean
|
||||
| `exchange.unknown_fee_rate` | Fallback value to use when calculating trading fees. This can be useful for exchanges which have fees in non-tradable currencies. The value provided here will be multiplied with the "fee cost".<br>*Defaults to `None`<br> **Datatype:** float
|
||||
| `exchange.log_responses` | Log relevant exchange responses. For debug mode only - use with care.<br>*Defaults to `false`<br> **Datatype:** Boolean
|
||||
| `edge.*` | Please refer to [edge configuration document](edge.md) for detailed explanation.
|
||||
| `experimental.block_bad_exchanges` | Block exchanges known to not work with freqtrade. Leave on default unless you want to test if that exchange works now. <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
||||
| | **Plugins**
|
||||
| `edge.*` | Please refer to [edge configuration document](edge.md) for detailed explanation of all possible configuration options.
|
||||
| `pairlists` | Define one or more pairlists to be used. [More information](plugins.md#pairlists-and-pairlist-handlers). <br>*Defaults to `StaticPairList`.* <br> **Datatype:** List of Dicts
|
||||
| `protections` | Define one or more protections to be used. [More information](plugins.md#protections). <br> **Datatype:** List of Dicts
|
||||
| | **Telegram**
|
||||
| `telegram.enabled` | Enable the usage of Telegram. <br> **Datatype:** Boolean
|
||||
| `telegram.token` | Your Telegram bot token. Only required if `telegram.enabled` is `true`. <br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
||||
| `telegram.chat_id` | Your personal Telegram account id. Only required if `telegram.enabled` is `true`. <br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
||||
| `telegram.balance_dust_level` | Dust-level (in stake currency) - currencies with a balance below this will not be shown by `/balance`. <br> **Datatype:** float
|
||||
| `telegram.reload` | Allow "reload" buttons on telegram messages. <br>*Defaults to `True`.<br> **Datatype:** boolean
|
||||
| `telegram.notification_settings.*` | Detailed notification settings. Refer to the [telegram documentation](telegram-usage.md) for details.<br> **Datatype:** dictionary
|
||||
| | **Webhook**
|
||||
| `webhook.enabled` | Enable usage of Webhook notifications <br> **Datatype:** Boolean
|
||||
| `webhook.url` | URL for the webhook. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||
| `webhook.webhookentry` | Payload to send on entry. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||
@@ -207,6 +223,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
||||
| `webhook.webhookexitcancel` | Payload to send on exit order cancel. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||
| `webhook.webhookexitfill` | Payload to send on exit order filled. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||
| `webhook.webhookstatus` | Payload to send on status calls. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||
| | **Rest API / FreqUI**
|
||||
| `api_server.enabled` | Enable usage of API Server. See the [API Server documentation](rest-api.md) for more details. <br> **Datatype:** Boolean
|
||||
| `api_server.listen_ip_address` | Bind IP address. See the [API Server documentation](rest-api.md) for more details. <br> **Datatype:** IPv4
|
||||
| `api_server.listen_port` | Bind Port. See the [API Server documentation](rest-api.md) for more details. <br>**Datatype:** Integer between 1024 and 65535
|
||||
@@ -214,22 +231,22 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
||||
| `api_server.username` | Username for API server. See the [API Server documentation](rest-api.md) for more details. <br>**Keep it in secret, do not disclose publicly.**<br> **Datatype:** String
|
||||
| `api_server.password` | Password for API server. See the [API Server documentation](rest-api.md) for more details. <br>**Keep it in secret, do not disclose publicly.**<br> **Datatype:** String
|
||||
| `bot_name` | Name of the bot. Passed via API to a client - can be shown to distinguish / name bots.<br> *Defaults to `freqtrade`*<br> **Datatype:** String
|
||||
| `db_url` | Declares database URL to use. NOTE: This defaults to `sqlite:///tradesv3.dryrun.sqlite` if `dry_run` is `true`, and to `sqlite:///tradesv3.sqlite` for production instances. <br> **Datatype:** String, SQLAlchemy connect string
|
||||
| | **Other**
|
||||
| `initial_state` | Defines the initial application state. If set to stopped, then the bot has to be explicitly started via `/start` RPC command. <br>*Defaults to `stopped`.* <br> **Datatype:** Enum, either `stopped` or `running`
|
||||
| `force_entry_enable` | Enables the RPC Commands to force a Trade entry. More information below. <br> **Datatype:** Boolean
|
||||
| `disable_dataframe_checks` | Disable checking the OHLCV dataframe returned from the strategy methods for correctness. Only use this when intentionally changing the dataframe and you understand what you are doing. [Strategy Override](#parameters-in-the-strategy).<br> *Defaults to `False`*. <br> **Datatype:** Boolean
|
||||
| `strategy` | **Required** Defines Strategy class to use. Recommended to be set via `--strategy NAME`. <br> **Datatype:** ClassName
|
||||
| `strategy_path` | Adds an additional strategy lookup path (must be a directory). <br> **Datatype:** String
|
||||
| `internals.process_throttle_secs` | Set the process throttle, or minimum loop duration for one bot iteration loop. Value in seconds. <br>*Defaults to `5` seconds.* <br> **Datatype:** Positive Integer
|
||||
| `internals.heartbeat_interval` | Print heartbeat message every N seconds. Set to 0 to disable heartbeat messages. <br>*Defaults to `60` seconds.* <br> **Datatype:** Positive Integer or 0
|
||||
| `internals.sd_notify` | Enables use of the sd_notify protocol to tell systemd service manager about changes in the bot state and issue keep-alive pings. See [here](installation.md#7-optional-configure-freqtrade-as-a-systemd-service) for more details. <br> **Datatype:** Boolean
|
||||
| `logfile` | Specifies logfile name. Uses a rolling strategy for log file rotation for 10 files with the 1MB limit per file. <br> **Datatype:** String
|
||||
| `strategy` | **Required** Defines Strategy class to use. Recommended to be set via `--strategy NAME`. <br> **Datatype:** ClassName
|
||||
| `strategy_path` | Adds an additional strategy lookup path (must be a directory). <br> **Datatype:** String
|
||||
| `recursive_strategy_search` | Set to `true` to recursively search sub-directories inside `user_data/strategies` for a strategy. <br> **Datatype:** Boolean
|
||||
| `user_data_dir` | Directory containing user data. <br> *Defaults to `./user_data/`*. <br> **Datatype:** String
|
||||
| `db_url` | Declares database URL to use. NOTE: This defaults to `sqlite:///tradesv3.dryrun.sqlite` if `dry_run` is `true`, and to `sqlite:///tradesv3.sqlite` for production instances. <br> **Datatype:** String, SQLAlchemy connect string
|
||||
| `logfile` | Specifies logfile name. Uses a rolling strategy for log file rotation for 10 files with the 1MB limit per file. <br> **Datatype:** String
|
||||
| `add_config_files` | Additional config files. These files will be loaded and merged with the current config file. The files are resolved relative to the initial file.<br> *Defaults to `[]`*. <br> **Datatype:** List of strings
|
||||
| `dataformat_ohlcv` | Data format to use to store historical candle (OHLCV) data. <br> *Defaults to `json`*. <br> **Datatype:** String
|
||||
| `dataformat_trades` | Data format to use to store historical trades data. <br> *Defaults to `jsongz`*. <br> **Datatype:** String
|
||||
| `position_adjustment_enable` | Enables the strategy to use position adjustments (additional buys or sells). [More information here](strategy-callbacks.md#adjust-trade-position). <br> [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.*<br> **Datatype:** Boolean
|
||||
| `max_entry_position_adjustment` | Maximum additional order(s) for each open trade on top of the first entry Order. Set it to `-1` for unlimited additional orders. [More information here](strategy-callbacks.md#adjust-trade-position). <br> [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `-1`.*<br> **Datatype:** Positive Integer or -1
|
||||
|
||||
### Parameters in the strategy
|
||||
|
||||
@@ -583,7 +600,7 @@ Once you will be happy with your bot performance running in the Dry-run mode, yo
|
||||
* Market orders fill based on orderbook volume the moment the order is placed.
|
||||
* Limit orders fill once the price reaches the defined level - or time out based on `unfilledtimeout` settings.
|
||||
* In combination with `stoploss_on_exchange`, the stop_loss price is assumed to be filled.
|
||||
* Open orders (not trades, which are stored in the database) are reset on bot restart.
|
||||
* Open orders (not trades, which are stored in the database) are kept open after bot restarts, with the assumption that they were not filled while being offline.
|
||||
|
||||
## Switch to production mode
|
||||
|
||||
|
@@ -30,6 +30,7 @@ usage: freqtrade download-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
||||
[--data-format-ohlcv {json,jsongz,hdf5}]
|
||||
[--data-format-trades {json,jsongz,hdf5}]
|
||||
[--trading-mode {spot,margin,futures}]
|
||||
[--prepend]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
@@ -62,6 +63,7 @@ optional arguments:
|
||||
`jsongz`).
|
||||
--trading-mode {spot,margin,futures}
|
||||
Select Trading mode
|
||||
--prepend Allow data prepending. (Data-appending is disabled)
|
||||
|
||||
Common arguments:
|
||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||
@@ -157,10 +159,21 @@ freqtrade download-data --exchange binance --pairs .*/USDT
|
||||
- To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
|
||||
- To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
|
||||
- To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
|
||||
- To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020. Eventually set end dates are ignored.
|
||||
- To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
|
||||
- Use `--timeframes` to specify which timeframes to download historical candle (OHLCV) data for. Default is `--timeframes 1m 5m`, which will download 1-minute and 5-minute data.
|
||||
- To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
|
||||
|
||||
#### Download additional data before the current timerange
|
||||
|
||||
Assume you downloaded all data starting from 2022 (`--timerange 20220101-`), but you'd now like to also backtest with earlier data.
|
||||
You can do so by using the `--prepend` flag, combined with `--timerange` - specifying an end-date.
|
||||
|
||||
``` bash
|
||||
freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT --prepend --timerange 20210101-20220101
|
||||
```
|
||||
|
||||
!!! Note
|
||||
Freqtrade will ignore the end-date in this mode if data is available, updating the end-date to the existing data start point.
|
||||
|
||||
### Data format
|
||||
|
||||
@@ -173,7 +186,7 @@ Freqtrade currently supports 3 data-formats for both OHLCV and trades data:
|
||||
By default, OHLCV data is stored as `json` data, while trades data is stored as `jsongz` data.
|
||||
|
||||
This can be changed via the `--data-format-ohlcv` and `--data-format-trades` command line arguments respectively.
|
||||
To persist this change, you can should also add the following snippet to your configuration, so you don't have to insert the above arguments each time:
|
||||
To persist this change, you should also add the following snippet to your configuration, so you don't have to insert the above arguments each time:
|
||||
|
||||
``` jsonc
|
||||
// ...
|
||||
@@ -361,6 +374,7 @@ usage: freqtrade list-data [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
|
||||
[--data-format-ohlcv {json,jsongz,hdf5}]
|
||||
[-p PAIRS [PAIRS ...]]
|
||||
[--trading-mode {spot,margin,futures}]
|
||||
[--show-timerange]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
@@ -374,6 +388,8 @@ optional arguments:
|
||||
separated.
|
||||
--trading-mode {spot,margin,futures}
|
||||
Select Trading mode
|
||||
--show-timerange Show timerange available for available data. (May take
|
||||
a while to calculate).
|
||||
|
||||
Common arguments:
|
||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||
|
@@ -68,6 +68,36 @@ def test_method_to_test(caplog):
|
||||
|
||||
```
|
||||
|
||||
### Debug configuration
|
||||
|
||||
To debug freqtrade, we recommend VSCode with the following launch configuration (located in `.vscode/launch.json`).
|
||||
Details will obviously vary between setups - but this should work to get you started.
|
||||
|
||||
``` json
|
||||
{
|
||||
"name": "freqtrade trade",
|
||||
"type": "python",
|
||||
"request": "launch",
|
||||
"module": "freqtrade",
|
||||
"console": "integratedTerminal",
|
||||
"args": [
|
||||
"trade",
|
||||
// Optional:
|
||||
// "--userdir", "user_data",
|
||||
"--strategy",
|
||||
"MyAwesomeStrategy",
|
||||
]
|
||||
},
|
||||
```
|
||||
|
||||
Command line arguments can be added in the `"args"` array.
|
||||
This method can also be used to debug a strategy, by setting the breakpoints within the strategy.
|
||||
|
||||
A similar setup can also be taken for Pycharm - using `freqtrade` as module name, and setting the command line arguments as "parameters".
|
||||
|
||||
!!! Note "Startup directory"
|
||||
This assumes that you have the repository checked out, and the editor is started at the repository root level (so setup.py is at the top level of your repository).
|
||||
|
||||
## ErrorHandling
|
||||
|
||||
Freqtrade Exceptions all inherit from `FreqtradeException`.
|
||||
@@ -200,11 +230,12 @@ For that reason, they must implement the following methods:
|
||||
* `global_stop()`
|
||||
* `stop_per_pair()`.
|
||||
|
||||
`global_stop()` and `stop_per_pair()` must return a ProtectionReturn tuple, which consists of:
|
||||
`global_stop()` and `stop_per_pair()` must return a ProtectionReturn object, which consists of:
|
||||
|
||||
* lock pair - boolean
|
||||
* lock until - datetime - until when should the pair be locked (will be rounded up to the next new candle)
|
||||
* reason - string, used for logging and storage in the database
|
||||
* lock_side - long, short or '*'.
|
||||
|
||||
The `until` portion should be calculated using the provided `calculate_lock_end()` method.
|
||||
|
||||
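A minimal sketch of what such a return value could look like. The field names used here (`lock`, `until`, `reason`, `lock_side`) simply mirror the list above and should be treated as an assumption; check the existing protections in the code base for the exact class and import path:

``` python
from datetime import datetime, timedelta, timezone

class ProtectionReturn:
    """Stand-in for illustration only - freqtrade ships its own class."""
    def __init__(self, lock: bool, until: datetime, reason: str, lock_side: str = "*"):
        self.lock = lock
        self.until = until        # in a real protection, derive this via calculate_lock_end()
        self.reason = reason
        self.lock_side = lock_side

def stop_per_pair_example(pair: str) -> ProtectionReturn:
    # Lock the given pair for one hour (the framework rounds up to the next candle).
    return ProtectionReturn(
        lock=True,
        until=datetime.now(timezone.utc) + timedelta(hours=1),
        reason=f"{pair}: too many recent losses",
        lock_side="*",
    )

print(vars(stop_per_pair_example("BTC/USDT")))
```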
@@ -313,6 +344,32 @@ The output will show the last entry from the Exchange as well as the current UTC
|
||||
If both show the same day, then the last candle can be assumed to be incomplete and should be dropped (leave the setting `"ohlcv_partial_candle"` from the exchange-class untouched / `True`). Otherwise, set `"ohlcv_partial_candle"` to `False` so candles are not dropped (shown in the example above).
|
||||
Another way is to run this command multiple times in a row and observe if the volume is changing (while the date remains the same).
|
||||
|
||||
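The same check can be scripted. This sketch uses ccxt directly; the exchange id and pair are examples, adjust them for the exchange class you are working on:

``` python
import ccxt
from datetime import datetime, timezone

exchange = ccxt.okx()  # example exchange - replace with the one you are testing
candles = exchange.fetch_ohlcv("BTC/USDT", timeframe="1d", limit=5)
last_candle_day = datetime.fromtimestamp(candles[-1][0] / 1000, tz=timezone.utc).date()

if last_candle_day == datetime.now(timezone.utc).date():
    print("Last candle is the current (incomplete) day -> keep ohlcv_partial_candle = True")
else:
    print("Exchange already omits the incomplete candle -> ohlcv_partial_candle can be False")
```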
### Update binance cached leverage tiers
|
||||
|
||||
Updating leverage tiers should be done regularly - and requires an authenticated account with futures enabled.
|
||||
|
||||
``` python
|
||||
import ccxt
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
exchange = ccxt.binance({
|
||||
'apiKey': '<apikey>',
|
||||
'secret': '<secret>',
|
||||
'options': {'defaultType': 'future'}
|
||||
})
|
||||
_ = exchange.load_markets()
|
||||
|
||||
lev_tiers = exchange.fetch_leverage_tiers()
|
||||
|
||||
# Assumes this is running in the root of the repository.
|
||||
file = Path('freqtrade/exchange/binance_leverage_tiers.json')
|
||||
json.dump(dict(sorted(lev_tiers.items())), file.open('w'), indent=2)
|
||||
|
||||
```
|
||||
|
||||
This file should then be contributed upstream, so others can benefit from this, too.
|
||||
|
||||
## Updating example notebooks
|
||||
|
||||
To keep the jupyter notebooks aligned with the documentation, the following should be run after updating an example notebook.
|
||||
|
@@ -230,6 +230,11 @@ OKX requires a passphrase for each api key, you will therefore need to add this
|
||||
!!! Warning
|
||||
OKX only provides 100 candles per api call. Therefore, the strategy will only have a pretty low amount of data available in backtesting mode.
|
||||
|
||||
!!! Warning "Futures"
|
||||
OKX Futures has the concept of "position mode" - which can be Net or long/short (hedge mode).
|
||||
Freqtrade supports both modes - but changing the mode mid-trading is not supported and will lead to exceptions and failures to place trades.
|
||||
OKX also only provides MARK candles for the past ~3 months. Backtesting futures prior to that date will therefore lead to slight deviations, as funding-fees cannot be calculated correctly without this data.
|
||||
|
||||
## Gate.io
|
||||
|
||||
!!! Tip "Stoploss on Exchange"
|
||||
|
@@ -77,9 +77,9 @@ Freqtrade will not provide incomplete candles to strategies. Using incomplete ca
|
||||
|
||||
You can use "current" market data by using the [dataprovider](strategy-customization.md#orderbookpair-maximum)'s orderbook or ticker methods - which however cannot be used during backtesting.
|
||||
|
||||
### Is there a setting to only SELL the coins being held and not perform anymore BUYS?
|
||||
### Is there a setting to only Exit the trades being held and not perform any new Entries?
|
||||
|
||||
You can use the `/stopbuy` command in Telegram to prevent future buys, followed by `/forceexit all` (sell all open trades).
|
||||
You can use the `/stopentry` command in Telegram to prevent future trade entry, followed by `/forceexit all` (sell all open trades).
|
||||
|
||||
### I want to run multiple bots on the same machine
|
||||
|
||||
|
759
docs/freqai.md
Normal file
@@ -0,0 +1,759 @@
|
||||

|
||||
|
||||
# FreqAI
|
||||
|
||||
FreqAI is a module designed to automate a variety of tasks associated with training a predictive machine learning model to generate market forecasts given a set of input features.
|
||||
|
||||
Features include:
|
||||
|
||||
* **Self-adaptive retraining**: retrain models during [live deployments](#running-the-model-live) to self-adapt to the market in an unsupervised manner.
|
||||
* **Rapid feature engineering**: create large rich [feature sets](#feature-engineering) (10k+ features) based on simple user-created strategies.
|
||||
* **High performance**: adaptive retraining occurs on a separate thread (or on GPU if available) from inferencing and bot trade operations. Newest models and data are kept in memory for rapid inferencing.
|
||||
* **Realistic backtesting**: emulate self-adaptive retraining with a [backtesting module](#backtesting) that automates past retraining.
|
||||
* **Modifiability**: use the generalized and robust architecture for incorporating any [machine learning library/method](#building-a-custom-prediction-model) available in Python. Eight examples are currently available, including classifiers, regressors, and a convolutional neural network.
|
||||
* **Smart outlier removal**: remove outliers from training and prediction data sets using a variety of [outlier detection techniques](#outlier-removal).
|
||||
* **Crash resilience**: store model to disk to make reloading from a crash fast and easy, and [purge obsolete files](#purging-old-model-data) for sustained dry/live runs.
|
||||
* **Automatic data normalization**: [normalize the data](#feature-normalization) in a smart and statistically safe way.
|
||||
* **Automatic data download**: compute the data download timerange and update historic data (in live deployments).
|
||||
* **Cleaning of incoming data**: handle NaNs safely before training and prediction.
|
||||
* **Dimensionality reduction**: reduce the size of the training data via [Principal Component Analysis](#reducing-data-dimensionality-with-principal-component-analysis).
|
||||
* **Deploying bot fleets**: set one bot to train models while a fleet of [follower bots](#setting-up-a-follower) inference the models and handle trades.
|
||||
|
||||
## Quick start
|
||||
|
||||
The easiest way to quickly test FreqAI is to run it in dry mode with the following command
|
||||
|
||||
```bash
|
||||
freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel LightGBMRegressor --strategy-path freqtrade/templates
|
||||
```
|
||||
|
||||
The user will see the boot-up process of automatic data downloading, followed by simultaneous training and trading.
|
||||
|
||||
The example strategy, example prediction model, and example config can be found in
|
||||
`freqtrade/templates/FreqaiExampleStrategy.py`, `freqtrade/freqai/prediction_models/LightGBMRegressor.py`, and
|
||||
`config_examples/config_freqai.example.json`, respectively.
|
||||
|
||||
## General approach
|
||||
|
||||
The user provides FreqAI with a set of custom *base* indicators (the same way as in a typical Freqtrade strategy) as well as target values (*labels*).
|
||||
FreqAI trains a model to predict the target values based on the input of custom indicators, for each pair in the whitelist. These models are consistently retrained to adapt to market conditions. FreqAI offers the ability to both backtest strategies (emulating reality with periodic retraining) and deploy dry/live runs. In dry/live conditions, FreqAI can be set to constant retraining in a background thread in an effort to keep models as up to date as possible.
|
||||
|
||||
An overview of the algorithm is shown below, explaining the data processing pipeline and the model usage.
|
||||
|
||||

|
||||
|
||||
### Important machine learning vocabulary
|
||||
|
||||
**Features** - the quantities with which a model is trained. All features for a single candle are stored as a vector. In FreqAI, the user
|
||||
builds the feature sets from anything they can construct in the strategy.
|
||||
|
||||
**Labels** - the target values that a model is trained
|
||||
toward. Each set of features is associated with a single label that is
|
||||
defined by the user within the strategy. These labels intentionally look into the
|
||||
future, and are not available to the model during dry/live/backtesting.
|
||||
|
||||
**Training** - the process of feeding individual feature sets, composed of historic data, with associated labels into the
|
||||
model with the goal of matching input feature sets to associated labels.
|
||||
|
||||
**Train data** - a subset of the historic data that is fed to the model during
|
||||
training. This data directly influences weight connections in the model.
|
||||
|
||||
**Test data** - a subset of the historic data that is used to evaluate the performance of the model after training. This data does not influence nodal weights within the model.
|
||||
|
||||
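To make the vocabulary concrete, here is a tiny, purely illustrative example of features and labels built from a close-price series. The `%`/`&` column prefixes follow FreqAI's naming convention for features and labels, but treat the exact column names as an assumption; real feature engineering happens in `populate_any_indicators`:

``` python
import pandas as pd

df = pd.DataFrame({"close": [100.0, 101.0, 99.5, 102.0, 103.0, 101.5]})
label_period_candles = 2

# A feature describes the current candle ...
df["%-feature_pct_change"] = df["close"].pct_change()
# ... while a label intentionally looks into the future.
df["&-label_future_return"] = df["close"].shift(-label_period_candles) / df["close"] - 1

train_data = df.iloc[: int(len(df) * 0.75)]   # fed to the model during training
test_data = df.iloc[int(len(df) * 0.75):]     # held back to evaluate the trained model
print(train_data, test_data, sep="\n\n")
```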
## Install prerequisites
|
||||
|
||||
The normal Freqtrade install process will ask the user if they wish to install FreqAI dependencies. The user should reply "yes" to this question if they wish to use FreqAI. If the user did not reply yes, they can manually install these dependencies after the install with:
|
||||
|
||||
``` bash
|
||||
pip install -r requirements-freqai.txt
|
||||
```
|
||||
|
||||
!!! Note
|
||||
Catboost will not be installed on arm devices (raspberry, Mac M1, ARM based VPS, ...), since Catboost does not provide wheels for this platform.
|
||||
|
||||
### Usage with docker
|
||||
|
||||
For docker users, a dedicated tag with freqAI dependencies is available as `:freqai`.
|
||||
As such - you can replace the image line in your docker-compose file with `image: freqtradeorg/freqtrade:develop_freqai`.
|
||||
This image contains the regular freqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.
|
||||
|
||||
## Setting up FreqAI
|
||||
|
||||
### Parameter table
|
||||
|
||||
The table below lists all configuration parameters available for FreqAI, presented in the same order as `config_examples/config_freqai.example.json`.
|
||||
|
||||
Mandatory parameters are marked as **Required**, which means that they are required to be set in one of the possible ways.
|
||||
|
||||
| Parameter | Description |
|
||||
|------------|-------------|
|
||||
| | **General configuration parameters**
|
||||
| `freqai` | **Required.** <br> The parent dictionary containing all the parameters for controlling FreqAI. <br> **Datatype:** Dictionary.
|
||||
| `startup_candles` | Number of candles needed for *backtesting only* to ensure all indicators are not NaN at the start of the first train period. <br> **Datatype:** Positive integer.
|
||||
| `purge_old_models` | Delete obsolete models (otherwise, all historic models will remain on disk). <br> **Datatype:** Boolean. Default: `False`.
|
||||
| `train_period_days` | **Required.** <br> Number of days to use for the training data (width of the sliding window). <br> **Datatype:** Positive integer.
|
||||
| `backtest_period_days` | **Required.** <br> Number of days to inference from the trained model before sliding the window defined above, and retraining the model. This can be fractional days, but beware that the user-provided `timerange` will be divided by this number to yield the number of trainings necessary to complete the backtest. <br> **Datatype:** Float.
|
||||
| `identifier` | **Required.** <br> A unique name for the current model. This can be reused to reload pre-trained models/data. <br> **Datatype:** String.
|
||||
| `live_retrain_hours` | Frequency of retraining during dry/live runs. <br> Default set to 0, which means the model will retrain as often as possible. <br> **Datatype:** Float > 0.
|
||||
| `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old. <br> Defaults set to 0, which means models never expire. <br> **Datatype:** Positive integer.
|
||||
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training data set. <br> **Datatype:** Positive integer.
|
||||
| `follow_mode` | If true, this instance of FreqAI will look for models associated with `identifier` and load those for inferencing. A `follower` will **not** train new models. <br> **Datatype:** Boolean. Default: `False`.
|
||||
| | **Feature parameters**
|
||||
| `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](#feature-engineering). <br> **Datatype:** Dictionary.
|
||||
| `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base asset feature set. <br> **Datatype:** List of timeframes (strings).
|
||||
| `include_corr_pairlist` | A list of correlated coins that FreqAI will add as additional features to all `pair_whitelist` coins. All indicators set in `populate_any_indicators` during feature engineering (see details [here](#feature-engineering)) will be created for each coin in this list, and that set of features is added to the base asset feature set. <br> **Datatype:** List of assets (strings).
|
||||
| `label_period_candles` | Number of candles into the future that the labels are created for. This is used in `populate_any_indicators` (see `templates/FreqaiExampleStrategy.py` for detailed usage). The user can create custom labels, making use of this parameter or not. <br> **Datatype:** Positive integer.
|
||||
| `include_shifted_candles` | Add features from previous candles to subsequent candles to add historical information. FreqAI takes all features from the `include_shifted_candles` previous candles, duplicates and shifts them so that the information is available for the subsequent candle. <br> **Datatype:** Positive integer.
|
||||
| `weight_factor` | Used to set weights for training data points according to their recency. See details about how it works [here](#controlling-the-model-learning-process). <br> **Datatype:** Positive float (typically < 1).
|
||||
| `indicator_max_period_candles` | The maximum period used in `populate_any_indicators()` for indicator creation. FreqAI uses this information in combination with the maximum timeframe to calculate how many data points should be downloaded so that the first data point does not have a NaN. <br> **Datatype:** Positive integer.
|
||||
| `indicator_periods_candles` | Calculate indicators for `indicator_periods_candles` time periods and add them to the feature set. <br> **Datatype:** List of positive integers.
|
||||
| `stratify_training_data` | This value is used to indicate the grouping of the data. For example, 2 would set every 2nd data point into a separate dataset to be pulled from during training/testing. See details about how it works [here](#stratifying-the-data-for-training-and-testing-the-model) <br> **Datatype:** Positive integer.
|
||||
| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis) <br> **Datatype:** Boolean.
|
||||
| `DI_threshold` | Activates the Dissimilarity Index for outlier detection when > 0. See details about how it works [here](#removing-outliers-with-the-dissimilarity-index). <br> **Datatype:** Positive float (typically < 1).
|
||||
| `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training data set, as well as from incoming data points. See details about how it works [here](#removing-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
|
||||
| `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](#removing-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.
|
||||
| `use_DBSCAN_to_remove_outliers` | Cluster data using DBSCAN to identify and remove outliers from training and prediction data. See details about how it works [here](#removing-outliers-with-dbscan). <br> **Datatype:** Boolean.
|
||||
| `outlier_protection_percentage` | If more than `outlier_protection_percentage` fraction of points are removed as outliers, FreqAI will log a warning message and ignore outlier detection while keeping the original dataset intact. <br> **Datatype:** float. Default: `30`
|
||||
| | **Data split parameters**
|
||||
| `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
|
||||
| `test_size` | Fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
|
||||
| `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`. <br>
|
||||
| | **Model training parameters**
|
||||
| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, this dictionary can contain any parameter from that model. <br> **Datatype:** Dictionary.**Datatype:** Boolean.
|
||||
| `n_estimators` | The number of boosted trees to fit in regression. <br> **Datatype:** Integer.
|
||||
| `learning_rate` | Boosting learning rate during regression. <br> **Datatype:** Float.
|
||||
| `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float.
|
||||
| | **Extraneous parameters**
|
||||
| `keras` | If your model makes use of Keras (typical for Tensorflow-based prediction models), activate this flag so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. Default: `False`.
|
||||
| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. Default: 2.
|
||||
|
||||
### Important dataframe key patterns

Below are the values the user can expect to include/use inside a typical strategy dataframe (`df[]`):

| DataFrame Key | Description |
|------------|-------------|
| `df['&*']` | Any dataframe column prepended with `&` in `populate_any_indicators()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). The names of these dataframe columns are fed back to the user as the predictions. For example, if the user wishes to predict the price change in the next 40 candles (similar to `templates/FreqaiExampleStrategy.py`), they set `df['&-s_close']`. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`. <br> **Datatype:** Depends on the output of the model. |
| `df['&*_std/mean']` | Standard deviation and mean values of the user-defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float. |
| `df['do_predict']` | Indication of an outlier data point. The return value is an integer between -1 and 2, which lets the user know if the prediction is trustworthy or not. `do_predict==1` means the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](#removing-outliers-with-the-dissimilarity-index)) of the input data point is above the user-defined threshold, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, the result will be `do_predict==0`. If both the DI and the SVM consider the input data point to be an outlier, the result will be `do_predict==-1`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expiration_hours`. <br> **Datatype:** Integer between -1 and 2. |
| `df['DI_values']` | Dissimilarity Index values are proxies to the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. <br> **Datatype:** Float. |
| `df['%*']` | Any dataframe column prepended with `%` in `populate_any_indicators()` is treated as a training feature. For example, the user can include the RSI in the training feature set (similar to in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](#feature-engineering). <br> **Note**: Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality described in the `feature_parameters` table shown above), these features are removed from the dataframe upon return from FreqAI. If the user wishes to keep a particular type of feature for plotting purposes, they can prepend it with `%%`. <br> **Datatype:** Depends on the output of the model. |
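
As an illustration of how these keys are typically consumed, below is a minimal, hypothetical sketch of an entry condition. The 1.25 z-score threshold is arbitrary and the column names assume the `&-s_close` label convention used above; see `templates/FreqaiExampleStrategy.py` for the full pattern.

```python
from functools import reduce

from pandas import DataFrame


def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
    # Only trust data points that passed the outlier checks (do_predict == 1)
    # and whose predicted move is rare relative to the training label statistics.
    enter_long_conditions = [
        df["do_predict"] == 1,
        df["&-s_close"] > df["&-s_close_mean"] + 1.25 * df["&-s_close_std"],
    ]
    df.loc[reduce(lambda x, y: x & y, enter_long_conditions), "enter_long"] = 1
    return df
```
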
### File structure

`user_data_dir/models/` contains all the data associated with the trainings and backtests.
This file structure is heavily controlled and inferenced by the `FreqaiDataKitchen()`
and should therefore not be modified.

### Example config file

The user interface is isolated to the typical Freqtrade config file. A FreqAI config should include:

```json
    "freqai": {
        "enabled": true,
        "startup_candles": 10000,
        "purge_old_models": true,
        "train_period_days": 30,
        "backtest_period_days": 7,
        "identifier" : "unique-id",
        "feature_parameters" : {
            "include_timeframes": ["5m","15m","4h"],
            "include_corr_pairlist": [
                "ETH/USD",
                "LINK/USD",
                "BNB/USD"
            ],
            "label_period_candles": 24,
            "include_shifted_candles": 2,
            "indicator_max_period_candles": 20,
            "indicator_periods_candles": [10, 20]
        },
        "data_split_parameters" : {
            "test_size": 0.25
        },
        "model_training_parameters" : {
            "n_estimators": 100
        }
    }
```

## Building a FreqAI strategy

The FreqAI strategy requires the user to include the following lines of code in the standard Freqtrade strategy:

```python

    def informative_pairs(self):
        whitelist_pairs = self.dp.current_whitelist()
        corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"]
        informative_pairs = []
        for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]:
            for pair in whitelist_pairs:
                informative_pairs.append((pair, tf))
            for pair in corr_pairs:
                if pair in whitelist_pairs:
                    continue  # avoid duplication
                informative_pairs.append((pair, tf))
        return informative_pairs

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:

        # the model will return all labels created by the user in `populate_any_indicators`
        # (& appended targets), an indication of whether or not the prediction should be accepted,
        # and the target mean/std values for each of the labels created by the user in
        # `populate_any_indicators()` for each training period.

        dataframe = self.freqai.start(dataframe, metadata, self)

        return dataframe

    def populate_any_indicators(
        self, pair, df, tf, informative=None, set_generalized_indicators=False
    ):
        """
        Function designed to automatically generate, name and merge features
        from user indicated timeframes in the configuration file. User controls the indicators
        passed to the training/prediction by prepending indicators with `'%-' + coin`
        (see convention below). I.e. user should not prepend any supporting metrics
        (e.g. bb_lowerband below) with % unless they explicitly want to pass that metric to the
        model.
        :param pair: pair to be used as informative
        :param df: strategy dataframe which will receive merges from informatives
        :param tf: timeframe of the dataframe which will modify the feature names
        :param informative: the dataframe associated with the informative pair
        """

        coin = pair.split('/')[0]

        if informative is None:
            informative = self.dp.get_pair_dataframe(pair, tf)

        # first loop is automatically duplicating indicators for time periods
        for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
            t = int(t)
            informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
            informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
            informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)

        indicators = [col for col in informative if col.startswith("%")]
        # This loop duplicates and shifts all indicators to add a sense of recency to data
        for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
            if n == 0:
                continue
            informative_shift = informative[indicators].shift(n)
            informative_shift = informative_shift.add_suffix("_shift-" + str(n))
            informative = pd.concat((informative, informative_shift), axis=1)

        df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
        skip_columns = [
            (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
        ]
        df = df.drop(columns=skip_columns)

        # Add generalized indicators here (because in live, it will call this
        # function to populate indicators during training). Notice how we ensure not to
        # add them multiple times
        if set_generalized_indicators:

            # user adds targets here by prepending them with &- (see convention below)
            # If user wishes to use multiple targets, a multioutput prediction model
            # needs to be used such as templates/CatboostPredictionMultiModel.py
            df["&-s_close"] = (
                df["close"]
                .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
                .rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
                .mean()
                / df["close"]
                - 1
            )

        return df

```

Notice how `populate_any_indicators()` is where the user adds their own features ([more information](#feature-engineering)) and labels ([more information](#setting-classifier-targets)). See a full example at `templates/FreqaiExampleStrategy.py`.

## Creating a dynamic target

The `&*_std/mean` return values describe the statistical fit of the user defined label *during the most recent training*. This value allows the user to know the rarity of a given prediction. For example, `templates/FreqaiExampleStrategy.py` creates a `target_roi` that is based on filtering out predictions that are below a given z-score of 1.25.

```python
dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
dataframe["sell_roi"] = dataframe["&-s_close_mean"] - dataframe["&-s_close_std"] * 1.25
```

If the user wishes to consider the population of *historical predictions* for creating the dynamic target instead of the trained labels (as discussed above), the user can do so by setting `fit_live_predictions_candles` in the config to the number of historical prediction candles the user wishes to use to generate target statistics.

```json
    "freqai": {
        "fit_live_predictions_candles": 300
    }
```

If the user sets this value, FreqAI will initially use the predictions from the training data and subsequently begin introducing real prediction data as it is generated. FreqAI will save this historical data to be reloaded if the user stops and restarts a model with the same `identifier`.

## Building a custom prediction model

FreqAI has multiple example prediction model libraries, such as `Catboost` regression (`freqai/prediction_models/CatboostRegressor.py`) and `LightGBM` regression.
However, the user can customize and create their own prediction models using the `IFreqaiModel` class.
The user is encouraged to inherit `IFreqaiModel` and override `train()` and `predict()` to customize various aspects of the training procedure.

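A skeleton of that inheritance pattern might look like the sketch below. The method signatures here are illustrative only; the authoritative signatures should be copied from an existing model such as `freqai/prediction_models/CatboostRegressor.py`.

```python
from freqtrade.freqai.freqai_interface import IFreqaiModel  # import path assumed


class MyCustomRegressor(IFreqaiModel):
    """Hypothetical custom model; signatures are illustrative, not authoritative."""

    def train(self, unfiltered_dataframe, pair, dk):
        # Filter and normalize the features via the data kitchen (dk),
        # then fit any regressor and return the fitted model.
        ...

    def predict(self, unfiltered_dataframe, dk):
        # Apply the same feature pipeline and return predictions
        # together with the do_predict flags.
        ...
```
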
## Feature engineering

Features are added by the user inside the `populate_any_indicators()` method of the strategy
by prepending indicators with `%`, and labels with `&`.

There are some important components/structures that the user *must* include when building their feature set; the use of these is shown below:

```python
def populate_any_indicators(
    self, pair, df, tf, informative=None, set_generalized_indicators=False
):
    """
    Function designed to automatically generate, name, and merge features
    from user-indicated timeframes in the configuration file. The user controls the indicators
    passed to the training/prediction by prepending indicators with `'%-' + coin`
    (see convention below). I.e., the user should not prepend any supporting metrics
    (e.g., bb_lowerband below) with % unless they explicitly want to pass that metric to the
    model.
    :param pair: pair to be used as informative
    :param df: strategy dataframe which will receive merges from informatives
    :param tf: timeframe of the dataframe which will modify the feature names
    :param informative: the dataframe associated with the informative pair
    """

    coin = pair.split('/')[0]

    if informative is None:
        informative = self.dp.get_pair_dataframe(pair, tf)

    # first loop is automatically duplicating indicators for time periods
    for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
        t = int(t)
        informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
        informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
        informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)

        bollinger = qtpylib.bollinger_bands(
            qtpylib.typical_price(informative), window=t, stds=2.2
        )
        informative[f"{coin}bb_lowerband-period_{t}"] = bollinger["lower"]
        informative[f"{coin}bb_middleband-period_{t}"] = bollinger["mid"]
        informative[f"{coin}bb_upperband-period_{t}"] = bollinger["upper"]

        informative[f"%-{coin}bb_width-period_{t}"] = (
            informative[f"{coin}bb_upperband-period_{t}"]
            - informative[f"{coin}bb_lowerband-period_{t}"]
        ) / informative[f"{coin}bb_middleband-period_{t}"]
        informative[f"%-{coin}close-bb_lower-period_{t}"] = (
            informative["close"] / informative[f"{coin}bb_lowerband-period_{t}"]
        )

        informative[f"%-{coin}relative_volume-period_{t}"] = (
            informative["volume"] / informative["volume"].rolling(t).mean()
        )

    indicators = [col for col in informative if col.startswith("%")]
    # This loop duplicates and shifts all indicators to add a sense of recency to data
    for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
        if n == 0:
            continue
        informative_shift = informative[indicators].shift(n)
        informative_shift = informative_shift.add_suffix("_shift-" + str(n))
        informative = pd.concat((informative, informative_shift), axis=1)

    df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
    skip_columns = [
        (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
    ]
    df = df.drop(columns=skip_columns)

    # Add generalized indicators here (because in live, it will call this
    # function to populate indicators during training). Notice how we ensure not to
    # add them multiple times
    if set_generalized_indicators:
        df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7
        df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25

        # user adds targets here by prepending them with &- (see convention below)
        # If user wishes to use multiple targets, a multioutput prediction model
        # needs to be used such as templates/CatboostPredictionMultiModel.py
        df["&-s_close"] = (
            df["close"]
            .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
            .rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
            .mean()
            / df["close"]
            - 1
        )

    return df
```

In the presented example strategy, the user does not wish to pass the `bb_lowerband` as a feature to the model,
and has therefore not prepended it with `%`. The user does, however, wish to pass `bb_width` to the
model for training/prediction and has therefore prepended it with `%`.

The `include_timeframes` in the example config above are the timeframes (`tf`) of each call to `populate_any_indicators()` in the strategy. In the present case, the user is asking for the
`5m`, `15m`, and `4h` timeframes of the `rsi`, `mfi`, `adx`, and `bb_width` to be included in the feature set.

The user can ask for each of the defined features to be included also from
informative pairs using the `include_corr_pairlist`. This means that the feature
set will include all the features from `populate_any_indicators` on all the `include_timeframes` for each of the correlated pairs defined in the config (`ETH/USD`, `LINK/USD`, and `BNB/USD`).

`include_shifted_candles` indicates the number of previous
candles to include in the feature set. For example, `include_shifted_candles: 2` tells
FreqAI to include the past 2 candles for each of the features in the feature set.

In total, the number of features created by the presented example strategy is:
length of `include_timeframes` * no. features in `populate_any_indicators()` * length of `include_corr_pairlist` * no. `include_shifted_candles` * length of `indicator_periods_candles`
$= 3 * 3 * 3 * 2 * 2 = 108$.

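As a quick sanity check of that multiplication (note that the factor of 3 for features counts only the base indicators `rsi`, `mfi`, and `adx` from the example; derived features such as `bb_width` would raise the count further):

```python
n_timeframes = 3   # include_timeframes: ["5m", "15m", "4h"]
n_features = 3     # rsi, mfi, adx
n_corr_pairs = 3   # include_corr_pairlist: ETH/USD, LINK/USD, BNB/USD
n_shifted = 2      # include_shifted_candles
n_periods = 2      # indicator_periods_candles: [10, 20]

print(n_timeframes * n_features * n_corr_pairs * n_shifted * n_periods)  # 108
```
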
Another structure to consider is the location of the labels at the bottom of the example function (below `if set_generalized_indicators:`).
This is where the user adds single features and labels to their feature set, to avoid their duplication by the
various configuration parameters that multiply the feature set, such as `include_timeframes`.

!!! Note
    Features **must** be defined in `populate_any_indicators()`. Defining FreqAI features in `populate_indicators()`
    will cause the algorithm to fail in live/dry mode. If the user wishes to add generalized features that are not associated with
    a specific pair or timeframe, they should use the following structure inside `populate_any_indicators()`
    (as exemplified in `freqtrade/templates/FreqaiExampleStrategy.py`):

    ```python
    def populate_any_indicators(
        self, pair, df, tf, informative=None, set_generalized_indicators=False
    ):

        ...

        # Add generalized indicators here (because in live, it will call only this function to populate
        # indicators for retraining). Notice how we ensure not to add them multiple times by associating
        # these generalized indicators to the basepair/timeframe
        if set_generalized_indicators:
            df['%-day_of_week'] = (df["date"].dt.dayofweek + 1) / 7
            df['%-hour_of_day'] = (df['date'].dt.hour + 1) / 25

            # user adds targets here by prepending them with &- (see convention below)
            # If user wishes to use multiple targets, a multioutput prediction model
            # needs to be used such as templates/CatboostPredictionMultiModel.py
            df["&-s_close"] = (
                df["close"]
                .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
                .rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
                .mean()
                / df["close"]
                - 1
            )
    ```

(Please see the example script located in `freqtrade/templates/FreqaiExampleStrategy.py` for a full example of `populate_any_indicators()`.)

## Setting classifier targets

FreqAI includes the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. The user should take care to set the classes using strings:

```python
df['&s-up_or_down'] = np.where(df["close"].shift(-100) > df["close"], 'up', 'down')
```

Additionally, the example classifier models do not accommodate multiple labels, but they do allow multi-class classification within a single label column.

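For instance, a three-class target within a single label column might look like the sketch below; the `np.select` thresholds and the `&s-trend` column name are illustrative, not part of the example strategy.

```python
import numpy as np

# Hypothetical three-class label: classify the 100-candle-ahead move as
# 'up', 'down', or 'sideways' based on an arbitrary 1% threshold.
future_change = df["close"].shift(-100) / df["close"] - 1
df["&s-trend"] = np.select(
    [future_change > 0.01, future_change < -0.01],
    ["up", "down"],
    default="sideways",
)
```
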
## Running FreqAI

There are two ways to train and deploy an adaptive machine learning model. FreqAI enables live deployment as well as backtesting analyses. In both cases, a model is trained periodically, as shown in the following figure.

![freqai-algo](assets/freqai_algo.png)

### Running the model live

FreqAI can be run dry/live using the following command:

```bash
freqtrade trade --strategy FreqaiExampleStrategy --config config_freqai.example.json --freqaimodel LightGBMRegressor
```

By default, FreqAI will not find any existing models and will start by training a new one
based on the user's configuration settings. Following training, the model will be used to make predictions on incoming candles until a new model is available. New models are typically generated as often as possible, with FreqAI managing an internal queue of the coin pairs to try to keep all models equally up to date. FreqAI will always use the most recently trained model to make predictions on incoming live data. If the user does not want FreqAI to retrain new models as often as possible, they can set `live_retrain_hours` to tell FreqAI to wait at least that number of hours before training a new model. Additionally, the user can set `expiration_hours` to tell FreqAI to avoid making predictions on models that are older than that number of hours.

If the user wishes to start a dry/live run from a saved backtest model (or from a previously crashed dry/live session), the user only needs to reuse
the same `identifier` parameter:

```json
    "freqai": {
        "identifier": "example",
        "live_retrain_hours": 0.5
    }
```

In this case, although FreqAI will initiate with a
pre-trained model, it will still check to see how much time has elapsed since the model was trained,
and if a full `live_retrain_hours` has elapsed since the end of the loaded model, FreqAI will retrain.

### Backtesting

The FreqAI backtesting module can be executed with the following command:

```bash
freqtrade backtesting --strategy FreqaiExampleStrategy --config config_freqai.example.json --freqaimodel LightGBMRegressor --timerange 20210501-20210701
```

Backtesting mode requires the user to have the data pre-downloaded (unlike in dry/live mode, where FreqAI automatically downloads the necessary data). The user should take care that the time range of the downloaded data covers more than the backtesting time range. This is because FreqAI needs data prior to the desired backtesting time range in order to train a model to be ready to make predictions on the first candle of the user-set backtesting time range. More details on how to calculate the data to download can be found [here](#deciding-the-size-of-the-sliding-training-window-and-backtesting-duration).

If this command has never been executed with the existing config file, it will train a new model
for each pair, for each backtesting window within the expanded `--timerange`.

!!! Note "Model reuse"
|
||||
Once the training is completed, the user can execute the backtesting again with the same config file and
|
||||
FreqAI will find the trained models and load them instead of spending time training. This is useful
|
||||
if the user wants to tweak (or even hyperopt) buy and sell criteria inside the strategy. If the user
|
||||
*wants* to retrain a new model with the same config file, then they should simply change the `identifier`.
|
||||
This way, the user can return to using any model they wish by simply specifying the `identifier`.
|
||||
|
||||
---
|
||||
|
||||
### Deciding the size of the sliding training window and backtesting duration

The user defines the backtesting timerange with the typical `--timerange` parameter in the
configuration file. The duration of the sliding training window is set by `train_period_days`, whilst
`backtest_period_days` is the sliding backtesting window, both in number of days (`backtest_period_days` can be
a float to indicate sub-daily retraining in live/dry mode). In the presented example config,
the user is asking FreqAI to use a training period of 30 days and backtest on the subsequent 7 days.
This means that if the user sets `--timerange 20210501-20210701`,
FreqAI will have trained 8 separate models by the end of `--timerange` (because the full range comprises 8 weeks). After the training of the model, FreqAI will backtest the subsequent 7 days. The "sliding window" then moves one week forward (emulating FreqAI retraining once per week in live mode) and the new model uses the previous 30 days (including the 7 days used for backtesting by the previous model) to train. This is repeated until the end of `--timerange`.

In live mode, the required training data is automatically computed and downloaded. However, in backtesting mode,
the user must manually enter the required number of `startup_candles` in the config. This value
is used to increase the amount of data supplied to FreqAI, which should be sufficient to enable all indicators
to be NaN-free at the beginning of the first training. This is done by identifying the
longest timeframe (`4h` in the presented example config) and the longest indicator period (`20` candles in the presented example config)
and adding this to the `train_period_days`. The units need to be in the base candle timeframe:
`startup_candles` = ( 4 hours * 20 max period * 60 minutes/hour + 30 day train_period_days * 1440 minutes per day ) / 5 min (base timeframe) = 9600.

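The same arithmetic, spelled out as a quick check:

```python
max_timeframe_minutes = 4 * 60   # longest timeframe: 4h
max_indicator_period = 20        # indicator_max_period_candles
train_period_days = 30
base_timeframe_minutes = 5       # base timeframe: 5m

startup_candles = (
    max_indicator_period * max_timeframe_minutes
    + train_period_days * 1440
) / base_timeframe_minutes
print(startup_candles)  # 9600.0
```
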
!!! Note
    In dry/live mode, this is all precomputed and handled automatically. Thus, `startup_candles` has no influence on dry/live mode.

!!! Note
    Although fractional `backtest_period_days` is allowed, the user should be aware that the `--timerange` is divided by this value to determine the number of models that FreqAI will need to train in order to backtest the full range. For example, if the user wants to set a `--timerange` of 10 days, and asks for a `backtest_period_days` of 0.1, FreqAI will need to train 100 models per pair to complete the full backtest. Because of this, a true backtest of FreqAI adaptive training would take a *very* long time. The best way to fully test a model is to run it dry and let it constantly train. In this case, backtesting would take the exact same amount of time as a dry run.

### Defining model expirations

During dry/live mode, FreqAI trains each coin pair sequentially (on separate threads/GPU from the main Freqtrade bot). This means that there is always an age discrepancy between models. If a user is training on 50 pairs, and each pair requires 5 minutes to train, the oldest model will be over 4 hours old. This may be undesirable if the characteristic time scale (the trade duration target) for a strategy is less than 4 hours. The user can decide to only make trade entries if the model is less than
a certain number of hours old by setting the `expiration_hours` in the config file:

```json
    "freqai": {
        "expiration_hours": 0.5
    }
```

In the presented example config, the user will only allow predictions on models that are less than 1/2 hour old.

### Purging old model data

FreqAI stores new model files each time it retrains. These files become obsolete as new models are trained and FreqAI adapts to new market conditions. Users planning to leave FreqAI running for extended periods of time with high frequency retraining should enable `purge_old_models` in their config:

```json
    "freqai": {
        "purge_old_models": true
    }
```

This will automatically purge all models older than the two most recently trained ones.

### Returning additional info from training

The user may find that there are some important metrics that they'd like to return to the strategy at the end of each model training.
The user can include these metrics by assigning them to `dk.data['extra_returns_per_train']['my_new_value'] = XYZ` inside their custom prediction model class. FreqAI takes the `my_new_value` assigned in this dictionary and expands it to fit the dataframe returned to the strategy.
The user can then use the value in the strategy with `dataframe['my_new_value']`. An example of how this is already used in FreqAI is
the `&*_mean` and `&*_std` values, which indicate the mean and standard deviation of the particular target (label) during the most recent training.
An example, where the user wants to use live metrics from the trade database, is shown below:

```json
    "freqai": {
        "extra_returns_per_train": {"total_profit": 4}
    }
```

The user needs to set the standard dictionary in the config so that FreqAI can return proper dataframe shapes. These values will likely be overridden by the prediction model, but in the case where the model has yet to set them, or needs a default initial value, this is the value that will be returned.

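On the model side, the assignment half of this mechanism might look like the sketch below. The surrounding context and the profit value are illustrative; only the `dk.data['extra_returns_per_train']` assignment comes from the description above.

```python
# Sketch: `dk` stands for the FreqaiDataKitchen instance available inside a
# custom prediction model; mocked here as a plain dict for illustration.
dk_data = {"extra_returns_per_train": {}}

total_profit = 4  # hypothetical metric, e.g. computed from the trade database
dk_data["extra_returns_per_train"]["total_profit"] = total_profit
# In a real model: dk.data['extra_returns_per_train']['total_profit'] = total_profit
# The strategy then reads it back via dataframe['total_profit'].
```
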
### Setting up a follower

The user can define:

```json
    "freqai": {
        "follow_mode": true,
        "identifier": "example"
    }
```

to indicate to the bot that it should not train models, but instead should look for models trained by a leader with the same `identifier`. In this example, the user has a leader bot with `identifier: "example"`. The leader bot is already running, or is launched simultaneously with the follower.
The follower will load models created by the leader and use them for inferencing to obtain predictions.

## Data manipulation techniques

### Feature normalization

The feature set created by the user is automatically normalized to the training data. The same normalization is then applied to all test data and unseen prediction data (dry/live/backtest).

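Conceptually, that means the normalization statistics come from the training split only and are reused everywhere else; a sketch of the idea (FreqAI's internal implementation may differ):

```python
import numpy as np

train = np.random.rand(1000, 5)   # illustrative training feature matrix
unseen = np.random.rand(10, 5)    # illustrative incoming prediction features

# Compute bounds on the training data only, then reuse them for unseen data.
t_min, t_max = train.min(axis=0), train.max(axis=0)
train_norm = 2 * (train - t_min) / (t_max - t_min) - 1
unseen_norm = 2 * (unseen - t_min) / (t_max - t_min) - 1
```
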
### Reducing data dimensionality with Principal Component Analysis

Users can reduce the dimensionality of their features by activating `principal_component_analysis` in the config:

```json
    "freqai": {
        "feature_parameters" : {
            "principal_component_analysis": true
        }
    }
```

This will perform PCA on the features and reduce the dimensionality of the data so that the explained variance of the data set is >= 0.999.

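In scikit-learn terms, the described behavior corresponds to something like the following sketch (the 0.999 target is the figure quoted above; the rest is illustrative):

```python
import numpy as np
from sklearn.decomposition import PCA

features = np.random.rand(500, 100)  # illustrative normalized feature matrix

# A float n_components in (0, 1) keeps the smallest number of components
# whose cumulative explained variance reaches that fraction.
pca = PCA(n_components=0.999)
reduced = pca.fit_transform(features)
print(reduced.shape)  # (500, k) with k <= 100
```
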
### Stratifying the data for training and testing the model

The user can stratify (group) the training/testing data using:

```json
    "freqai": {
        "feature_parameters" : {
            "stratify_training_data": 3
        }
    }
```

This will split the data chronologically so that every Xth data point is used to test the model after training. In the
example above, the user is asking for every third data point in the dataframe to be used for
testing; the other points are used for training.

The test data is used to evaluate the performance of the model after training. If the test score is high, the model is able to capture the behavior of the data well. If the test score is low, either the model does not capture the complexity of the data, the test data is significantly different from the training data, or a different model should be used.

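The chronological every-Xth-point split can be pictured with plain indexing (a sketch of the idea, not FreqAI's implementation):

```python
import numpy as np

data = np.arange(12)   # stand-in for chronologically ordered data points
stratify = 3           # stratify_training_data: 3

test = data[::stratify]                              # every 3rd point -> [0, 3, 6, 9]
train = data[np.arange(len(data)) % stratify != 0]   # the remaining points
```
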
### Controlling the model learning process

Model training parameters are unique to the machine learning library selected by the user. FreqAI allows the user to set any parameter for any library using the `model_training_parameters` dictionary in the user configuration file. The example configuration file (found in `config_examples/config_freqai.example.json`) shows some of the example parameters associated with `Catboost` and `LightGBM`, but the user can add any parameters available in those libraries.

Data split parameters are defined in `data_split_parameters`, which can be any parameters associated with `Sklearn`'s `train_test_split()` function.

FreqAI includes some additional parameters such as `weight_factor`, which allows the user to weight more recent data more strongly
than past data via an exponential function:

$$ W_i = \exp\left(\frac{-i}{\alpha n}\right) $$

where $W_i$ is the weight of data point $i$ in a total set of $n$ data points. Below is a figure showing the effect of different weight factors on the data points (candles) in a feature set.

![weight-factor](assets/weights_factor.png)

`train_test_split()` has a parameter called `shuffle` that allows the user to keep the data unshuffled. This is particularly useful to avoid biasing training with temporally auto-correlated data.

Finally, `label_period_candles` defines the offset (number of candles into the future) used for the `labels`. In the presented example config,
the user is asking for `labels` that are 24 candles in the future.

### Outlier removal

#### Removing outliers with the Dissimilarity Index

The user can tell FreqAI to remove outlier data points from the training/test data sets using a Dissimilarity Index by including the following statement in the config:

```json
    "freqai": {
        "feature_parameters" : {
            "DI_threshold": 1
        }
    }
```

Equity and crypto markets suffer from a high level of non-patterned noise in the form of outlier data points. The Dissimilarity Index (DI) aims to quantify the uncertainty associated with each prediction made by the model. The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty.

To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points:

$$ d_{ab} = \sqrt{\sum_{j=1}^p(X_{a,j}-X_{b,j})^2} $$

where $d_{ab}$ is the distance between the normalized points $a$ and $b$, and $p$ is the number of features, i.e., the length of the vector $X$. The characteristic distance, $\overline{d}$, for a set of training data points is simply the mean of the average distances:

$$ \overline{d} = \frac{1}{n^2}\sum_{a=1}^n\sum_{b=1}^n d_{ab} $$

$\overline{d}$ quantifies the spread of the training data, which is compared to the distance between a new prediction feature vector, $X_k$, and all the training data:

$$ d_k = \min_i d_{k,i} $$

which enables the estimation of the Dissimilarity Index as:

$$ DI_k = d_k/\overline{d} $$

The user can tweak the DI through the `DI_threshold` to increase or decrease the extrapolation of the trained model.

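In numpy terms, the DI computation sketched above looks roughly like this (an illustrative re-derivation of the formulas, not FreqAI's actual code):

```python
import numpy as np
from scipy.spatial.distance import cdist, pdist

train = np.random.rand(200, 10)   # normalized training feature vectors
x_k = np.random.rand(1, 10)       # a new prediction feature vector

d_bar = pdist(train).mean()            # characteristic spread of the training data
d_k = cdist(x_k, train).min()          # distance to the nearest training point
DI_k = d_k / d_bar                     # Dissimilarity Index

DI_threshold = 1.0
do_predict = int(DI_k < DI_threshold)  # predictions with DI above threshold are rejected
```
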
Below is a figure that describes the DI for a 3D data set.

![DI](assets/DI.png)

#### Removing outliers using a Support Vector Machine (SVM)

The user can tell FreqAI to remove outlier data points from the training/test data sets using an SVM by setting:

```json
    "freqai": {
        "feature_parameters" : {
            "use_SVM_to_remove_outliers": true
        }
    }
```

FreqAI will train an SVM on the training data (or components of it if the user activated
`principal_component_analysis`) and remove any data point that the SVM deems to be beyond the feature space.

The parameter `shuffle` is by default set to `False` to ensure consistent results. If it is set to `True`, running the SVM multiple times on the same data set might result in different outcomes due to `max_iter` being too low for the algorithm to reach the demanded `tol`. Increasing `max_iter` solves this issue, but causes the procedure to take longer.

The parameter `nu`, *very* broadly, is the amount of data points that should be considered outliers.

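These parameters are passed through the `svm_params` dictionary described in the parameter table above; a plausible configuration (the specific values are illustrative) might be:

```json
    "freqai": {
        "feature_parameters" : {
            "use_SVM_to_remove_outliers": true,
            "svm_params": {
                "shuffle": false,
                "nu": 0.1
            }
        }
    }
```
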
#### Removing outliers with DBSCAN

The user can configure FreqAI to use DBSCAN to cluster and remove outliers from the training/test data set, or incoming outliers from predictions, by activating `use_DBSCAN_to_remove_outliers` in the config:

```json
    "freqai": {
        "feature_parameters" : {
            "use_DBSCAN_to_remove_outliers": true
        }
    }
```

DBSCAN is an unsupervised machine learning algorithm that clusters data without needing to know how many clusters there should be.

Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters the data set by setting all data points that have $N-1$ other data points within a distance of $\varepsilon$ as *core points*. A data point that is within a distance of $\varepsilon$ from a *core point* but that does not have $N-1$ other data points within a distance of $\varepsilon$ from itself is considered an *edge point*. A cluster is then the collection of *core points* and *edge points*. Data points that have no other data points at a distance $<\varepsilon$ are considered outliers. The figure below shows a cluster with $N = 3$.

![dbscan](assets/freqai_dbscan.png)

FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html) (external website)) with `min_samples` ($N$) taken as double the number of user-defined features, and `eps` ($\varepsilon$) taken as the longest distance in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.

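For reference, the scikit-learn call pattern behind this looks like the sketch below (the `min_samples`/`eps` values are placeholders; FreqAI derives them automatically as described above):

```python
import numpy as np
from sklearn.cluster import DBSCAN

features = np.random.rand(500, 20)   # illustrative feature matrix

# min_samples ~ 2 * number of features; eps derived from the k-distance graph in FreqAI.
clustering = DBSCAN(eps=0.5, min_samples=2 * features.shape[1]).fit(features)

# Points labeled -1 did not join any cluster and are treated as outliers.
outlier_mask = clustering.labels_ == -1
```
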
## Additional information

### Common pitfalls

FreqAI cannot be combined with dynamic `VolumePairlists` (or any pairlist filter that adds and removes pairs dynamically).
This is for performance reasons - FreqAI relies on making quick predictions/retrains. To do this effectively,
it needs to download all the training data at the beginning of a dry/live instance. FreqAI stores and appends
new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, FreqAI does work with the `ShufflePairlist` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume).

## Credits

FreqAI was developed by a group of individuals who all contributed specific skillsets to the project.

Conception and software development:
Robert Caulk @robcaulk

Theoretical brainstorming, data analysis:
Elin Törnquist @th0rntwig

Code review, software architecture brainstorming:
@xmatthias

Beta testing and bug reporting:
@bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm,
Juha Nykänen @suikula, Wagner Costa @wagnercosta

````diff
@@ -40,18 +40,21 @@ pip install -r requirements-hyperopt.txt
 ```
 usage: freqtrade hyperopt [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
                           [--userdir PATH] [-s NAME] [--strategy-path PATH]
-                          [-i TIMEFRAME] [--timerange TIMERANGE]
+                          [--recursive-strategy-search] [--freqaimodel NAME]
+                          [--freqaimodel-path PATH] [-i TIMEFRAME]
+                          [--timerange TIMERANGE]
                           [--data-format-ohlcv {json,jsongz,hdf5}]
                           [--max-open-trades INT]
                           [--stake-amount STAKE_AMOUNT] [--fee FLOAT]
                           [-p PAIRS [PAIRS ...]] [--hyperopt-path PATH]
                           [--eps] [--dmmp] [--enable-protections]
-                          [--dry-run-wallet DRY_RUN_WALLET] [-e INT]
+                          [--dry-run-wallet DRY_RUN_WALLET]
+                          [--timeframe-detail TIMEFRAME_DETAIL] [-e INT]
                           [--spaces {all,buy,sell,roi,stoploss,trailing,protection,default} [{all,buy,sell,roi,stoploss,trailing,protection,default} ...]]
                           [--print-all] [--no-color] [--print-json] [-j JOBS]
                           [--random-state INT] [--min-trades INT]
                           [--hyperopt-loss NAME] [--disable-param-export]
-                          [--ignore-missing-spaces]
+                          [--ignore-missing-spaces] [--analyze-per-epoch]
 
 optional arguments:
   -h, --help            show this help message and exit
@@ -89,6 +92,9 @@ optional arguments:
   --dry-run-wallet DRY_RUN_WALLET, --starting-balance DRY_RUN_WALLET
                         Starting balance, used for backtesting / hyperopt and
                         dry-runs.
+  --timeframe-detail TIMEFRAME_DETAIL
+                        Specify detail timeframe for backtesting (`1m`, `5m`,
+                        `30m`, `1h`, `1d`).
   -e INT, --epochs INT  Specify number of epochs (default: 100).
   --spaces {all,buy,sell,roi,stoploss,trailing,protection,default} [{all,buy,sell,roi,stoploss,trailing,protection,default} ...]
                         Specify which parameters to hyperopt. Space-separated
@@ -116,12 +122,15 @@ optional arguments:
                         ShortTradeDurHyperOptLoss, OnlyProfitHyperOptLoss,
                         SharpeHyperOptLoss, SharpeHyperOptLossDaily,
                         SortinoHyperOptLoss, SortinoHyperOptLossDaily,
-                        CalmarHyperOptLoss, MaxDrawDownHyperOptLoss, ProfitDrawDownHyperOptLoss
+                        CalmarHyperOptLoss, MaxDrawDownHyperOptLoss,
+                        MaxDrawDownRelativeHyperOptLoss,
+                        ProfitDrawDownHyperOptLoss
   --disable-param-export
                         Disable automatic hyperopt parameter export.
   --ignore-missing-spaces, --ignore-unparameterized-spaces
                         Suppress errors for any requested Hyperopt spaces that
                         do not contain any parameters.
+  --analyze-per-epoch   Run populate_indicators once per epoch.
 
 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
@@ -144,6 +153,12 @@ Strategy arguments:
                         Specify strategy class name which will be used by the
                         bot.
   --strategy-path PATH  Specify additional strategy lookup path.
+  --recursive-strategy-search
+                        Recursively search for a strategy in the strategies
+                        folder.
+  --freqaimodel NAME    Specify a custom freqaimodels.
+  --freqaimodel-path PATH
+                        Specify additional lookup path for freqaimodels.
 
 ```
````

````diff
@@ -176,7 +191,7 @@ Rarely you may also need to create a [nested class](advanced-hyperopt.md#overrid
 
 ### Hyperopt execution logic
 
-Hyperopt will first load your data into memory and will then run `populate_indicators()` once per Pair to generate all indicators.
+Hyperopt will first load your data into memory and will then run `populate_indicators()` once per Pair to generate all indicators, unless `--analyze-per-epoch` is specified.
 
 Hyperopt will then spawn into different processes (number of processors, or `-j <n>`), and run backtesting over and over again, changing the parameters that are part of the `--spaces` defined.
 
````

````diff
@@ -269,7 +284,8 @@ The last one we call `trigger` and use it to decide which buy trigger we want to
 
 !!! Note "Parameter space assignment"
     Parameters must either be assigned to a variable named `buy_*` or `sell_*` - or contain `space='buy'` | `space='sell'` to be assigned to a space correctly.
-    If no parameter is available for a space, you'll receive the error that no space was found when running hyperopt.
+    If no parameter is available for a space, you'll receive the error that no space was found when running hyperopt.
+    Parameters with unclear space (e.g. `adx_period = IntParameter(4, 24, default=14)` - no explicit nor implicit space) will not be detected and will therefore be ignored.
 
 So let's write the buy strategy using these values:
 
````

````diff
@@ -332,6 +348,7 @@ There are four parameter types each suited for different purposes.
 ## Optimizing an indicator parameter
 
 Assuming you have a simple strategy in mind - a EMA cross strategy (2 Moving averages crossing) - and you'd like to find the ideal parameters for this strategy.
+By default, we assume a stoploss of 5% - and a take-profit (`minimal_roi`) of 10% - which means freqtrade will sell the trade once 10% profit has been reached.
 
 ``` python
 from pandas import DataFrame
@@ -346,6 +363,9 @@ import freqtrade.vendor.qtpylib.indicators as qtpylib
 class MyAwesomeStrategy(IStrategy):
     stoploss = -0.05
     timeframe = '15m'
+    minimal_roi = {
+        "0": 0.10
+    }
     # Define the parameter spaces
     buy_ema_short = IntParameter(3, 50, default=5)
     buy_ema_long = IntParameter(15, 200, default=50)
````

````diff
@@ -380,7 +400,7 @@ class MyAwesomeStrategy(IStrategy):
         return dataframe
 
     def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        conditions = []
+        conditions = []
         conditions.append(qtpylib.crossed_above(
             dataframe[f'ema_long_{self.buy_ema_long.value}'], dataframe[f'ema_short_{self.buy_ema_short.value}']
         ))
````

````diff
@@ -401,7 +421,7 @@ Using `self.buy_ema_short.range` will return a range object containing all entri
 In this case (`IntParameter(3, 50, default=5)`), the loop would run for all numbers between 3 and 50 (`[3, 4, 5, ... 49, 50]`).
 By using this in a loop, hyperopt will generate 48 new columns (`['buy_ema_3', 'buy_ema_4', ... , 'buy_ema_50']`).
 
-Hyperopt itself will then use the selected value to create the buy and sell signals
+Hyperopt itself will then use the selected value to create the buy and sell signals.
 
 While this strategy is most likely too simple to provide consistent profit, it should serve as an example how optimize indicator parameters.
 
````

````diff
@@ -412,9 +432,10 @@ While this strategy is most likely too simple to provide consistent profit, it s
 `range` property may also be used with `DecimalParameter` and `CategoricalParameter`. `RealParameter` does not provide this property due to infinite search space.
 
 ??? Hint "Performance tip"
-    By doing the calculation of all possible indicators in `populate_indicators()`, the calculation of the indicator happens only once for every parameter.
-    While this may slow down the hyperopt startup speed, the overall performance will increase as the Hyperopt execution itself may pick the same value for multiple epochs (changing other values).
-    You should however try to use space ranges as small as possible. Every new column will require more memory, and every possibility hyperopt can try will increase the search space.
+    During normal hyperopting, indicators are calculated once and supplied to each epoch, linearly increasing RAM usage as a factor of increasing cores. As this also has performance implications, hyperopt provides `--analyze-per-epoch` which will move the execution of `populate_indicators()` to the epoch process, calculating a single value per parameter per epoch instead of using the `.range` functionality. In this case, `.range` functionality will only return the actually used value. This will reduce RAM usage, but increase CPU usage. However, your hyperopting run will be less likely to fail due to Out Of Memory (OOM) issues.
+
+    In either case, you should try to use space ranges as small as possible this will improve CPU/RAM usage in both scenarios.
 
 
 ## Optimizing protections
 
````

````diff
@@ -563,7 +584,8 @@ Currently, the following loss functions are builtin:
 * `SharpeHyperOptLossDaily` - optimizes Sharpe Ratio calculated on **daily** trade returns relative to standard deviation.
 * `SortinoHyperOptLoss` - optimizes Sortino Ratio calculated on trade returns relative to **downside** standard deviation.
 * `SortinoHyperOptLossDaily` - optimizes Sortino Ratio calculated on **daily** trade returns relative to **downside** standard deviation.
-* `MaxDrawDownHyperOptLoss` - Optimizes Maximum drawdown.
+* `MaxDrawDownHyperOptLoss` - Optimizes Maximum absolute drawdown.
+* `MaxDrawDownRelativeHyperOptLoss` - Optimizes both maximum absolute drawdown while also adjusting for maximum relative drawdown.
 * `CalmarHyperOptLoss` - Optimizes Calmar Ratio calculated on trade returns relative to max drawdown.
 * `ProfitDrawDownHyperOptLoss` - Optimizes by max Profit & min Drawdown objective. `DRAWDOWN_MULT` variable within the hyperoptloss file can be adjusted to be stricter or more flexible on drawdown purposes.
 
````

````diff
@@ -677,7 +699,7 @@ class MyAwesomeStrategy(IStrategy):
 
 !!! Note
     Values in the configuration file will overwrite Parameter-file level parameters - and both will overwrite parameters within the strategy.
-    The prevalence is therefore: config > parameter file > strategy
+    The prevalence is therefore: config > parameter file > strategy `*_params` > parameter default
 
 ### Understand Hyperopt ROI results
 
````

````diff
@@ -859,10 +881,29 @@ You can also enable position stacking in the configuration file by explicitly se
 As hyperopt consumes a lot of memory (the complete data needs to be in memory once per parallel backtesting process), it's likely that you run into "out of memory" errors.
 To combat these, you have multiple options:
 
-* reduce the amount of pairs
-* reduce the timerange used (`--timerange <timerange>`)
-* reduce the number of parallel processes (`-j <n>`)
-* Increase the memory of your machine
+* Reduce the amount of pairs.
+* Reduce the timerange used (`--timerange <timerange>`).
+* Avoid using `--timeframe-detail` (this loads a lot of additional data into memory).
+* Reduce the number of parallel processes (`-j <n>`).
+* Increase the memory of your machine.
+* Use `--analyze-per-epoch` if you're using a lot of parameters with `.range` functionality.
 
+
+## The objective has been evaluated at this point before.
+
+If you see `The objective has been evaluated at this point before.` - then this is a sign that your space has been exhausted, or is close to that.
+Basically all points in your space have been hit (or a local minima has been hit) - and hyperopt does no longer find points in the multi-dimensional space it did not try yet.
+Freqtrade tries to counter the "local minima" problem by using new, randomized points in this case.
+
+Example:
+
+``` python
+buy_ema_short = IntParameter(5, 20, default=10, space="buy", optimize=True)
+# This is the only parameter in the buy space
+```
+
+The `buy_ema_short` space has 16 possible values (`5, 6, ... 19, 20`). If you now run hyperopt for the buy space, hyperopt will only have 16 values to try before running out of options.
+Your epochs should therefore be aligned to the possible values - or you should be ready to interrupt a run if you notice a lot of `The objective has been evaluated at this point before.` warnings.
+
 ## Show details of Hyperopt results
 
````

````diff
@@ -44,7 +44,7 @@ It uses configuration from `exchange.pair_whitelist` and `exchange.pair_blacklis
 ```json
 "pairlists": [
     {"method": "StaticPairList"}
-],
+],
 ```
 
 By default, only currently enabled pairs are allowed.
````

````diff
@@ -160,17 +160,17 @@ This filter allows freqtrade to ignore pairs until they have been listed for at
 
 Offsets an incoming pairlist by a given `offset` value.
 
-As an example it can be used in conjunction with `VolumeFilter` to remove the top X volume pairs. Or to split
-a larger pairlist on two bot instances.
+As an example it can be used in conjunction with `VolumeFilter` to remove the top X volume pairs. Or to split a larger pairlist on two bot instances.
 
-Example to remove the first 10 pairs from the pairlist:
+Example to remove the first 10 pairs from the pairlist, and takes the next 20 (taking items 10-30 of the initial list):
 
 ```json
 "pairlists": [
     // ...
     {
         "method": "OffsetFilter",
-        "offset": 10
+        "offset": 10,
+        "number_assets": 20
     }
 ],
 ```
````

````diff
@@ -181,7 +181,7 @@ Example to remove the first 10 pairs from the pairlist:
 `VolumeFilter`.
 
 !!! Note
-    An offset larger then the total length of the incoming pairlist will result in an empty pairlist.
+    An offset larger than the total length of the incoming pairlist will result in an empty pairlist.
 
 #### PerformanceFilter
 
````

@@ -48,6 +48,10 @@ If `trade_limit` or more trades resulted in stoploss, trading will stop for `sto
|
||||
|
||||
This applies across all pairs, unless `only_per_pair` is set to true, which will then only look at one pair at a time.
|
||||
|
||||
Similarly, this protection will by default look at all trades (long and short). For futures bots, setting `only_per_side` will make the bot only consider one side, and will then only lock this one side, allowing for example shorts to continue after a series of long stoplosses.
|
||||
|
||||
`required_profit` will determine the required relative profit (or loss) for stoplosses to be considered. This should normally not be set and defaults to 0.0 - which means all losing stoplosses will trigger a block.
|
||||
|
||||
The below example stops trading for all pairs for 4 candles after the last trade if the bot hit stoploss 4 times within the last 24 candles.
|
||||
|
||||
``` python
|
||||
@@ -59,7 +63,9 @@ def protections(self):
|
||||
"lookback_period_candles": 24,
|
||||
"trade_limit": 4,
|
||||
"stop_duration_candles": 4,
|
||||
"only_per_pair": False
|
||||
"required_profit": 0.0,
|
||||
"only_per_pair": False,
|
||||
"only_per_side": False
|
||||
}
|
||||
]
|
||||
```
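
Pieced together from the fragments above, the complete property could look like the following sketch (class name and base attributes are placeholders; the values mirror the example):

``` python
from freqtrade.strategy import IStrategy


class MyProtectedStrategy(IStrategy):  # placeholder strategy
    timeframe = "5m"
    stoploss = -0.10
    minimal_roi = {"0": 0.1}

    @property
    def protections(self):
        return [
            {
                "method": "StoplossGuard",
                "lookback_period_candles": 24,
                "trade_limit": 4,
                "stop_duration_candles": 4,
                "required_profit": 0.0,
                "only_per_pair": False,
                "only_per_side": False
            }
        ]
```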
|
||||
@@ -93,6 +99,8 @@ def protections(self):
|
||||
`LowProfitPairs` uses all trades for a pair within `lookback_period` in minutes (or in candles when using `lookback_period_candles`) to determine the overall profit ratio.
|
||||
If that ratio is below `required_profit`, that pair will be locked for `stop_duration` in minutes (or in candles when using `stop_duration_candles`).
|
||||
|
||||
For futures bots, setting `only_per_side` will make the bot only consider one side, and will then only lock this one side, allowing for example shorts to continue after a series of long losses.
|
||||
|
||||
The below example will stop trading a pair for 60 minutes if the pair does not have a required profit of 2% (and a minimum of 2 trades) within the last 6 candles.
|
||||
|
||||
``` python
|
||||
@@ -104,7 +112,8 @@ def protections(self):
|
||||
"lookback_period_candles": 6,
|
||||
"trade_limit": 2,
|
||||
"stop_duration": 60,
|
||||
"required_profit": 0.02
|
||||
"required_profit": 0.02,
|
||||
"only_per_pair": False,
|
||||
}
|
||||
]
|
||||
```
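
Merging the example above with the options described in the text, a complete `LowProfitPairs` entry might look like this sketch (`only_per_side` is assumed to default to `False`):

``` python
low_profit_pairs_protection = {
    "method": "LowProfitPairs",
    "lookback_period_candles": 6,
    "trade_limit": 2,
    "stop_duration": 60,
    "required_profit": 0.02,
    "only_per_pair": False,
    "only_per_side": False,
}
```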
|
||||
|
@@ -22,10 +22,6 @@ Freqtrade is a free and open source crypto trading bot written in Python. It is
|
||||
|
||||

|
||||
|
||||
## Sponsored promotion
|
||||
|
||||
[](https://tokenbot.com/?utm_source=github&utm_medium=freqtrade&utm_campaign=algodevs)
|
||||
|
||||
## Features
|
||||
|
||||
- Develop your Strategy: Write your strategy in python, using [pandas](https://pandas.pydata.org/). Example strategies to inspire you are available in the [strategy repository](https://github.com/freqtrade/freqtrade-strategies).
|
||||
@@ -51,7 +47,7 @@ Please read the [exchange specific notes](exchanges.md) to learn about eventual,
|
||||
- [X] [OKX](https://okx.com/) (Former OKEX)
|
||||
- [ ] [potentially many others through <img alt="ccxt" width="30px" src="assets/ccxt-logo.svg" />](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_
|
||||
|
||||
### Experimentally, freqtrade also supports futures on the following exchanges:
|
||||
### Supported Futures Exchanges (experimental)
|
||||
|
||||
- [X] [Binance](https://www.binance.com/)
|
||||
- [X] [Gate.io](https://www.gate.io/ref/6266643)
|
||||
|
@@ -326,6 +326,16 @@ python3 -m pip install --upgrade pip
|
||||
python3 -m pip install -e .
|
||||
```
|
||||
|
||||
Patch conda libta-lib (Linux only)
|
||||
|
||||
```bash
|
||||
# Ensure that the environment is active!
|
||||
conda activate freqtrade-conda
|
||||
|
||||
cd build_helpers
|
||||
bash install_ta-lib.sh ${CONDA_PREFIX} nosudo
|
||||
```
|
||||
|
||||
### Congratulations
|
||||
|
||||
[You are ready](#you-are-ready) to run the bot.
|
||||
|
@@ -64,7 +64,10 @@ You will also have to pick a "margin mode" (explanation below) - with freqtrade
|
||||
|
||||
### Margin mode
|
||||
|
||||
The possible values are: `isolated`, or `cross`(*currently unavailable*)
|
||||
On top of `trading_mode` - you will also have to configure your `margin_mode`.
|
||||
While freqtrade currently only supports one margin mode, this will change, and by configuring it now you're all set for future updates.
|
||||
|
||||
The possible values are: `isolated`, or `cross`(*currently unavailable*).
|
||||
|
||||
#### Isolated margin mode
|
||||
|
||||
@@ -82,6 +85,16 @@ One account is used to share collateral between markets (trading pairs). Margin
|
||||
"margin_mode": "cross"
|
||||
```
|
||||
|
||||
## Set leverage to use
|
||||
|
||||
Different strategies and risk profiles will require different levels of leverage.
|
||||
While you could configure one static leverage value - freqtrade offers you the flexibility to adjust this via the [strategy leverage callback](strategy-callbacks.md#leverage-callback) - which allows you to use different leverages by pair, or based on some other factor benefiting your strategy result.
|
||||
|
||||
If not implemented, leverage defaults to 1x (no leverage).
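
As a minimal sketch (assuming the `leverage()` callback signature documented on the strategy callbacks page), per-pair leverage could look like this - the pairs and values are purely illustrative:

``` python
from datetime import datetime
from typing import Optional

from freqtrade.strategy import IStrategy


class AwesomeStrategy(IStrategy):
    def leverage(self, pair: str, current_time: datetime, current_rate: float,
                 proposed_leverage: float, max_leverage: float,
                 entry_tag: Optional[str], side: str, **kwargs) -> float:
        # Use modest leverage on majors, no leverage everywhere else (illustrative only).
        if pair in ("BTC/USDT:USDT", "ETH/USDT:USDT"):
            return min(3.0, max_leverage)
        return 1.0
```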
|
||||
|
||||
!!! Warning
|
||||
Higher leverage also equals higher risk - be sure you fully understand the implications of using leverage!
|
||||
|
||||
## Understand `liquidation_buffer`
|
||||
|
||||
*Defaults to `0.05`*
|
||||
@@ -101,6 +114,13 @@ Possible values are any floats between 0.0 and 0.99
|
||||
!!! Danger "A `liquidation_buffer` of 0.0, or a low `liquidation_buffer` is likely to result in liquidations, and liquidation fees"
|
||||
Currently Freqtrade is able to calculate liquidation prices, but does not calculate liquidation fees. Setting your `liquidation_buffer` to 0.0, or using a low `liquidation_buffer` could result in your positions being liquidated. Freqtrade does not track liquidation fees, so liquidations will result in inaccurate profit/loss results for your bot. If you use a low `liquidation_buffer`, it is recommended to use `stoploss_on_exchange` if your exchange supports this.
|
||||
|
||||
## Unavailable funding rates
|
||||
|
||||
For futures data, exchanges commonly provide the futures candles, the marks, and the funding rates. However, it is common that whilst candles and marks might be available, the funding rates are not. This can affect backtesting timeranges, i.e. you may only be able to test recent timeranges and not earlier, experiencing the `No data found. Terminating.` error. To get around this, add the `futures_funding_rate` config option as listed in [configuration.md](configuration.md), and it is recommended that you set this to `0`, unless you know a given specific funding rate for your pair, exchange and timerange. Setting this to anything other than `0` can have drastic effects on your profit calculations within your strategy, e.g. within the `custom_exit`, `custom_stoploss`, etc. functions.
|
||||
|
||||
!!! Warning "This will mean your backtests are inaccurate."
|
||||
This will not overwrite funding rates that are available from the exchange, but bear in mind that setting a false funding rate will mean backtesting results will be inaccurate for historical timeranges where funding rates are not available.
|
||||
|
||||
### Developer
|
||||
|
||||
#### Margin mode
|
||||
|
@@ -1,5 +1,6 @@
|
||||
mkdocs==1.3.0
|
||||
mkdocs-material==8.2.10
|
||||
mdx_truly_sane_lists==1.2
|
||||
pymdown-extensions==9.4
|
||||
jinja2==3.1.1
|
||||
markdown==3.3.7
|
||||
mkdocs==1.3.1
|
||||
mkdocs-material==8.4.1
|
||||
mdx_truly_sane_lists==1.3
|
||||
pymdown-extensions==9.5
|
||||
jinja2==3.1.2
|
||||
|
@@ -163,6 +163,8 @@ python3 scripts/rest_client.py --config rest_config.json <command> [optional par
|
||||
| `strategy <strategy>` | Get specific Strategy content. **Alpha**
|
||||
| `available_pairs` | List available backtest data. **Alpha**
|
||||
| `version` | Show version.
|
||||
| `sysinfo` | Show information about the system load.
|
||||
| `health` | Show bot health (last bot loop).
|
||||
|
||||
!!! Warning "Alpha status"
|
||||
Endpoints labeled with *Alpha status* above may change at any time without notice.
|
||||
@@ -227,6 +229,11 @@ forceexit
|
||||
Force-exit a trade.
|
||||
|
||||
:param tradeid: Id of the trade (can be received via status command)
|
||||
:param ordertype: Order type to use (must be market or limit)
|
||||
:param amount: Amount to sell. Full sell if not given
|
||||
|
||||
health
|
||||
Provides a quick health check of the running bot.
|
||||
|
||||
locks
|
||||
Return current locks
|
||||
@@ -312,12 +319,13 @@ version
|
||||
|
||||
whitelist
|
||||
Show the current whitelist.
|
||||
|
||||
```
|
||||
|
||||
### OpenAPI interface
|
||||
|
||||
To enable the builtin openAPI interface (Swagger UI), specify `"enable_openapi": true` in the api_server configuration.
|
||||
This will enable the Swagger UI at the `/docs` endpoint. By default, that's running at http://localhost:8080/docs/ - but it'll depend on your settings.
|
||||
This will enable the Swagger UI at the `/docs` endpoint. By default, that's running at http://localhost:8080/docs - but it'll depend on your settings.
|
||||
|
||||
### Advanced API usage using JWT tokens
|
||||
|
||||
|
@@ -89,11 +89,12 @@ WHERE id=31;
|
||||
|
||||
If you'd still like to remove a trade from the database directly, you can use the below query.
|
||||
|
||||
```sql
|
||||
DELETE FROM trades WHERE id = <tradeid>;
|
||||
```
|
||||
!!! Danger
|
||||
Some systems (Ubuntu) disable foreign keys in their sqlite3 packaging. When using sqlite - please ensure that foreign keys are on by running `PRAGMA foreign_keys = ON` before the above query.
|
||||
|
||||
```sql
|
||||
DELETE FROM trades WHERE id = <tradeid>;
|
||||
|
||||
DELETE FROM trades WHERE id = 31;
|
||||
```
|
||||
|
||||
@@ -102,13 +103,20 @@ DELETE FROM trades WHERE id = 31;
|
||||
|
||||
## Use a different database system
|
||||
|
||||
Freqtrade uses SQLAlchemy, which supports multiple database systems - as such, a variety of database systems should work.
|
||||
Freqtrade does not depend on, or install, any additional database drivers. Please refer to the [SQLAlchemy docs](https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls) for installation instructions for the respective database systems.
|
||||
|
||||
The following systems have been tested and are known to work with freqtrade:
|
||||
|
||||
* sqlite (default)
|
||||
* PostgreSQL
|
||||
* MariaDB
|
||||
|
||||
!!! Warning
|
||||
By using one of the below database systems, you acknowledge that you know how to manage such a system. Freqtrade will not provide any support with setup or maintenance (or backups) of the below database systems.
|
||||
By using one of the below database systems, you acknowledge that you know how to manage such a system. The freqtrade team will not provide any support with setup or maintenance (or backups) of the below database systems.
|
||||
|
||||
### PostgreSQL
|
||||
|
||||
Freqtrade supports PostgreSQL by using SQLAlchemy, which supports multiple different database systems.
|
||||
|
||||
Installation:
|
||||
`pip install psycopg2-binary`
|
||||
|
||||
|
@@ -130,7 +130,7 @@ In summary: The stoploss will be adjusted to be always be -10% of the highest ob
|
||||
|
||||
### Trailing stop loss, custom positive loss
|
||||
|
||||
It is also possible to have a default stop loss, when you are in the red with your buy (buy - fee), but once you hit positive result the system will utilize a new stop loss, which can have a different value.
|
||||
You could also have a default stop loss when you are in the red with your buy (buy - fee), but once you hit a positive result (or an offset you define) the system will utilize a new stop loss, which can have a different value.
|
||||
For example, your default stop loss is -10%, but once you have more than 0% profit (example 0.1%) a different trailing stoploss will be used.
|
||||
|
||||
!!! Note
|
||||
@@ -142,6 +142,8 @@ Both values require `trailing_stop` to be set to true and `trailing_stop_positiv
|
||||
stoploss = -0.10
|
||||
trailing_stop = True
|
||||
trailing_stop_positive = 0.02
|
||||
trailing_stop_positive_offset = 0.0
|
||||
trailing_only_offset_is_reached = False # Default - not necessary for this example
|
||||
```
|
||||
|
||||
For example, simplified math:
|
||||
@@ -156,11 +158,31 @@ For example, simplified math:
|
||||
The 0.02 would translate to a -2% stop loss.
|
||||
Before this, `stoploss` is used for the trailing stoploss.
|
||||
|
||||
!!! Tip "Use an offset to change your stoploss"
|
||||
Use `trailing_stop_positive_offset` to ensure that your new trailing stoploss will be in profit by setting `trailing_stop_positive_offset` higher than `trailing_stop_positive`. Your first new stoploss value will then already have locked in profits.
|
||||
|
||||
Example with simplified math:
|
||||
|
||||
``` python
|
||||
stoploss = -0.10
|
||||
trailing_stop = True
|
||||
trailing_stop_positive = 0.02
|
||||
trailing_stop_positive_offset = 0.03
|
||||
```
|
||||
|
||||
* the bot buys an asset at a price of 100$
|
||||
* the stop loss is defined at -10%, so the stop loss would get triggered once the asset drops below 90$
|
||||
* assuming the asset now increases to 102$
|
||||
* the stoploss will now be at 91.8$ - 10% below the highest observed rate
|
||||
* assuming the asset now increases to 103.5$ (above the offset configured)
|
||||
* the stop loss will now be -2% of 103.5$ = 101.43$
|
||||
* now the asset drops in value to 102\$, the stop loss will still be 101.43$ and would trigger once price breaks below 101.43$
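
The numbers in this example can be re-derived with a few lines of plain Python (illustrative only - this is not how freqtrade computes the stop internally):

``` python
open_rate = 100.0
stoploss = -0.10
trailing_stop_positive = 0.02
trailing_stop_positive_offset = 0.03

def stop_price(highest_rate: float) -> float:
    # Before the offset is reached, the regular stoploss trails the highest rate.
    if highest_rate < open_rate * (1 + trailing_stop_positive_offset):
        return highest_rate * (1 + stoploss)
    # Once the offset is reached, the tighter trailing stop takes over.
    return highest_rate * (1 - trailing_stop_positive)

print(stop_price(102.0))   # 91.8   (still -10% of the highest observed rate)
print(stop_price(103.5))   # 101.43 (-2% of the highest observed rate)
```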
|
||||
|
||||
### Trailing stop loss only once the trade has reached a certain offset
|
||||
|
||||
It is also possible to use a static stoploss until the offset is reached, and then trail the trade to take profits once the market turns.
|
||||
You can also keep a static stoploss until the offset is reached, and then trail the trade to take profits once the market turns.
|
||||
|
||||
If `"trailing_only_offset_is_reached": true` then the trailing stoploss is only activated once the offset is reached. Until then, the stoploss remains at the configured `stoploss`.
|
||||
If `trailing_only_offset_is_reached = True` then the trailing stoploss is only activated once the offset is reached. Until then, the stoploss remains at the configured `stoploss`.
|
||||
This option can be used with or without `trailing_stop_positive`, but uses `trailing_stop_positive_offset` as offset.
|
||||
|
||||
``` python
|
||||
@@ -191,6 +213,18 @@ For example, simplified math:
|
||||
!!! Tip
|
||||
Make sure to have this value (`trailing_stop_positive_offset`) lower than minimal ROI, otherwise minimal ROI will apply first and sell the trade.
|
||||
|
||||
## Stoploss and Leverage
|
||||
|
||||
Stoploss should be thought of as "risk on this trade" - so a stoploss of 10% on a 100$ trade means you are willing to lose 10$ (10%) on this trade - which would trigger if the price moves 10% to the downside.
|
||||
|
||||
When using leverage, the same principle is applied - with stoploss defining the risk on the trade (the amount you are willing to lose).
|
||||
|
||||
Therefore, a stoploss of 10% on a 10x trade would trigger on a 1% price move.
|
||||
If your stake amount (own capital) was 100$ - this trade would be 1000$ at 10x (after leverage).
|
||||
If price moves 1% - you've lost 10$ of your own capital - therefore stoploss will trigger in this case.
|
||||
|
||||
Make sure to be aware of this, and avoid using a stoploss that is too tight (at 10x leverage, 10% risk may be too little to allow the trade to "breathe" a little).
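
The arithmetic above, written out in plain Python (illustrative numbers only):

``` python
own_capital = 100.0    # stake amount in $
leverage = 10.0
stoploss = 0.10        # 10% risk on own capital

position_size = own_capital * leverage          # 1000 $
adverse_move_to_trigger = stoploss / leverage   # 0.01 -> a 1% price move
loss_at_trigger = position_size * adverse_move_to_trigger  # 10 $ of own capital

print(position_size, adverse_move_to_trigger, loss_at_trigger)
```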
|
||||
|
||||
## Changing stoploss on open trades
|
||||
|
||||
A stoploss on an open trade can be changed by changing the value in the configuration or strategy and use the `/reload_config` command (alternatively, completely stopping and restarting the bot also works).
|
||||
|
@@ -224,3 +224,5 @@ for val in self.buy_ema_short.range:
|
||||
# Append columns to existing dataframe
|
||||
merged_frame = pd.concat(frames, axis=1)
|
||||
```
|
||||
|
||||
Freqtrade does however also counter this by running `dataframe.copy()` on the dataframe right after the `populate_indicators()` method - so performance implications of this should be low to non-existent.
|
||||
|
@@ -17,6 +17,7 @@ Currently available callbacks:
|
||||
* [`confirm_trade_entry()`](#trade-entry-buy-order-confirmation)
|
||||
* [`confirm_trade_exit()`](#trade-exit-sell-order-confirmation)
|
||||
* [`adjust_trade_position()`](#adjust-trade-position)
|
||||
* [`adjust_entry_price()`](#adjust-entry-price)
|
||||
* [`leverage()`](#leverage-callback)
|
||||
|
||||
!!! Tip "Callback calling sequence"
|
||||
@@ -45,6 +46,9 @@ class AwesomeStrategy(IStrategy):
|
||||
self.cust_remote_data = requests.get('https://some_remote_source.example.com')
|
||||
|
||||
```
|
||||
|
||||
During hyperopt, this runs only once at startup.
|
||||
|
||||
## Bot loop start
|
||||
|
||||
A simple callback which is called once at the start of every bot throttling iteration (roughly every 5 seconds, unless configured differently).
|
||||
@@ -71,15 +75,16 @@ class AwesomeStrategy(IStrategy):
|
||||
|
||||
```
|
||||
|
||||
### Stake size management
|
||||
## Stake size management
|
||||
|
||||
Called before entering a trade, this callback makes it possible to manage your position size when placing a new trade.
|
||||
|
||||
```python
|
||||
class AwesomeStrategy(IStrategy):
|
||||
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
|
||||
proposed_stake: float, min_stake: float, max_stake: float,
|
||||
entry_tag: Optional[str], side: str, **kwargs) -> float:
|
||||
proposed_stake: float, min_stake: Optional[float], max_stake: float,
|
||||
leverage: float, entry_tag: Optional[str], side: str,
|
||||
**kwargs) -> float:
|
||||
|
||||
dataframe, _ = self.dp.get_analyzed_dataframe(pair=pair, timeframe=self.timeframe)
|
||||
current_candle = dataframe.iloc[-1].squeeze()
|
||||
@@ -418,7 +423,7 @@ class AwesomeStrategy(IStrategy):
|
||||
!!! Warning "Backtesting"
|
||||
Custom prices are supported in backtesting (starting with 2021.12), and orders will fill if the price falls within the candle's low/high range.
|
||||
Orders that don't fill immediately are subject to regular timeout handling, which happens once per (detail) candle.
|
||||
`custom_exit_price()` is only called for sells of type exit_signal and Custom exit. All other exit-types will use regular backtesting prices.
|
||||
`custom_exit_price()` is only called for sells of type exit_signal, Custom exit and partial exits. All other exit-types will use regular backtesting prices.
|
||||
|
||||
## Custom order timeout rules
|
||||
|
||||
@@ -545,10 +550,12 @@ class AwesomeStrategy(IStrategy):
|
||||
|
||||
:param pair: Pair that's about to be bought/shorted.
|
||||
:param order_type: Order type (as configured in order_types). usually limit or market.
|
||||
:param amount: Amount in target (quote) currency that's going to be traded.
|
||||
:param rate: Rate that's going to be used when using limit orders
|
||||
:param amount: Amount in target (base) currency that's going to be traded.
|
||||
:param rate: Rate that's going to be used when using limit orders
|
||||
or current rate for market orders.
|
||||
:param time_in_force: Time in force. Defaults to GTC (Good-til-cancelled).
|
||||
:param current_time: datetime object, containing the current datetime
|
||||
:param entry_tag: Optional entry_tag (buy_tag) if provided with the buy signal.
|
||||
:param side: 'long' or 'short' - indicating the direction of the proposed trade
|
||||
:param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
|
||||
:return bool: When True is returned, then the buy-order is placed on the exchange.
|
||||
@@ -562,6 +569,14 @@ class AwesomeStrategy(IStrategy):
|
||||
|
||||
`confirm_trade_exit()` can be used to abort a trade exit (sell) at the latest second (maybe because the price is not what we expect).
|
||||
|
||||
`confirm_trade_exit()` may be called multiple times within one iteration for the same trade if different exit-reasons apply.
|
||||
The exit-reasons (if applicable) will be in the following sequence:
|
||||
|
||||
* `exit_signal` / `custom_exit`
|
||||
* `stop_loss`
|
||||
* `roi`
|
||||
* `trailing_stop_loss`
|
||||
|
||||
``` python
|
||||
from freqtrade.persistence import Trade
|
||||
|
||||
@@ -574,7 +589,7 @@ class AwesomeStrategy(IStrategy):
|
||||
rate: float, time_in_force: str, exit_reason: str,
|
||||
current_time: datetime, **kwargs) -> bool:
|
||||
"""
|
||||
Called right before placing a regular sell order.
|
||||
Called right before placing a regular exit order.
|
||||
Timing for this function is critical, so avoid doing heavy computations or
|
||||
network requests in this method.
|
||||
|
||||
@@ -582,17 +597,19 @@ class AwesomeStrategy(IStrategy):
|
||||
|
||||
When not implemented by a strategy, returns True (always confirming).
|
||||
|
||||
:param pair: Pair that's about to be sold.
|
||||
:param pair: Pair for trade that's about to be exited.
|
||||
:param trade: trade object.
|
||||
:param order_type: Order type (as configured in order_types). usually limit or market.
|
||||
:param amount: Amount in quote currency.
|
||||
:param amount: Amount in base currency.
|
||||
:param rate: Rate that's going to be used when using limit orders
|
||||
or current rate for market orders.
|
||||
:param time_in_force: Time in force. Defaults to GTC (Good-til-cancelled).
|
||||
:param exit_reason: Exit reason.
|
||||
Can be any of ['roi', 'stop_loss', 'stoploss_on_exchange', 'trailing_stop_loss',
|
||||
'exit_signal', 'force_exit', 'emergency_exit']
|
||||
:param current_time: datetime object, containing the current datetime
|
||||
:param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
|
||||
:return bool: When True is returned, then the exit-order is placed on the exchange.
|
||||
:return bool: When True, then the exit-order is placed on the exchange.
|
||||
False aborts the process
|
||||
"""
|
||||
if exit_reason == 'force_exit' and trade.calc_profit_ratio(rate) < 0:
|
||||
@@ -604,11 +621,15 @@ class AwesomeStrategy(IStrategy):
|
||||
|
||||
```
|
||||
|
||||
!!! Warning
|
||||
`confirm_trade_exit()` can prevent stoploss exits, causing significant losses as this would ignore stoploss exits.
|
||||
`confirm_trade_exit()` will not be called for Liquidations - as liquidations are forced by the exchange, and therefore cannot be rejected.
|
||||
|
||||
## Adjust trade position
|
||||
|
||||
The `position_adjustment_enable` strategy property enables the usage of `adjust_trade_position()` callback in the strategy.
|
||||
For performance reasons, it's disabled by default and freqtrade will show a warning message on startup if enabled.
|
||||
`adjust_trade_position()` can be used to perform additional orders, for example to manage risk with DCA (Dollar Cost Averaging).
|
||||
`adjust_trade_position()` can be used to perform additional orders, for example to manage risk with DCA (Dollar Cost Averaging) or to increase or decrease positions.
|
||||
|
||||
The `max_entry_position_adjustment` property is used to limit the number of additional buys per trade (on top of the first buy) that the bot can execute. By default, the value is -1, which means the bot has no limit on the number of adjustment buys.
|
||||
|
||||
@@ -616,10 +637,13 @@ The strategy is expected to return a stake_amount (in stake currency) between `m
|
||||
If there are not enough funds in the wallet (the return value is above `max_stake`) then the signal will be ignored.
|
||||
Additional orders also result in additional fees and those orders don't count towards `max_open_trades`.
|
||||
|
||||
This callback is **not** called when there is an open order (either buy or sell) waiting for execution, or when you have reached the maximum amount of extra buys that you have set on `max_entry_position_adjustment`.
|
||||
This callback is **not** called when there is an open order (either buy or sell) waiting for execution.
|
||||
|
||||
`adjust_trade_position()` is called very frequently for the duration of a trade, so you must keep your implementation as performant as possible.
|
||||
|
||||
Position adjustments will always be applied in the direction of the trade, so a positive value will always increase your position, no matter if it's a long or short trade. Modifications to leverage are not possible.
|
||||
Additional buys are ignored once you have reached the maximum amount of extra buys that you have set on `max_entry_position_adjustment`, but the callback is still called, looking for partial exits.
|
||||
|
||||
Position adjustments will always be applied in the direction of the trade, so a positive value will always increase your position (negative values will decrease your position), no matter if it's a long or short trade. Modifications to leverage are not possible.
|
||||
|
||||
!!! Note "About stake size"
|
||||
Using fixed stake size means it will be the amount used for the first order, just like without position adjustment.
|
||||
@@ -628,12 +652,12 @@ Position adjustments will always be applied in the direction of the trade, so a
|
||||
|
||||
!!! Warning
|
||||
Stoploss is still calculated from the initial opening price, not averaged price.
|
||||
Regular stoploss rules still apply (cannot move down).
|
||||
|
||||
!!! Warning "/stopbuy"
|
||||
While `/stopbuy` command stops the bot from entering new trades, the position adjustment feature will continue buying new orders on existing trades.
|
||||
While `/stopentry` command stops the bot from entering new trades, the position adjustment feature will continue buying new orders on existing trades.
|
||||
|
||||
!!! Warning "Backtesting"
|
||||
During backtesting this callback is called for each candle in `timeframe` or `timeframe_detail`, so performance will be affected.
|
||||
During backtesting this callback is called for each candle in `timeframe` or `timeframe_detail`, so run-time performance will be affected.
|
||||
|
||||
``` python
|
||||
from freqtrade.persistence import Trade
|
||||
@@ -655,30 +679,50 @@ class DigDeeperStrategy(IStrategy):
|
||||
|
||||
# This is called when placing the initial order (opening trade)
|
||||
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
|
||||
proposed_stake: float, min_stake: float, max_stake: float,
|
||||
entry_tag: Optional[str], side: str, **kwargs) -> float:
|
||||
proposed_stake: float, min_stake: Optional[float], max_stake: float,
|
||||
leverage: float, entry_tag: Optional[str], side: str,
|
||||
**kwargs) -> float:
|
||||
|
||||
# We need to leave most of the funds for possible further DCA orders
|
||||
# This also applies to fixed stakes
|
||||
return proposed_stake / self.max_dca_multiplier
|
||||
|
||||
def adjust_trade_position(self, trade: Trade, current_time: datetime,
|
||||
current_rate: float, current_profit: float, min_stake: float,
|
||||
max_stake: float, **kwargs):
|
||||
current_rate: float, current_profit: float,
|
||||
min_stake: Optional[float], max_stake: float,
|
||||
current_entry_rate: float, current_exit_rate: float,
|
||||
current_entry_profit: float, current_exit_profit: float,
|
||||
**kwargs) -> Optional[float]:
|
||||
"""
|
||||
Custom trade adjustment logic, returning the stake amount that a trade should be increased.
|
||||
This means extra buy orders with additional fees.
|
||||
Custom trade adjustment logic, returning the stake amount that a trade should be
|
||||
increased or decreased.
|
||||
This means extra buy or sell orders with additional fees.
|
||||
Only called when `position_adjustment_enable` is set to True.
|
||||
|
||||
For full documentation please go to https://www.freqtrade.io/en/latest/strategy-advanced/
|
||||
|
||||
When not implemented by a strategy, returns None
|
||||
|
||||
:param trade: trade object.
|
||||
:param current_time: datetime object, containing the current datetime
|
||||
:param current_rate: Current buy rate.
|
||||
:param current_profit: Current profit (as ratio), calculated based on current_rate.
|
||||
:param min_stake: Minimal stake size allowed by exchange.
|
||||
:param max_stake: Balance available for trading.
|
||||
:param min_stake: Minimal stake size allowed by exchange (for both entries and exits)
|
||||
:param max_stake: Maximum stake allowed (either through balance, or by exchange limits).
|
||||
:param current_entry_rate: Current rate using entry pricing.
|
||||
:param current_exit_rate: Current rate using exit pricing.
|
||||
:param current_entry_profit: Current profit using entry pricing.
|
||||
:param current_exit_profit: Current profit using exit pricing.
|
||||
:param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
|
||||
:return float: Stake amount to adjust your trade
|
||||
:return float: Stake amount to adjust your trade,
|
||||
Positive values to increase position, Negative values to decrease position.
|
||||
Return None for no action.
|
||||
"""
|
||||
|
||||
if current_profit > 0.05 and trade.nr_of_successful_exits == 0:
|
||||
# Take half of the profit at +5%
|
||||
return -(trade.stake_amount / 2)
|
||||
|
||||
if current_profit > -0.05:
|
||||
return None
|
||||
|
||||
@@ -713,6 +757,88 @@ class DigDeeperStrategy(IStrategy):
|
||||
|
||||
```
|
||||
|
||||
### Position adjust calculations
|
||||
|
||||
* Entry rates are calculated using weighted averages.
|
||||
* Exits will not influence the average entry rate.
|
||||
* Partial exit relative profit is relative to the average entry price at this point.
|
||||
* Final exit relative profit is calculated based on the total invested capital. (See example below)
|
||||
|
||||
??? example "Calculation example"
|
||||
*This example assumes 0 fees for simplicity, and a long position on an imaginary coin.*
|
||||
|
||||
* Buy 100@8\$
|
||||
* Buy 100@9\$ -> Avg price: 8.5\$
|
||||
* Sell 100@10\$ -> Avg price: 8.5\$, realized profit 150\$, 17.65%
|
||||
* Buy 150@11\$ -> Avg price: 10\$, realized profit 150\$, 17.65%
|
||||
* Sell 100@12\$ -> Avg price: 10\$, total realized profit 350\$, 20%
|
||||
* Sell 150@14\$ -> Avg price: 10\$, total realized profit 950\$, 40%
|
||||
|
||||
The total profit for this trade was 950$ on a 3350$ investment (`100@8$ + 100@9$ + 150@11$`). As such - the final relative profit is 28.35% (`950 / 3350`).
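
The same numbers can be re-derived with a few lines of plain Python (zero fees, matching the example above):

``` python
# Two buys -> average entry price
fills = [(100, 8.0), (100, 9.0)]
avg = sum(a * p for a, p in fills) / sum(a for a, _ in fills)      # 8.5

realized = 100 * (10.0 - avg)                                      # sell 100@10 -> 150$

# 100 units remain at the unchanged average, then buy 150@11
fills = [(100, avg), (150, 11.0)]
avg = sum(a * p for a, p in fills) / sum(a for a, _ in fills)      # 10.0

realized += 100 * (12.0 - avg)                                     # sell 100@12 -> total 350$
realized += 150 * (14.0 - avg)                                     # sell 150@14 -> total 950$

invested = 100 * 8 + 100 * 9 + 150 * 11                            # 3350$
print(realized, round(realized / invested, 4))                     # 950.0 0.2836
```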
|
||||
|
||||
## Adjust Entry Price
|
||||
|
||||
The `adjust_entry_price()` callback may be used by the strategy developer to refresh/replace limit orders upon arrival of new candles.
|
||||
Be aware that `custom_entry_price()` is still the one dictating initial entry limit order price target at the time of entry trigger.
|
||||
|
||||
Orders can be cancelled out of this callback by returning `None`.
|
||||
|
||||
Returning `current_order_rate` will keep the order on the exchange "as is".
|
||||
Returning any other price will cancel the existing order, and replace it with a new order.
|
||||
|
||||
The trade open-date (`trade.open_date_utc`) will remain at the time of the very first order placed.
|
||||
Please make sure to be aware of this - and if necessary adjust your logic in other callbacks to account for it, using the date of the first filled order instead.
|
||||
|
||||
!!! Warning "Regular timeout"
|
||||
Entry `unfilledtimeout` mechanism (as well as `check_entry_timeout()`) takes precedence over this.
|
||||
Entry Orders that are cancelled via the above methods will not have this callback called. Be sure to update timeout values to match your expectations.
|
||||
|
||||
```python
|
||||
from freqtrade.persistence import Trade
|
||||
from datetime import timedelta
|
||||
|
||||
class AwesomeStrategy(IStrategy):
|
||||
|
||||
# ... populate_* methods
|
||||
|
||||
def adjust_entry_price(self, trade: Trade, order: Optional[Order], pair: str,
|
||||
current_time: datetime, proposed_rate: float, current_order_rate: float,
|
||||
entry_tag: Optional[str], side: str, **kwargs) -> float:
|
||||
"""
|
||||
Entry price re-adjustment logic, returning the user desired limit price.
|
||||
This only executes when an order was already placed, is still open (unfilled fully or partially) and has not timed out on subsequent candles after entry trigger.
|
||||
and not timed out on subsequent candles after entry trigger.
|
||||
|
||||
When not implemented by a strategy, returns current_order_rate as default.
|
||||
If current_order_rate is returned then the existing order is maintained.
|
||||
If None is returned then order gets canceled but not replaced by a new one.
|
||||
|
||||
:param pair: Pair that's currently analyzed
|
||||
:param trade: Trade object.
|
||||
:param order: Order object
|
||||
:param current_time: datetime object, containing the current datetime
|
||||
:param proposed_rate: Rate, calculated based on pricing settings in entry_pricing.
|
||||
:param current_order_rate: Rate of the existing order in place.
|
||||
:param entry_tag: Optional entry_tag (buy_tag) if provided with the buy signal.
|
||||
:param side: 'long' or 'short' - indicating the direction of the proposed trade
|
||||
:param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
|
||||
:return float: New entry price value if provided
|
||||
|
||||
"""
|
||||
# Limit orders to use and follow SMA200 as price target for the first 10 minutes since entry trigger for BTC/USDT pair.
|
||||
if pair == 'BTC/USDT' and entry_tag == 'long_sma200' and side == 'long' and (current_time - timedelta(minutes=10)) > trade.open_date_utc:
|
||||
# just cancel the order if it has been filled more than half of the amount
|
||||
if order.filled > order.remaining:
|
||||
return None
|
||||
else:
|
||||
dataframe, _ = self.dp.get_analyzed_dataframe(pair=pair, timeframe=self.timeframe)
|
||||
current_candle = dataframe.iloc[-1].squeeze()
|
||||
# desired price
|
||||
return current_candle['sma_200']
|
||||
# default: maintain existing order
|
||||
return current_order_rate
|
||||
```
|
||||
|
||||
## Leverage Callback
|
||||
|
||||
When trading in markets that allow leverage, this method must return the desired Leverage (Defaults to 1 -> No leverage).
|
||||
@@ -724,19 +850,23 @@ For markets / exchanges that don't support leverage, this method is ignored.
|
||||
|
||||
``` python
|
||||
class AwesomeStrategy(IStrategy):
|
||||
def leverage(self, pair: str, current_time: 'datetime', current_rate: float,
|
||||
proposed_leverage: float, max_leverage: float, side: str,
|
||||
def leverage(self, pair: str, current_time: datetime, current_rate: float,
|
||||
proposed_leverage: float, max_leverage: float, entry_tag: Optional[str], side: str,
|
||||
**kwargs) -> float:
|
||||
"""
|
||||
Customize leverage for each new trade.
|
||||
Customize leverage for each new trade. This method is only called in futures mode.
|
||||
|
||||
:param pair: Pair that's currently analyzed
|
||||
:param current_time: datetime object, containing the current datetime
|
||||
:param current_rate: Rate, calculated based on pricing settings in exit_pricing.
|
||||
:param proposed_leverage: A leverage proposed by the bot.
|
||||
:param max_leverage: Max leverage allowed on this pair
|
||||
:param entry_tag: Optional entry_tag (buy_tag) if provided with the buy signal.
|
||||
:param side: 'long' or 'short' - indicating the direction of the proposed trade
|
||||
:return: A leverage amount, which is between 1.0 and max_leverage.
|
||||
"""
|
||||
return 1.0
|
||||
```
|
||||
|
||||
All profit calculations include leverage. Stoploss / ROI also include leverage in their calculation.
|
||||
Defining a stoploss of 10% at 10x leverage would trigger the stoploss with a 1% move to the downside.
|
||||
|
@@ -617,9 +617,8 @@ Please always check the mode of operation to select the correct method to get da
|
||||
### *available_pairs*
|
||||
|
||||
``` python
|
||||
if self.dp:
|
||||
for pair, timeframe in self.dp.available_pairs:
|
||||
print(f"available {pair}, {timeframe}")
|
||||
for pair, timeframe in self.dp.available_pairs:
|
||||
print(f"available {pair}, {timeframe}")
|
||||
```
|
||||
|
||||
### *current_whitelist()*
|
||||
@@ -630,7 +629,7 @@ The strategy might look something like this:
|
||||
|
||||
*Scan through the top 10 pairs by volume using the `VolumePairList` every 5 minutes and use a 14 day RSI to buy and sell.*
|
||||
|
||||
Due to the limited available data, it's very difficult to resample `5m` candles into daily candles for use in a 14 day RSI. Most exchanges limit us to just 500 candles which effectively gives us around 1.74 daily candles. We need 14 days at least!
|
||||
Due to the limited available data, it's very difficult to resample `5m` candles into daily candles for use in a 14 day RSI. Most exchanges limit us to just 500-1000 candles which effectively gives us around 1.74 daily candles. We need 14 days at least!
|
||||
|
||||
Since we can't resample the data we will have to use an informative pair; and since the whitelist will be dynamic we don't know which pair(s) to use.
|
||||
|
||||
@@ -646,14 +645,16 @@ This is where calling `self.dp.current_whitelist()` comes in handy.
|
||||
return informative_pairs
|
||||
```
|
||||
|
||||
??? Note "Plotting with current_whitelist"
|
||||
Current whitelist is not supported for `plot-dataframe`, as this command is usually used by providing an explicit pairlist - and would therefore make the return values of this method misleading.
|
||||
|
||||
### *get_pair_dataframe(pair, timeframe)*
|
||||
|
||||
``` python
|
||||
# fetch live / historical candle (OHLCV) data for the first informative pair
|
||||
if self.dp:
|
||||
inf_pair, inf_timeframe = self.informative_pairs()[0]
|
||||
informative = self.dp.get_pair_dataframe(pair=inf_pair,
|
||||
timeframe=inf_timeframe)
|
||||
inf_pair, inf_timeframe = self.informative_pairs()[0]
|
||||
informative = self.dp.get_pair_dataframe(pair=inf_pair,
|
||||
timeframe=inf_timeframe)
|
||||
```
|
||||
|
||||
!!! Warning "Warning about backtesting"
|
||||
@@ -668,10 +669,9 @@ It can also be used in specific callbacks to get the signal that caused the acti
|
||||
|
||||
``` python
|
||||
# fetch current dataframe
|
||||
if self.dp:
|
||||
if self.dp.runmode.value in ('live', 'dry_run'):
|
||||
dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=metadata['pair'],
|
||||
timeframe=self.timeframe)
|
||||
if self.dp.runmode.value in ('live', 'dry_run'):
|
||||
dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=metadata['pair'],
|
||||
timeframe=self.timeframe)
|
||||
```
|
||||
|
||||
!!! Note "No data available"
|
||||
@@ -681,11 +681,10 @@ if self.dp:
|
||||
### *orderbook(pair, maximum)*
|
||||
|
||||
``` python
|
||||
if self.dp:
|
||||
if self.dp.runmode.value in ('live', 'dry_run'):
|
||||
ob = self.dp.orderbook(metadata['pair'], 1)
|
||||
dataframe['best_bid'] = ob['bids'][0][0]
|
||||
dataframe['best_ask'] = ob['asks'][0][0]
|
||||
if self.dp.runmode.value in ('live', 'dry_run'):
|
||||
ob = self.dp.orderbook(metadata['pair'], 1)
|
||||
dataframe['best_bid'] = ob['bids'][0][0]
|
||||
dataframe['best_ask'] = ob['asks'][0][0]
|
||||
```
|
||||
|
||||
The orderbook structure is aligned with the order structure from [ccxt](https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure), so the result will look as follows:
|
||||
@@ -714,12 +713,11 @@ Therefore, using `ob['bids'][0][0]` as demonstrated above will result in using t
|
||||
### *ticker(pair)*
|
||||
|
||||
``` python
|
||||
if self.dp:
|
||||
if self.dp.runmode.value in ('live', 'dry_run'):
|
||||
ticker = self.dp.ticker(metadata['pair'])
|
||||
dataframe['last_price'] = ticker['last']
|
||||
dataframe['volume24h'] = ticker['quoteVolume']
|
||||
dataframe['vwap'] = ticker['vwap']
|
||||
if self.dp.runmode.value in ('live', 'dry_run'):
|
||||
ticker = self.dp.ticker(metadata['pair'])
|
||||
dataframe['last_price'] = ticker['last']
|
||||
dataframe['volume24h'] = ticker['quoteVolume']
|
||||
dataframe['vwap'] = ticker['vwap']
|
||||
```
|
||||
|
||||
!!! Warning
|
||||
@@ -729,7 +727,24 @@ if self.dp:
|
||||
data returned from the exchange and add appropriate error handling / defaults.
|
||||
|
||||
!!! Warning "Warning about backtesting"
|
||||
This method will always return up-to-date values - so usage during backtesting / hyperopt will lead to wrong results.
|
||||
This method will always return up-to-date values - so usage during backtesting / hyperopt without runmode checks will lead to wrong results.
|
||||
|
||||
### Send Notification
|
||||
|
||||
The dataprovider `.send_msg()` function allows you to send custom notifications from your strategy.
|
||||
Identical notifications will only be sent once per candle, unless the 2nd argument (`always_send`) is set to True.
|
||||
|
||||
``` python
|
||||
self.dp.send_msg(f"{metadata['pair']} just got hot!")
|
||||
|
||||
# Force send this notification, avoid caching (Please read warning below!)
|
||||
self.dp.send_msg(f"{metadata['pair']} just got hot!", always_send=True)
|
||||
```
|
||||
|
||||
Notifications will only be sent in trading modes (Live/Dry-run) - so this method can be called without conditions for backtesting.
|
||||
|
||||
!!! Warning "Spamming"
|
||||
You can spam yourself pretty badly by setting `always_send=True` in this method. Use this with great care and only in conditions you know will not happen throughout a candle to avoid a message every 5 seconds.
|
||||
|
||||
### Complete Data-provider sample
|
||||
|
||||
|
@@ -14,7 +14,7 @@ from freqtrade.configuration import Configuration
|
||||
|
||||
# Initialize empty configuration object
|
||||
config = Configuration.from_files([])
|
||||
# Optionally, use existing configuration file
|
||||
# Optionally (recommended), use existing configuration file
|
||||
# config = Configuration.from_files(["config.json"])
|
||||
|
||||
# Define some constants
|
||||
@@ -22,7 +22,7 @@ config["timeframe"] = "5m"
|
||||
# Name of the strategy class
|
||||
config["strategy"] = "SampleStrategy"
|
||||
# Location of the data
|
||||
data_location = Path(config['user_data_dir'], 'data', 'binance')
|
||||
data_location = config['datadir']
|
||||
# Pair to analyze - Only use one pair here
|
||||
pair = "BTC/USDT"
|
||||
```
|
||||
@@ -31,11 +31,13 @@ pair = "BTC/USDT"
|
||||
```python
|
||||
# Load data using values set above
|
||||
from freqtrade.data.history import load_pair_history
|
||||
from freqtrade.enums import CandleType
|
||||
|
||||
candles = load_pair_history(datadir=data_location,
|
||||
timeframe=config["timeframe"],
|
||||
pair=pair,
|
||||
data_format = "hdf5",
|
||||
candle_type=CandleType.SPOT,
|
||||
)
|
||||
|
||||
# Confirm success
|
||||
@@ -93,7 +95,7 @@ from freqtrade.data.btanalysis import load_backtest_data, load_backtest_stats
|
||||
|
||||
# if backtest_dir points to a directory, it'll automatically load the last backtest file.
|
||||
backtest_dir = config["user_data_dir"] / "backtest_results"
|
||||
# backtest_dir can also point to a specific file
|
||||
# backtest_dir can also point to a specific file
|
||||
# backtest_dir = config["user_data_dir"] / "backtest_results/backtest-result-2020-07-01_20-04-22.json"
|
||||
```
|
||||
|
||||
|
@@ -18,7 +18,7 @@ Note : `forcesell`, `forcebuy`, `emergencysell` are changed to `force_exit`, `fo
|
||||
* [`check_buy_timeout()` -> `check_entry_timeout()`](#custom_entry_timeout)
|
||||
* [`check_sell_timeout()` -> `check_exit_timeout()`](#custom_entry_timeout)
|
||||
* New `side` argument to callbacks without trade object
|
||||
* [`custom_stake_amount`](#custom-stake-amount)
|
||||
* [`custom_stake_amount`](#custom_stake_amount)
|
||||
* [`confirm_trade_entry`](#confirm_trade_entry)
|
||||
* [`custom_entry_price`](#custom_entry_price)
|
||||
* [Changed argument name in `confirm_trade_exit`](#confirm_trade_exit)
|
||||
@@ -192,14 +192,14 @@ class AwesomeStrategy(IStrategy):
|
||||
return False
|
||||
```
|
||||
|
||||
### Custom-stake-amount
|
||||
### `custom_stake_amount`
|
||||
|
||||
New string argument `side` - which can be either `"long"` or `"short"`.
|
||||
|
||||
``` python hl_lines="4"
|
||||
class AwesomeStrategy(IStrategy):
|
||||
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
|
||||
proposed_stake: float, min_stake: float, max_stake: float,
|
||||
proposed_stake: float, min_stake: Optional[float], max_stake: float,
|
||||
entry_tag: Optional[str], **kwargs) -> float:
|
||||
# ...
|
||||
return proposed_stake
|
||||
@@ -208,7 +208,7 @@ class AwesomeStrategy(IStrategy):
|
||||
``` python hl_lines="4"
|
||||
class AwesomeStrategy(IStrategy):
|
||||
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
|
||||
proposed_stake: float, min_stake: float, max_stake: float,
|
||||
proposed_stake: float, min_stake: Optional[float], max_stake: float,
|
||||
entry_tag: Optional[str], side: str, **kwargs) -> float:
|
||||
# ...
|
||||
return proposed_stake
|
||||
|
@@ -97,7 +97,9 @@ Example configuration showing the different settings:
|
||||
"entry_fill": "off",
|
||||
"exit_fill": "off",
|
||||
"protection_trigger": "off",
|
||||
"protection_trigger_global": "on"
|
||||
"protection_trigger_global": "on",
|
||||
"strategy_msg": "off",
|
||||
"show_candle": "off"
|
||||
},
|
||||
"reload": true,
|
||||
"balance_dust_level": 0.01
|
||||
@@ -108,7 +110,8 @@ Example configuration showing the different settings:
|
||||
`exit` notifications are sent when the order is placed, while `exit_fill` notifications are sent when the order is filled on the exchange.
|
||||
`*_fill` notifications are off by default and must be explicitly enabled.
|
||||
`protection_trigger` notifications are sent when a protection triggers and `protection_trigger_global` notifications trigger when global protections are triggered.
|
||||
|
||||
`strategy_msg` - Receive notifications from the strategy, sent via `self.dp.send_msg()` from the strategy [more details](strategy-customization.md#send-notification).
|
||||
`show_candle` - show candle values as part of entry/exit messages. Only possible values are `"ohlc"` or `"off"`.
|
||||
|
||||
`balance_dust_level` will define what the `/balance` command takes as "dust" - Currencies with a balance below this will be shown.
|
||||
`reload` allows you to disable reload-buttons on selected messages.
|
||||
@@ -146,7 +149,7 @@ You can create your own keyboard in `config.json`:
|
||||
!!! Note "Supported Commands"
|
||||
Only the following commands are allowed. Command arguments are not supported!
|
||||
|
||||
`/start`, `/stop`, `/status`, `/status table`, `/trades`, `/profit`, `/performance`, `/daily`, `/stats`, `/count`, `/locks`, `/balance`, `/stopbuy`, `/reload_config`, `/show_config`, `/logs`, `/whitelist`, `/blacklist`, `/edge`, `/help`, `/version`
|
||||
`/start`, `/stop`, `/status`, `/status table`, `/trades`, `/profit`, `/performance`, `/daily`, `/stats`, `/count`, `/locks`, `/balance`, `/stopentry`, `/reload_config`, `/show_config`, `/logs`, `/whitelist`, `/blacklist`, `/edge`, `/help`, `/version`
|
||||
|
||||
## Telegram commands
|
||||
|
||||
@@ -158,7 +161,7 @@ official commands. You can ask at any moment for help with `/help`.
|
||||
|----------|-------------|
|
||||
| `/start` | Starts the trader
|
||||
| `/stop` | Stops the trader
|
||||
| `/stopbuy` | Stops the trader from opening new trades. Gracefully closes open trades according to their rules.
|
||||
| `/stopbuy | /stopentry` | Stops the trader from opening new trades. Gracefully closes open trades according to their rules.
|
||||
| `/reload_config` | Reloads the configuration file
|
||||
| `/show_config` | Shows part of the current configuration with relevant settings to operation
|
||||
| `/logs [limit]` | Show last log messages.
|
||||
@@ -171,8 +174,8 @@ official commands. You can ask at any moment for help with `/help`.
|
||||
| `/locks` | Show currently locked pairs.
|
||||
| `/unlock <pair or lock_id>` | Remove the lock for this pair (or for this lock id).
|
||||
| `/profit [<n>]` | Display a summary of your profit/loss from close trades and some stats about your performance, over the last n days (all trades by default)
|
||||
| `/forceexit <trade_id>` | Instantly exits the given trade (Ignoring `minimum_roi`).
|
||||
| `/forceexit all` | Instantly exits all open trades (Ignoring `minimum_roi`).
|
||||
| `/forceexit <trade_id> | /fx <tradeid>` | Instantly exits the given trade (Ignoring `minimum_roi`).
|
||||
| `/forceexit all | /fx all` | Instantly exits all open trades (Ignoring `minimum_roi`).
|
||||
| `/fx` | alias for `/forceexit`
|
||||
| `/forcelong <pair> [rate]` | Instantly buys the given pair. Rate is optional and only applies to limit orders. (`force_entry_enable` must be set to True)
|
||||
| `/forceshort <pair> [rate]` | Instantly shorts the given pair. Rate is optional and only applies to limit orders. This will only work on non-spot markets. (`force_entry_enable` must be set to True)
|
||||
@@ -184,7 +187,7 @@ official commands. You can ask at any moment for help with `/help`.
|
||||
| `/stats` | Shows Wins / losses by Exit reason as well as Avg. holding durations for buys and sells
|
||||
| `/exits` | Shows Wins / losses by Exit reason as well as Avg. holding durations for buys and sells
|
||||
| `/entries` | Shows Wins / losses by Exit reason as well as Avg. holding durations for buys and sells
|
||||
| `/whitelist` | Show the current whitelist
|
||||
| `/whitelist [sorted] [baseonly]` | Show the current whitelist. Optionally display in alphabetical order and/or with just the base currency of each pairing.
|
||||
| `/blacklist [pair]` | Show the current blacklist, or adds a pair to the blacklist.
|
||||
| `/edge` | Show validated pairs by Edge if it is enabled.
|
||||
| `/help` | Show help message
|
||||
@@ -270,10 +273,15 @@ Return a summary of your profit/loss and performance.
|
||||
> **Latest Trade opened:** `2 minutes ago`
|
||||
> **Avg. Duration:** `2:33:45`
|
||||
> **Best Performing:** `PAY/BTC: 50.23%`
|
||||
> **Trading volume:** `0.5 BTC`
|
||||
> **Profit factor:** `1.04`
|
||||
> **Max Drawdown:** `9.23% (0.01255 BTC)`
|
||||
|
||||
The relative profit of `1.2%` is the average profit per trade.
|
||||
The relative profit of `15.2 Σ%` is based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`.
|
||||
Starting capital is either taken from the `available_capital` setting, or calculated by using current wallet size - profits.
|
||||
The relative profit of `15.2 Σ%` is based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`.
|
||||
Starting capital is either taken from the `available_capital` setting, or calculated by using current wallet size - profits.
|
||||
Profit Factor is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy.
|
||||
Max drawdown corresponds to the backtesting metric `Absolute Drawdown (Account)` - calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)`.
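
For illustration, with hypothetical numbers (not taken from the message above):

``` python
gross_profits = 2.08
gross_losses = 2.00
profit_factor = gross_profits / gross_losses                 # 1.04

absolute_drawdown = 0.01255
drawdown_high = 0.131        # equity high-water mark above the starting balance
starting_balance = 0.005
max_drawdown = absolute_drawdown / (drawdown_high + starting_balance)  # ~0.0923 -> 9.23%

print(round(profit_factor, 2), f"{max_drawdown:.2%}")
```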
|
||||
|
||||
### /forceexit <trade_id>
|
||||
|
||||
@@ -281,6 +289,7 @@ Starting capital is either taken from the `available_capital` setting, or calcul
|
||||
|
||||
!!! Tip
|
||||
You can get a list of all open trades by calling `/forceexit` without parameter, which will show a list of buttons to simply exit a trade.
|
||||
This command has an alias in `/fx` - which has the same capabilities, but is faster to type in "emergency" situations.
|
||||
|
||||
### /forcelong <pair> [rate] | /forceshort <pair> [rate]
|
||||
|
||||
@@ -328,11 +337,11 @@ Per default `/daily` will return the 7 last days. The example below if for `/dai
|
||||
|
||||
> **Daily Profit over the last 3 days:**
|
||||
```
|
||||
Day Profit BTC Profit USD
|
||||
---------- -------------- ------------
|
||||
2018-01-03 0.00224175 BTC 29,142 USD
|
||||
2018-01-02 0.00033131 BTC 4,307 USD
|
||||
2018-01-01 0.00269130 BTC 34.986 USD
|
||||
Day (count) USDT USD Profit %
|
||||
-------------- ------------ ---------- ----------
|
||||
2022-06-11 (1) -0.746 USDT -0.75 USD -0.08%
|
||||
2022-06-10 (0) 0 USDT 0.00 USD 0.00%
|
||||
2022-06-09 (5) 20 USDT 20.10 USD 5.00%
|
||||
```
|
||||
|
||||
### /weekly <n>
|
||||
@@ -342,11 +351,11 @@ from Monday. The example below if for `/weekly 3`:
|
||||
|
||||
> **Weekly Profit over the last 3 weeks (starting from Monday):**
|
||||
```
|
||||
Monday Profit BTC Profit USD
|
||||
---------- -------------- ------------
|
||||
2018-01-03 0.00224175 BTC 29,142 USD
|
||||
2017-12-27 0.00033131 BTC 4,307 USD
|
||||
2017-12-20 0.00269130 BTC 34.986 USD
|
||||
Monday (count) Profit BTC Profit USD Profit %
|
||||
------------- -------------- ------------ ----------
|
||||
2018-01-03 (5) 0.00224175 BTC 29,142 USD 4.98%
|
||||
2017-12-27 (1) 0.00033131 BTC 4,307 USD 0.00%
|
||||
2017-12-20 (4) 0.00269130 BTC 34.986 USD 5.12%
|
||||
```
|
||||
|
||||
### /monthly <n>
|
||||
@@ -356,11 +365,11 @@ if for `/monthly 3`:
|
||||
|
||||
> **Monthly Profit over the last 3 months:**
|
||||
```
|
||||
Month Profit BTC Profit USD
|
||||
---------- -------------- ------------
|
||||
2018-01 0.00224175 BTC 29,142 USD
|
||||
2017-12 0.00033131 BTC 4,307 USD
|
||||
2017-11 0.00269130 BTC 34.986 USD
|
||||
Month (count) Profit BTC Profit USD Profit %
|
||||
------------- -------------- ------------ ----------
|
||||
2018-01 (20) 0.00224175 BTC 29,142 USD 4.98%
|
||||
2017-12 (5) 0.00033131 BTC 4,307 USD 0.00%
|
||||
2017-11 (10) 0.00269130 BTC 34.986 USD 5.10%
|
||||
```
|
||||
|
||||
### /whitelist
|
||||
|
@@ -32,4 +32,8 @@ Please ensure that you're also updating dependencies - otherwise things might br
|
||||
``` bash
|
||||
git pull
|
||||
pip install -U -r requirements.txt
|
||||
pip install -e .
|
||||
|
||||
# Ensure freqUI is at the latest version
|
||||
freqtrade install-ui
|
||||
```
|
||||
|
docs/utils.md
@@ -119,6 +119,7 @@ This subcommand is useful for finding problems in your environment with loading
|
||||
usage: freqtrade list-strategies [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
||||
[-d PATH] [--userdir PATH]
|
||||
[--strategy-path PATH] [-1] [--no-color]
|
||||
[--recursive-strategy-search]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
@@ -126,6 +127,9 @@ optional arguments:
|
||||
-1, --one-column Print output in one column.
|
||||
--no-color Disable colorization of hyperopt results. May be
|
||||
useful if you are redirecting output to a file.
|
||||
--recursive-strategy-search
|
||||
Recursively search for a strategy in the strategies
|
||||
folder.
|
||||
|
||||
Common arguments:
|
||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||
@@ -134,9 +138,10 @@ Common arguments:
|
||||
details.
|
||||
-V, --version show program's version number and exit
|
||||
-c PATH, --config PATH
|
||||
Specify configuration file (default: `config.json`).
|
||||
Multiple --config options may be used. Can be set to
|
||||
`-` to read config from stdin.
|
||||
Specify configuration file (default:
|
||||
`userdir/config.json` or `config.json` whichever
|
||||
exists). Multiple --config options may be used. Can be
|
||||
set to `-` to read config from stdin.
|
||||
-d PATH, --datadir PATH
|
||||
Path to directory with historical backtesting data.
|
||||
--userdir PATH, --user-data-dir PATH
|
||||
@@ -549,6 +554,27 @@ Show whitelist when using a [dynamic pairlist](plugins.md#pairlists).
|
||||
freqtrade test-pairlist --config config.json --quote USDT BTC
|
||||
```
|
||||
|
||||
## Convert database
|
||||
|
||||
`freqtrade convert-db` can be used to convert your database from one system to another (sqlite -> postgres, postgres -> other postgres), migrating all trades, orders and Pairlocks.
|
||||
|
||||
Please refer to the [SQL cheatsheet](sql_cheatsheet.md#use-a-different-database-system) to learn about requirements for different database systems.
|
||||
|
||||
```
|
||||
usage: freqtrade convert-db [-h] [--db-url PATH] [--db-url-from PATH]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
--db-url PATH Override trades database URL, this is useful in custom
|
||||
deployments (default: `sqlite:///tradesv3.sqlite` for
|
||||
Live Run mode, `sqlite:///tradesv3.dryrun.sqlite` for
|
||||
Dry Run).
|
||||
--db-url-from PATH Source db url to use when migrating a database.
|
||||
```
|
||||
|
||||
!!! Warning
|
||||
Please ensure to only use this on an empty target database. Freqtrade will perform a regular migration, but may fail if entries already existed.
|
||||
|
||||
## Webserver mode

!!! Warning "Experimental"
@@ -585,6 +611,26 @@ Common arguments:

```

### Webserver mode - docker

You can also use webserver mode via docker.
Starting a one-off container requires the configuration of the port explicitly, as ports are not exposed by default.
You can use `docker-compose run --rm -p 127.0.0.1:8080:8080 freqtrade webserver` to start a one-off container that'll be removed once you stop it. This assumes that port 8080 is still available and no other bot is running on that port.

Alternatively, you can reconfigure the docker-compose file to have the command updated:

``` yml
command: >
  webserver
  --config /freqtrade/user_data/config.json
```

You can now use `docker-compose up` to start the webserver.
This assumes that the configuration has a webserver enabled and configured for docker (listening port = `0.0.0.0`).

!!! Tip
    Don't forget to reset the command back to the trade command if you want to start a live or dry-run bot.

## Show previous Backtest results

Allows you to show previous backtest results.
@@ -625,6 +671,61 @@ Common arguments:

```
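If you want to try this out, a minimal sketch of such a call could look as follows (the results filename is an assumed example, not taken from this page):

```bash
# Illustrative only - point the command at a previously exported backtest result file.
freqtrade backtesting-show --export-filename user_data/backtest_results/backtest-result.json
```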
## Detailed backtest analysis

Advanced backtest result analysis.

More details in the [Backtesting analysis](advanced-backtesting.md#analyze-the-buyentry-and-sellexit-tags) Section.

```
usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V]
                                      [-c PATH] [-d PATH] [--userdir PATH]
                                      [--export-filename PATH]
                                      [--analysis-groups {0,1,2,3,4} [{0,1,2,3,4} ...]]
                                      [--enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]]
                                      [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]]
                                      [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]]

optional arguments:
  -h, --help            show this help message and exit
  --export-filename PATH, --backtest-filename PATH
                        Use this filename for backtest results.Requires
                        `--export` to be set as well. Example: `--export-filen
                        ame=user_data/backtest_results/backtest_today.json`
  --analysis-groups {0,1,2,3,4} [{0,1,2,3,4} ...]
                        grouping output - 0: simple wins/losses by enter tag,
                        1: by enter_tag, 2: by enter_tag and exit_tag, 3: by
                        pair and enter_tag, 4: by pair, enter_ and exit_tag
                        (this can get quite large)
  --enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]
                        Comma separated list of entry signals to analyse.
                        Default: all. e.g. 'entry_tag_a,entry_tag_b'
  --exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]
                        Comma separated list of exit signals to analyse.
                        Default: all. e.g.
                        'exit_tag_a,roi,stop_loss,trailing_stop_loss'
  --indicator-list INDICATOR_LIST [INDICATOR_LIST ...]
                        Comma separated list of indicators to analyse. e.g.
                        'close,rsi,bb_lowerband,profit_abs'

Common arguments:
  -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
  --logfile FILE        Log to the file specified. Special values are:
                        'syslog', 'journald'. See the documentation for more
                        details.
  -V, --version         show program's version number and exit
  -c PATH, --config PATH
                        Specify configuration file (default:
                        `userdir/config.json` or `config.json` whichever
                        exists). Multiple --config options may be used. Can be
                        set to `-` to read config from stdin.
  -d PATH, --datadir PATH
                        Path to directory with historical backtesting data.
  --userdir PATH, --user-data-dir PATH
                        Path to userdata directory.

```
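As an illustration, a call like the following (config name and indicator selection are assumed examples) groups the output by enter tag (group 0) and by enter/exit tag combination (group 2), and prints a few indicator columns for each entry signal:

```bash
# Illustrative only - requires a prior backtest run with `--export signals`.
freqtrade backtesting-analysis -c config.json \
    --analysis-groups 0 2 \
    --indicator-list close rsi profit_abs
```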
## List Hyperopt results

You can list the hyperoptimization epochs the Hyperopt module evaluated previously with the `hyperopt-list` sub-command.
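A minimal sketch of such a call (the flag selection is illustrative) could be:

```bash
# Illustrative only - list profitable epochs without printing per-epoch details.
freqtrade hyperopt-list --profitable --no-details
```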
@@ -239,3 +239,52 @@ Possible parameters are:
The fields in `webhook.webhookstatus` are used for regular status messages (Started / Stopped / ...). Parameters are filled using string.format.

The only possible value here is `{status}`.

## Discord

A special form of webhooks is available for discord.
You can configure this as follows:

```json
"discord": {
    "enabled": true,
    "webhook_url": "https://discord.com/api/webhooks/<Your webhook URL ...>",
    "exit_fill": [
        {"Trade ID": "{trade_id}"},
        {"Exchange": "{exchange}"},
        {"Pair": "{pair}"},
        {"Direction": "{direction}"},
        {"Open rate": "{open_rate}"},
        {"Close rate": "{close_rate}"},
        {"Amount": "{amount}"},
        {"Open date": "{open_date:%Y-%m-%d %H:%M:%S}"},
        {"Close date": "{close_date:%Y-%m-%d %H:%M:%S}"},
        {"Profit": "{profit_amount} {stake_currency}"},
        {"Profitability": "{profit_ratio:.2%}"},
        {"Enter tag": "{enter_tag}"},
        {"Exit Reason": "{exit_reason}"},
        {"Strategy": "{strategy}"},
        {"Timeframe": "{timeframe}"},
    ],
    "entry_fill": [
        {"Trade ID": "{trade_id}"},
        {"Exchange": "{exchange}"},
        {"Pair": "{pair}"},
        {"Direction": "{direction}"},
        {"Open rate": "{open_rate}"},
        {"Amount": "{amount}"},
        {"Open date": "{open_date:%Y-%m-%d %H:%M:%S}"},
        {"Enter tag": "{enter_tag}"},
        {"Strategy": "{strategy} {timeframe}"},
    ]
}
```

The above represents the default (`exit_fill` and `entry_fill` are optional and will default to the above configuration) - modifications are obviously possible.

Available fields correspond to the fields for webhooks and are documented in the corresponding webhook sections.

The notifications will look as follows by default.
@@ -9,6 +9,7 @@ dependencies:
  - pandas
  - pip

  - py-find-1st
  - aiohttp
  - SQLAlchemy
  - python-telegram-bot
@@ -64,7 +65,7 @@ dependencies:

  - pip:
    - pycoingecko
    - py_find_1st
    # - py_find_1st
    - tables
    - pytest-random-order
    - ccxt
@@ -1,5 +1,5 @@
""" Freqtrade bot """
__version__ = '2022.4.2'
__version__ = '2022.8'

if 'dev' in __version__:
    try:
@@ -6,10 +6,12 @@ Contains all start-commands, subcommands and CLI Interface creation.
Note: Be careful with file-scoped imports in these subfiles.
    as they are parsed on startup, nothing containing optional modules should be loaded.
"""
from freqtrade.commands.analyze_commands import start_analysis_entries_exits
from freqtrade.commands.arguments import Arguments
from freqtrade.commands.build_config_commands import start_new_config
from freqtrade.commands.data_commands import (start_convert_data, start_convert_trades,
                                              start_download_data, start_list_data)
from freqtrade.commands.db_commands import start_convert_db
from freqtrade.commands.deploy_commands import (start_create_userdir, start_install_ui,
                                                start_new_strategy)
from freqtrade.commands.hyperopt_commands import start_hyperopt_list, start_hyperopt_show
69 freqtrade/commands/analyze_commands.py (Executable file)
@@ -0,0 +1,69 @@
import logging
from pathlib import Path
from typing import Any, Dict

from freqtrade.configuration import setup_utils_configuration
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException


logger = logging.getLogger(__name__)


def setup_analyze_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:
    """
    Prepare the configuration for the entry/exit reason analysis module
    :param args: Cli args from Arguments()
    :param method: Bot running mode
    :return: Configuration
    """
    config = setup_utils_configuration(args, method)

    no_unlimited_runmodes = {
        RunMode.BACKTEST: 'backtesting',
    }
    if method in no_unlimited_runmodes.keys():
        from freqtrade.data.btanalysis import get_latest_backtest_filename

        if 'exportfilename' in config:
            if config['exportfilename'].is_dir():
                btfile = Path(get_latest_backtest_filename(config['exportfilename']))
                signals_file = f"{config['exportfilename']}/{btfile.stem}_signals.pkl"
            else:
                if config['exportfilename'].exists():
                    btfile = Path(config['exportfilename'])
                    signals_file = f"{btfile.parent}/{btfile.stem}_signals.pkl"
                else:
                    raise OperationalException(f"{config['exportfilename']} does not exist.")
        else:
            raise OperationalException('exportfilename not in config.')

        if (not Path(signals_file).exists()):
            raise OperationalException(
                (f"Cannot find latest backtest signals file: {signals_file}."
                 "Run backtesting with `--export signals`.")
            )

    return config


def start_analysis_entries_exits(args: Dict[str, Any]) -> None:
    """
    Start analysis script
    :param args: Cli args from Arguments()
    :return: None
    """
    from freqtrade.data.entryexitanalysis import process_entry_exit_reasons

    # Initialize configuration
    config = setup_analyze_configuration(args, RunMode.BACKTEST)

    logger.info('Starting freqtrade in analysis mode')

    process_entry_exit_reasons(config['exportfilename'],
                               config['exchange']['pair_whitelist'],
                               config['analysis_groups'],
                               config['enter_reason_list'],
                               config['exit_reason_list'],
                               config['indicator_list']
                               )
@@ -12,7 +12,8 @@ from freqtrade.constants import DEFAULT_CONFIG
|
||||
|
||||
ARGS_COMMON = ["verbosity", "logfile", "version", "config", "datadir", "user_data_dir"]
|
||||
|
||||
ARGS_STRATEGY = ["strategy", "strategy_path", "recursive_strategy_search"]
|
||||
ARGS_STRATEGY = ["strategy", "strategy_path", "recursive_strategy_search", "freqaimodel",
|
||||
"freqaimodel_path"]
|
||||
|
||||
ARGS_TRADE = ["db_url", "sd_notify", "dry_run", "dry_run_wallet", "fee"]
|
||||
|
||||
@@ -28,12 +29,12 @@ ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + ["position_stacking", "use_max_market_pos
|
||||
|
||||
ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path",
|
||||
"position_stacking", "use_max_market_positions",
|
||||
"enable_protections", "dry_run_wallet",
|
||||
"enable_protections", "dry_run_wallet", "timeframe_detail",
|
||||
"epochs", "spaces", "print_all",
|
||||
"print_colorized", "print_json", "hyperopt_jobs",
|
||||
"hyperopt_random_state", "hyperopt_min_trades",
|
||||
"hyperopt_loss", "disableparamexport",
|
||||
"hyperopt_ignore_missing_space"]
|
||||
"hyperopt_ignore_missing_space", "analyze_per_epoch"]
|
||||
|
||||
ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"]
|
||||
|
||||
@@ -68,11 +69,12 @@ ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes", "exchange", "tradin
|
||||
|
||||
ARGS_CONVERT_TRADES = ["pairs", "timeframes", "exchange", "dataformat_ohlcv", "dataformat_trades"]
|
||||
|
||||
ARGS_LIST_DATA = ["exchange", "dataformat_ohlcv", "pairs", "trading_mode"]
|
||||
ARGS_LIST_DATA = ["exchange", "dataformat_ohlcv", "pairs", "trading_mode", "show_timerange"]
|
||||
|
||||
ARGS_DOWNLOAD_DATA = ["pairs", "pairs_file", "days", "new_pairs_days", "include_inactive",
|
||||
"timerange", "download_trades", "exchange", "timeframes",
|
||||
"erase", "dataformat_ohlcv", "dataformat_trades", "trading_mode"]
|
||||
"erase", "dataformat_ohlcv", "dataformat_trades", "trading_mode",
|
||||
"prepend_data"]
|
||||
|
||||
ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", "plot_limit",
|
||||
"db_url", "trade_source", "export", "exportfilename",
|
||||
@@ -81,7 +83,9 @@ ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", "plot_limit",
|
||||
ARGS_PLOT_PROFIT = ["pairs", "timerange", "export", "exportfilename", "db_url",
|
||||
"trade_source", "timeframe", "plot_auto_open", ]
|
||||
|
||||
ARGS_INSTALL_UI = ["erase_ui_only", 'ui_version']
|
||||
ARGS_CONVERT_DB = ["db_url", "db_url_from"]
|
||||
|
||||
ARGS_INSTALL_UI = ["erase_ui_only", "ui_version"]
|
||||
|
||||
ARGS_SHOW_TRADES = ["db_url", "trade_ids", "print_json"]
|
||||
|
||||
@@ -98,6 +102,9 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop
|
||||
"print_json", "hyperoptexportfilename", "hyperopt_show_no_header",
|
||||
"disableparamexport", "backtest_breakdown"]
|
||||
|
||||
ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
|
||||
"exit_reason_list", "indicator_list"]
|
||||
|
||||
NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
|
||||
"list-markets", "list-pairs", "list-strategies", "list-data",
|
||||
"hyperopt-list", "hyperopt-show", "backtest-filter",
|
||||
@@ -179,8 +186,9 @@ class Arguments:
|
||||
self.parser = argparse.ArgumentParser(description='Free, open source crypto trading bot')
|
||||
self._build_args(optionlist=['version'], parser=self.parser)
|
||||
|
||||
from freqtrade.commands import (start_backtesting, start_backtesting_show,
|
||||
start_convert_data, start_convert_trades,
|
||||
from freqtrade.commands import (start_analysis_entries_exits, start_backtesting,
|
||||
start_backtesting_show, start_convert_data,
|
||||
start_convert_db, start_convert_trades,
|
||||
start_create_userdir, start_download_data, start_edge,
|
||||
start_hyperopt, start_hyperopt_list, start_hyperopt_show,
|
||||
start_install_ui, start_list_data, start_list_exchanges,
|
||||
@@ -280,6 +288,13 @@ class Arguments:
|
||||
backtesting_show_cmd.set_defaults(func=start_backtesting_show)
|
||||
self._build_args(optionlist=ARGS_BACKTEST_SHOW, parser=backtesting_show_cmd)
|
||||
|
||||
# Add backtesting analysis subcommand
|
||||
analysis_cmd = subparsers.add_parser('backtesting-analysis',
|
||||
help='Backtest Analysis module.',
|
||||
parents=[_common_parser])
|
||||
analysis_cmd.set_defaults(func=start_analysis_entries_exits)
|
||||
self._build_args(optionlist=ARGS_ANALYZE_ENTRIES_EXITS, parser=analysis_cmd)
|
||||
|
||||
# Add edge subcommand
|
||||
edge_cmd = subparsers.add_parser('edge', help='Edge module.',
|
||||
parents=[_common_parser, _strategy_parser])
|
||||
@@ -373,6 +388,14 @@ class Arguments:
|
||||
test_pairlist_cmd.set_defaults(func=start_test_pairlist)
|
||||
self._build_args(optionlist=ARGS_TEST_PAIRLIST, parser=test_pairlist_cmd)
|
||||
|
||||
# Add db-convert subcommand
|
||||
convert_db = subparsers.add_parser(
|
||||
"convert-db",
|
||||
help="Migrate database to different system",
|
||||
)
|
||||
convert_db.set_defaults(func=start_convert_db)
|
||||
self._build_args(optionlist=ARGS_CONVERT_DB, parser=convert_db)
|
||||
|
||||
# Add install-ui subcommand
|
||||
install_ui_cmd = subparsers.add_parser(
|
||||
'install-ui',
|
||||
|
@@ -67,7 +67,7 @@ def ask_user_config() -> Dict[str, Any]:
|
||||
"type": "text",
|
||||
"name": "stake_amount",
|
||||
"message": f"Please insert your stake amount (Number or '{UNLIMITED_STAKE_AMOUNT}'):",
|
||||
"default": "100",
|
||||
"default": "unlimited",
|
||||
"validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val),
|
||||
"filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"'
|
||||
if val == UNLIMITED_STAKE_AMOUNT
|
||||
@@ -164,7 +164,7 @@ def ask_user_config() -> Dict[str, Any]:
|
||||
"when": lambda x: x['telegram']
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"type": "password",
|
||||
"name": "telegram_chat_id",
|
||||
"message": "Insert Telegram chat id",
|
||||
"when": lambda x: x['telegram']
|
||||
@@ -191,7 +191,7 @@ def ask_user_config() -> Dict[str, Any]:
|
||||
"when": lambda x: x['api_server']
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"type": "password",
|
||||
"name": "api_server_password",
|
||||
"message": "Insert api-server password",
|
||||
"when": lambda x: x['api_server']
|
||||
|
@@ -106,6 +106,11 @@ AVAILABLE_CLI_OPTIONS = {
|
||||
f'`{constants.DEFAULT_DB_DRYRUN_URL}` for Dry Run).',
|
||||
metavar='PATH',
|
||||
),
|
||||
"db_url_from": Arg(
|
||||
'--db-url-from',
|
||||
help='Source db url to use when migrating a database.',
|
||||
metavar='PATH',
|
||||
),
|
||||
"sd_notify": Arg(
|
||||
'--sd-notify',
|
||||
help='Notify systemd service manager.',
|
||||
@@ -250,6 +255,13 @@ AVAILABLE_CLI_OPTIONS = {
|
||||
nargs='+',
|
||||
default='default',
|
||||
),
|
||||
"analyze_per_epoch": Arg(
|
||||
'--analyze-per-epoch',
|
||||
help='Run populate_indicators once per epoch.',
|
||||
action='store_true',
|
||||
default=False,
|
||||
),
|
||||
|
||||
"print_all": Arg(
|
||||
'--print-all',
|
||||
help='Print all results, not only the best ones.',
|
||||
@@ -362,7 +374,7 @@ AVAILABLE_CLI_OPTIONS = {
|
||||
metavar='BASE_CURRENCY',
|
||||
),
|
||||
"trading_mode": Arg(
|
||||
'--trading-mode',
|
||||
'--trading-mode', '--tradingmode',
|
||||
help='Select Trading mode',
|
||||
choices=constants.TRADING_MODES,
|
||||
),
|
||||
@@ -429,6 +441,11 @@ AVAILABLE_CLI_OPTIONS = {
|
||||
help='Storage format for downloaded trades data. (default: `jsongz`).',
|
||||
choices=constants.AVAILABLE_DATAHANDLERS,
|
||||
),
|
||||
"show_timerange": Arg(
|
||||
'--show-timerange',
|
||||
help='Show timerange available for available data. (May take a while to calculate).',
|
||||
action='store_true',
|
||||
),
|
||||
"exchange": Arg(
|
||||
'--exchange',
|
||||
help=f'Exchange name (default: `{constants.DEFAULT_EXCHANGE}`). '
|
||||
@@ -443,6 +460,11 @@ AVAILABLE_CLI_OPTIONS = {
|
||||
default=['1m', '5m'],
|
||||
nargs='+',
|
||||
),
|
||||
"prepend_data": Arg(
|
||||
'--prepend',
|
||||
help='Allow data prepending. (Data-appending is disabled)',
|
||||
action='store_true',
|
||||
),
|
||||
"erase": Arg(
|
||||
'--erase',
|
||||
help='Clean all existing data for the selected exchange/pairs/timeframes.',
|
||||
@@ -604,4 +626,47 @@ AVAILABLE_CLI_OPTIONS = {
|
||||
"that do not contain any parameters."),
|
||||
action="store_true",
|
||||
),
|
||||
"analysis_groups": Arg(
|
||||
"--analysis-groups",
|
||||
help=("grouping output - "
|
||||
"0: simple wins/losses by enter tag, "
|
||||
"1: by enter_tag, "
|
||||
"2: by enter_tag and exit_tag, "
|
||||
"3: by pair and enter_tag, "
|
||||
"4: by pair, enter_ and exit_tag (this can get quite large)"),
|
||||
nargs='+',
|
||||
default=['0', '1', '2'],
|
||||
choices=['0', '1', '2', '3', '4'],
|
||||
),
|
||||
"enter_reason_list": Arg(
|
||||
"--enter-reason-list",
|
||||
help=("Comma separated list of entry signals to analyse. Default: all. "
|
||||
"e.g. 'entry_tag_a,entry_tag_b'"),
|
||||
nargs='+',
|
||||
default=['all'],
|
||||
),
|
||||
"exit_reason_list": Arg(
|
||||
"--exit-reason-list",
|
||||
help=("Comma separated list of exit signals to analyse. Default: all. "
|
||||
"e.g. 'exit_tag_a,roi,stop_loss,trailing_stop_loss'"),
|
||||
nargs='+',
|
||||
default=['all'],
|
||||
),
|
||||
"indicator_list": Arg(
|
||||
"--indicator-list",
|
||||
help=("Comma separated list of indicators to analyse. "
|
||||
"e.g. 'close,rsi,bb_lowerband,profit_abs'"),
|
||||
nargs='+',
|
||||
default=[],
|
||||
),
|
||||
"freqaimodel": Arg(
|
||||
'--freqaimodel',
|
||||
help='Specify a custom freqaimodels.',
|
||||
metavar='NAME',
|
||||
),
|
||||
"freqaimodel_path": Arg(
|
||||
'--freqaimodel-path',
|
||||
help='Specify additional lookup path for freqaimodels.',
|
||||
metavar='PATH',
|
||||
),
|
||||
}
|
||||
|
@@ -5,14 +5,14 @@ from datetime import datetime, timedelta
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from freqtrade.configuration import TimeRange, setup_utils_configuration
|
||||
from freqtrade.constants import DATETIME_PRINT_FORMAT
|
||||
from freqtrade.data.converter import convert_ohlcv_format, convert_trades_format
|
||||
from freqtrade.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data,
|
||||
refresh_backtest_trades_data)
|
||||
from freqtrade.enums import CandleType, RunMode, TradingMode
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.exchange import timeframe_to_minutes
|
||||
from freqtrade.exchange.exchange import market_is_active
|
||||
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
|
||||
from freqtrade.exchange import market_is_active, timeframe_to_minutes
|
||||
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist, expand_pairlist
|
||||
from freqtrade.resolvers import ExchangeResolver
|
||||
|
||||
|
||||
@@ -50,7 +50,8 @@ def start_download_data(args: Dict[str, Any]) -> None:
|
||||
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
|
||||
markets = [p for p, m in exchange.markets.items() if market_is_active(m)
|
||||
or config.get('include_inactive')]
|
||||
expanded_pairs = expand_pairlist(config['pairs'], markets)
|
||||
|
||||
expanded_pairs = dynamic_expand_pairlist(config, markets)
|
||||
|
||||
# Manual validations of relevant settings
|
||||
if not config['exchange'].get('skip_pair_validation', False):
|
||||
@@ -79,12 +80,19 @@ def start_download_data(args: Dict[str, Any]) -> None:
|
||||
data_format_trades=config['dataformat_trades'],
|
||||
)
|
||||
else:
|
||||
if not exchange.get_option('ohlcv_has_history', True):
|
||||
raise OperationalException(
|
||||
f"Historic klines not available for {exchange.name}. "
|
||||
"Please use `--dl-trades` instead for this exchange "
|
||||
"(will unfortunately take a long time)."
|
||||
)
|
||||
pairs_not_available = refresh_backtest_ohlcv_data(
|
||||
exchange, pairs=expanded_pairs, timeframes=config['timeframes'],
|
||||
datadir=config['datadir'], timerange=timerange,
|
||||
new_pairs_days=config['new_pairs_days'],
|
||||
erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'],
|
||||
trading_mode=config.get('trading_mode', 'spot'),
|
||||
prepend=config.get('prepend_data', False)
|
||||
)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
@@ -169,17 +177,31 @@ def start_list_data(args: Dict[str, Any]) -> None:
|
||||
paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]
|
||||
|
||||
print(f"Found {len(paircombs)} pair / timeframe combinations.")
|
||||
groupedpair = defaultdict(list)
|
||||
for pair, timeframe, candle_type in sorted(
|
||||
paircombs,
|
||||
key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])
|
||||
):
|
||||
groupedpair[(pair, candle_type)].append(timeframe)
|
||||
if not config.get('show_timerange'):
|
||||
groupedpair = defaultdict(list)
|
||||
for pair, timeframe, candle_type in sorted(
|
||||
paircombs,
|
||||
key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])
|
||||
):
|
||||
groupedpair[(pair, candle_type)].append(timeframe)
|
||||
|
||||
if groupedpair:
|
||||
if groupedpair:
|
||||
print(tabulate([
|
||||
(pair, ', '.join(timeframes), candle_type)
|
||||
for (pair, candle_type), timeframes in groupedpair.items()
|
||||
],
|
||||
headers=("Pair", "Timeframe", "Type"),
|
||||
tablefmt='psql', stralign='right'))
|
||||
else:
|
||||
paircombs1 = [(
|
||||
pair, timeframe, candle_type,
|
||||
*dhc.ohlcv_data_min_max(pair, timeframe, candle_type)
|
||||
) for pair, timeframe, candle_type in paircombs]
|
||||
print(tabulate([
|
||||
(pair, ', '.join(timeframes), candle_type)
|
||||
for (pair, candle_type), timeframes in groupedpair.items()
|
||||
],
|
||||
headers=("Pair", "Timeframe", "Type"),
|
||||
(pair, timeframe, candle_type,
|
||||
start.strftime(DATETIME_PRINT_FORMAT),
|
||||
end.strftime(DATETIME_PRINT_FORMAT))
|
||||
for pair, timeframe, candle_type, start, end in paircombs1
|
||||
],
|
||||
headers=("Pair", "Timeframe", "Type", 'From', 'To'),
|
||||
tablefmt='psql', stralign='right'))
|
||||
|
55
freqtrade/commands/db_commands.py
Normal file
@@ -0,0 +1,55 @@
|
||||
import logging
|
||||
from typing import Any, Dict
|
||||
|
||||
from sqlalchemy import func
|
||||
|
||||
from freqtrade.configuration.config_setup import setup_utils_configuration
|
||||
from freqtrade.enums.runmode import RunMode
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def start_convert_db(args: Dict[str, Any]) -> None:
|
||||
from sqlalchemy.orm import make_transient
|
||||
|
||||
from freqtrade.persistence import Order, Trade, init_db
|
||||
from freqtrade.persistence.migrations import set_sequence_ids
|
||||
from freqtrade.persistence.pairlock import PairLock
|
||||
|
||||
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
|
||||
|
||||
init_db(config['db_url'])
|
||||
session_target = Trade._session
|
||||
init_db(config['db_url_from'])
|
||||
logger.info("Starting db migration.")
|
||||
|
||||
trade_count = 0
|
||||
pairlock_count = 0
|
||||
for trade in Trade.get_trades():
|
||||
trade_count += 1
|
||||
make_transient(trade)
|
||||
for o in trade.orders:
|
||||
make_transient(o)
|
||||
|
||||
session_target.add(trade)
|
||||
|
||||
session_target.commit()
|
||||
|
||||
for pairlock in PairLock.query:
|
||||
pairlock_count += 1
|
||||
make_transient(pairlock)
|
||||
session_target.add(pairlock)
|
||||
session_target.commit()
|
||||
|
||||
# Update sequences
|
||||
max_trade_id = session_target.query(func.max(Trade.id)).scalar()
|
||||
max_order_id = session_target.query(func.max(Order.id)).scalar()
|
||||
max_pairlock_id = session_target.query(func.max(PairLock.id)).scalar()
|
||||
|
||||
set_sequence_ids(session_target.get_bind(),
|
||||
trade_id=max_trade_id,
|
||||
order_id=max_order_id,
|
||||
pairlock_id=max_pairlock_id)
|
||||
|
||||
logger.info(f"Migrated {trade_count} Trades, and {pairlock_count} Pairlocks.")
|
@@ -24,7 +24,7 @@ def start_hyperopt_list(args: Dict[str, Any]) -> None:
|
||||
|
||||
print_colorized = config.get('print_colorized', False)
|
||||
print_json = config.get('print_json', False)
|
||||
export_csv = config.get('export_csv', None)
|
||||
export_csv = config.get('export_csv')
|
||||
no_details = config.get('hyperopt_list_no_details', False)
|
||||
no_header = False
|
||||
|
||||
|
@@ -212,7 +212,7 @@ def start_show_trades(args: Dict[str, Any]) -> None:
|
||||
raise OperationalException("--db-url is required for this command.")
|
||||
|
||||
logger.info(f'Using DB: "{parse_db_uri_for_logging(config["db_url"])}"')
|
||||
init_db(config['db_url'], clean_open_orders=False)
|
||||
init_db(config['db_url'])
|
||||
tfilter = []
|
||||
|
||||
if config.get('trade_ids'):
|
||||
|
@@ -4,5 +4,4 @@ from freqtrade.configuration.check_exchange import check_exchange
|
||||
from freqtrade.configuration.config_setup import setup_utils_configuration
|
||||
from freqtrade.configuration.config_validation import validate_config_consistency
|
||||
from freqtrade.configuration.configuration import Configuration
|
||||
from freqtrade.configuration.PeriodicCache import PeriodicCache
|
||||
from freqtrade.configuration.timerange import TimeRange
|
||||
|
@@ -27,7 +27,7 @@ def check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:
|
||||
return True
|
||||
logger.info("Checking exchange...")
|
||||
|
||||
exchange = config.get('exchange', {}).get('name').lower()
|
||||
exchange = config.get('exchange', {}).get('name', '').lower()
|
||||
if not exchange:
|
||||
raise OperationalException(
|
||||
f'This command requires a configured exchange. You should either use '
|
||||
|
@@ -95,6 +95,10 @@ class Configuration:
|
||||
|
||||
self._process_data_options(config)
|
||||
|
||||
self._process_analyze_options(config)
|
||||
|
||||
self._process_freqai_options(config)
|
||||
|
||||
# Check if the exchange set by the user is supported
|
||||
check_exchange(config, config.get('experimental', {}).get('block_bad_exchanges', True))
|
||||
|
||||
@@ -127,7 +131,7 @@ class Configuration:
|
||||
# Default to in-memory db for dry_run if not specified
|
||||
config['db_url'] = constants.DEFAULT_DB_DRYRUN_URL
|
||||
else:
|
||||
if not config.get('db_url', None):
|
||||
if not config.get('db_url'):
|
||||
config['db_url'] = constants.DEFAULT_DB_PROD_URL
|
||||
logger.info('Dry run is disabled')
|
||||
|
||||
@@ -147,6 +151,9 @@ class Configuration:
|
||||
config.update({'db_url': self.args['db_url']})
|
||||
logger.info('Parameter --db-url detected ...')
|
||||
|
||||
self._args_to_config(config, argname='db_url_from',
|
||||
logstring='Parameter --db-url-from detected ...')
|
||||
|
||||
if config.get('force_entry_enable', False):
|
||||
logger.warning('`force_entry_enable` RPC message enabled.')
|
||||
|
||||
@@ -177,7 +184,7 @@ class Configuration:
|
||||
config['user_data_dir'] = create_userdata_dir(config['user_data_dir'], create_dir=False)
|
||||
logger.info('Using user-data directory: %s ...', config['user_data_dir'])
|
||||
|
||||
config.update({'datadir': create_datadir(config, self.args.get('datadir', None))})
|
||||
config.update({'datadir': create_datadir(config, self.args.get('datadir'))})
|
||||
logger.info('Using data directory: %s ...', config.get('datadir'))
|
||||
|
||||
if self.args.get('exportfilename'):
|
||||
@@ -216,7 +223,7 @@ class Configuration:
|
||||
if config.get('max_open_trades') == -1:
|
||||
config['max_open_trades'] = float('inf')
|
||||
|
||||
if self.args.get('stake_amount', None):
|
||||
if self.args.get('stake_amount'):
|
||||
# Convert explicitly to float to support CLI argument for both unlimited and value
|
||||
try:
|
||||
self.args['stake_amount'] = float(self.args['stake_amount'])
|
||||
@@ -295,6 +302,9 @@ class Configuration:
|
||||
self._args_to_config(config, argname='spaces',
|
||||
logstring='Parameter -s/--spaces detected: {}')
|
||||
|
||||
self._args_to_config(config, argname='analyze_per_epoch',
|
||||
logstring='Parameter --analyze-per-epoch detected.')
|
||||
|
||||
self._args_to_config(config, argname='print_all',
|
||||
logstring='Parameter --print-all detected ...')
|
||||
|
||||
@@ -393,6 +403,8 @@ class Configuration:
|
||||
self._args_to_config(config, argname='trade_source',
|
||||
logstring='Using trades from: {}')
|
||||
|
||||
self._args_to_config(config, argname='prepend_data',
|
||||
logstring='Prepend detected. Allowing data prepending.')
|
||||
self._args_to_config(config, argname='erase',
|
||||
logstring='Erase detected. Deleting existing data.')
|
||||
|
||||
@@ -417,6 +429,9 @@ class Configuration:
|
||||
self._args_to_config(config, argname='dataformat_trades',
|
||||
logstring='Using "{}" to store trades data.')
|
||||
|
||||
self._args_to_config(config, argname='show_timerange',
|
||||
logstring='Detected --show-timerange')
|
||||
|
||||
def _process_data_options(self, config: Dict[str, Any]) -> None:
|
||||
self._args_to_config(config, argname='new_pairs_days',
|
||||
logstring='Detected --new-pairs-days: {}')
|
||||
@@ -428,6 +443,19 @@ class Configuration:
|
||||
self._args_to_config(config, argname='candle_types',
|
||||
logstring='Detected --candle-types: {}')
|
||||
|
||||
def _process_analyze_options(self, config: Dict[str, Any]) -> None:
|
||||
self._args_to_config(config, argname='analysis_groups',
|
||||
logstring='Analysis reason groups: {}')
|
||||
|
||||
self._args_to_config(config, argname='enter_reason_list',
|
||||
logstring='Analysis enter tag list: {}')
|
||||
|
||||
self._args_to_config(config, argname='exit_reason_list',
|
||||
logstring='Analysis exit tag list: {}')
|
||||
|
||||
self._args_to_config(config, argname='indicator_list',
|
||||
logstring='Analysis indicator list: {}')
|
||||
|
||||
def _process_runmode(self, config: Dict[str, Any]) -> None:
|
||||
|
||||
self._args_to_config(config, argname='dry_run',
|
||||
@@ -441,6 +469,16 @@ class Configuration:
|
||||
|
||||
config.update({'runmode': self.runmode})
|
||||
|
||||
def _process_freqai_options(self, config: Dict[str, Any]) -> None:
|
||||
|
||||
self._args_to_config(config, argname='freqaimodel',
|
||||
logstring='Using freqaimodel class name: {}')
|
||||
|
||||
self._args_to_config(config, argname='freqaimodel_path',
|
||||
logstring='Using freqaimodel path: {}')
|
||||
|
||||
return
|
||||
|
||||
def _args_to_config(self, config: Dict[str, Any], argname: str,
|
||||
logstring: str, logfun: Optional[Callable] = None,
|
||||
deprecated_msg: Optional[str] = None) -> None:
|
||||
@@ -454,7 +492,7 @@ class Configuration:
|
||||
configuration instead of the content)
|
||||
"""
|
||||
if (argname in self.args and self.args[argname] is not None
|
||||
and self.args[argname] is not False):
|
||||
and self.args[argname] is not False):
|
||||
|
||||
config.update({argname: self.args[argname]})
|
||||
if logfun:
|
||||
@@ -485,7 +523,8 @@ class Configuration:
|
||||
if not pairs_file.exists():
|
||||
raise OperationalException(f'No pairs file found with path "{pairs_file}".')
|
||||
config['pairs'] = load_file(pairs_file)
|
||||
config['pairs'].sort()
|
||||
if isinstance(config['pairs'], list):
|
||||
config['pairs'].sort()
|
||||
return
|
||||
|
||||
if 'config' in self.args and self.args['config']:
|
||||
@@ -496,5 +535,5 @@ class Configuration:
|
||||
pairs_file = config['datadir'] / 'pairs.json'
|
||||
if pairs_file.exists():
|
||||
config['pairs'] = load_file(pairs_file)
|
||||
if 'pairs' in config:
|
||||
if 'pairs' in config and isinstance(config['pairs'], list):
|
||||
config['pairs'].sort()
|
||||
|
@@ -113,7 +113,7 @@ def process_temporary_deprecated_settings(config: Dict[str, Any]) -> None:
|
||||
process_removed_setting(config, 'experimental', 'ignore_roi_if_buy_signal',
|
||||
None, 'ignore_roi_if_entry_signal')
|
||||
|
||||
process_removed_setting(config, 'ask_strategy', 'use_sell_signal', None, 'exit_sell_signal')
|
||||
process_removed_setting(config, 'ask_strategy', 'use_sell_signal', None, 'use_exit_signal')
|
||||
process_removed_setting(config, 'ask_strategy', 'sell_profit_only', None, 'exit_profit_only')
|
||||
process_removed_setting(config, 'ask_strategy', 'sell_profit_offset',
|
||||
None, 'exit_profit_offset')
|
||||
|
@@ -15,7 +15,7 @@ def create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Pat
|
||||
folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data")
|
||||
if not datadir:
|
||||
# set datadir
|
||||
exchange_name = config.get('exchange', {}).get('name').lower()
|
||||
exchange_name = config.get('exchange', {}).get('name', '').lower()
|
||||
folder = folder.joinpath(exchange_name)
|
||||
|
||||
if not folder.is_dir():
|
||||
|
@@ -28,7 +28,8 @@ HYPEROPT_LOSS_BUILTIN = ['ShortTradeDurHyperOptLoss', 'OnlyProfitHyperOptLoss',
|
||||
'SharpeHyperOptLoss', 'SharpeHyperOptLossDaily',
|
||||
'SortinoHyperOptLoss', 'SortinoHyperOptLossDaily',
|
||||
'CalmarHyperOptLoss',
|
||||
'MaxDrawDownHyperOptLoss', 'ProfitDrawDownHyperOptLoss']
|
||||
'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss',
|
||||
'ProfitDrawDownHyperOptLoss']
|
||||
AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList',
|
||||
'AgeFilter', 'OffsetFilter', 'PerformanceFilter',
|
||||
'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter',
|
||||
@@ -54,6 +55,7 @@ FTHYPT_FILEVERSION = 'fthypt_fileversion'
|
||||
USERPATH_HYPEROPTS = 'hyperopts'
|
||||
USERPATH_STRATEGIES = 'strategies'
|
||||
USERPATH_NOTEBOOKS = 'notebooks'
|
||||
USERPATH_FREQAIMODELS = 'freqaimodels'
|
||||
|
||||
TELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']
|
||||
WEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']
|
||||
@@ -239,6 +241,7 @@ CONF_SCHEMA = {
|
||||
},
|
||||
'exchange': {'$ref': '#/definitions/exchange'},
|
||||
'edge': {'$ref': '#/definitions/edge'},
|
||||
'freqai': {'$ref': '#/definitions/freqai'},
|
||||
'experimental': {
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
@@ -301,17 +304,25 @@ CONF_SCHEMA = {
|
||||
'exit_fill': {
|
||||
'type': 'string',
|
||||
'enum': TELEGRAM_SETTING_OPTIONS,
|
||||
'default': 'off'
|
||||
'default': 'on'
|
||||
},
|
||||
'protection_trigger': {
|
||||
'type': 'string',
|
||||
'enum': TELEGRAM_SETTING_OPTIONS,
|
||||
'default': 'off'
|
||||
'default': 'on'
|
||||
},
|
||||
'protection_trigger_global': {
|
||||
'type': 'string',
|
||||
'enum': TELEGRAM_SETTING_OPTIONS,
|
||||
},
|
||||
'show_candle': {
|
||||
'type': 'string',
|
||||
'enum': ['off', 'ohlc'],
|
||||
},
|
||||
'strategy_msg': {
|
||||
'type': 'string',
|
||||
'enum': TELEGRAM_SETTING_OPTIONS,
|
||||
},
|
||||
}
|
||||
},
|
||||
'reload': {'type': 'boolean'},
|
||||
@@ -335,6 +346,47 @@ CONF_SCHEMA = {
|
||||
'webhookstatus': {'type': 'object'},
|
||||
},
|
||||
},
|
||||
'discord': {
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'enabled': {'type': 'boolean'},
|
||||
'webhook_url': {'type': 'string'},
|
||||
"exit_fill": {
|
||||
'type': 'array', 'items': {'type': 'object'},
|
||||
'default': [
|
||||
{"Trade ID": "{trade_id}"},
|
||||
{"Exchange": "{exchange}"},
|
||||
{"Pair": "{pair}"},
|
||||
{"Direction": "{direction}"},
|
||||
{"Open rate": "{open_rate}"},
|
||||
{"Close rate": "{close_rate}"},
|
||||
{"Amount": "{amount}"},
|
||||
{"Open date": "{open_date:%Y-%m-%d %H:%M:%S}"},
|
||||
{"Close date": "{close_date:%Y-%m-%d %H:%M:%S}"},
|
||||
{"Profit": "{profit_amount} {stake_currency}"},
|
||||
{"Profitability": "{profit_ratio:.2%}"},
|
||||
{"Enter tag": "{enter_tag}"},
|
||||
{"Exit Reason": "{exit_reason}"},
|
||||
{"Strategy": "{strategy}"},
|
||||
{"Timeframe": "{timeframe}"},
|
||||
]
|
||||
},
|
||||
"entry_fill": {
|
||||
'type': 'array', 'items': {'type': 'object'},
|
||||
'default': [
|
||||
{"Trade ID": "{trade_id}"},
|
||||
{"Exchange": "{exchange}"},
|
||||
{"Pair": "{pair}"},
|
||||
{"Direction": "{direction}"},
|
||||
{"Open rate": "{open_rate}"},
|
||||
{"Amount": "{amount}"},
|
||||
{"Open date": "{open_date:%Y-%m-%d %H:%M:%S}"},
|
||||
{"Enter tag": "{enter_tag}"},
|
||||
{"Strategy": "{strategy} {timeframe}"},
|
||||
]
|
||||
},
|
||||
}
|
||||
},
|
||||
'api_server': {
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
@@ -430,7 +482,60 @@ CONF_SCHEMA = {
|
||||
'remove_pumps': {'type': 'boolean'}
|
||||
},
|
||||
'required': ['process_throttle_secs', 'allowed_risk']
|
||||
}
|
||||
},
|
||||
"freqai": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {"type": "boolean", "default": False},
|
||||
"keras": {"type": "boolean", "default": False},
|
||||
"conv_width": {"type": "integer", "default": 2},
|
||||
"train_period_days": {"type": "integer", "default": 0},
|
||||
"backtest_period_days": {"type": "number", "default": 7},
|
||||
"identifier": {"type": "string", "default": "example"},
|
||||
"feature_parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"include_corr_pairlist": {"type": "array"},
|
||||
"include_timeframes": {"type": "array"},
|
||||
"label_period_candles": {"type": "integer"},
|
||||
"include_shifted_candles": {"type": "integer", "default": 0},
|
||||
"DI_threshold": {"type": "number", "default": 0},
|
||||
"weight_factor": {"type": "number", "default": 0},
|
||||
"principal_component_analysis": {"type": "boolean", "default": False},
|
||||
"use_SVM_to_remove_outliers": {"type": "boolean", "default": False},
|
||||
"svm_params": {"type": "object",
|
||||
"properties": {
|
||||
"shuffle": {"type": "boolean", "default": False},
|
||||
"nu": {"type": "number", "default": 0.1}
|
||||
},
|
||||
}
|
||||
},
|
||||
"required": ["include_timeframes", "include_corr_pairlist", ]
|
||||
},
|
||||
"data_split_parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"test_size": {"type": "number"},
|
||||
"random_state": {"type": "integer"},
|
||||
},
|
||||
},
|
||||
"model_training_parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"n_estimators": {"type": "integer", "default": 1000}
|
||||
},
|
||||
},
|
||||
},
|
||||
"required": [
|
||||
"enabled",
|
||||
"train_period_days",
|
||||
"backtest_period_days",
|
||||
"identifier",
|
||||
"feature_parameters",
|
||||
"data_split_parameters",
|
||||
"model_training_parameters"
|
||||
]
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -482,6 +587,8 @@ CANCEL_REASON = {
|
||||
"ALL_CANCELLED": "cancelled (all unfilled and partially filled open orders cancelled)",
|
||||
"CANCELLED_ON_EXCHANGE": "cancelled on exchange",
|
||||
"FORCE_EXIT": "forcesold",
|
||||
"REPLACE": "cancelled to be replaced by new limit order",
|
||||
"USER_CANCEL": "user requested order cancel"
|
||||
}
|
||||
|
||||
# List of pairs with their timeframes
|
||||
@@ -493,3 +600,5 @@ TradeList = List[List]
|
||||
|
||||
LongShort = Literal['long', 'short']
|
||||
EntryExit = Literal['entry', 'exit']
|
||||
BuySell = Literal['buy', 'sell']
|
||||
MakerTaker = Literal['maker', 'taker']
|
||||
|
@@ -26,7 +26,7 @@ BT_DATA_COLUMNS = ['pair', 'stake_amount', 'amount', 'open_date', 'close_date',
|
||||
'profit_ratio', 'profit_abs', 'exit_reason',
|
||||
'initial_stop_loss_abs', 'initial_stop_loss_ratio', 'stop_loss_abs',
|
||||
'stop_loss_ratio', 'min_rate', 'max_rate', 'is_open', 'enter_tag',
|
||||
'is_short'
|
||||
'is_short', 'open_timestamp', 'close_timestamp', 'orders'
|
||||
]
|
||||
|
||||
|
||||
@@ -283,6 +283,8 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non
|
||||
if 'enter_tag' not in df.columns:
|
||||
df['enter_tag'] = df['buy_tag']
|
||||
df = df.drop(['buy_tag'], axis=1)
|
||||
if 'orders' not in df.columns:
|
||||
df.loc[:, 'orders'] = None
|
||||
|
||||
else:
|
||||
# old format - only with lists.
|
||||
@@ -337,7 +339,7 @@ def trade_list_to_dataframe(trades: List[LocalTrade]) -> pd.DataFrame:
|
||||
:param trades: List of trade objects
|
||||
:return: Dataframe with BT_DATA_COLUMNS
|
||||
"""
|
||||
df = pd.DataFrame.from_records([t.to_json() for t in trades], columns=BT_DATA_COLUMNS)
|
||||
df = pd.DataFrame.from_records([t.to_json(True) for t in trades], columns=BT_DATA_COLUMNS)
|
||||
if len(df) > 0:
|
||||
df.loc[:, 'close_date'] = pd.to_datetime(df['close_date'], utc=True)
|
||||
df.loc[:, 'open_date'] = pd.to_datetime(df['open_date'], utc=True)
|
||||
@@ -353,7 +355,7 @@ def load_trades_from_db(db_url: str, strategy: Optional[str] = None) -> pd.DataF
|
||||
Can also serve as protection to load the correct result.
|
||||
:return: Dataframe containing Trades
|
||||
"""
|
||||
init_db(db_url, clean_open_orders=False)
|
||||
init_db(db_url)
|
||||
|
||||
filters = []
|
||||
if strategy:
|
||||
|
@@ -5,6 +5,7 @@ including ticker and orderbook data, live and historical candle (OHLCV) data
|
||||
Common Interface for bot and strategy to access data.
|
||||
"""
|
||||
import logging
|
||||
from collections import deque
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
@@ -16,6 +17,7 @@ from freqtrade.data.history import load_pair_history
|
||||
from freqtrade.enums import CandleType, RunMode
|
||||
from freqtrade.exceptions import ExchangeError, OperationalException
|
||||
from freqtrade.exchange import Exchange, timeframe_to_seconds
|
||||
from freqtrade.util import PeriodicCache
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -33,6 +35,10 @@ class DataProvider:
|
||||
self.__cached_pairs: Dict[PairWithTimeframe, Tuple[DataFrame, datetime]] = {}
|
||||
self.__slice_index: Optional[int] = None
|
||||
self.__cached_pairs_backtesting: Dict[PairWithTimeframe, DataFrame] = {}
|
||||
self._msg_queue: deque = deque()
|
||||
|
||||
self.__msg_cache = PeriodicCache(
|
||||
maxsize=1000, ttl=timeframe_to_seconds(self._config.get('timeframe', '1h')))
|
||||
|
||||
def _set_dataframe_max_index(self, limit_index: int):
|
||||
"""
|
||||
@@ -265,3 +271,20 @@ class DataProvider:
|
||||
if self._exchange is None:
|
||||
raise OperationalException(NO_EXCHANGE_EXCEPTION)
|
||||
return self._exchange.fetch_l2_order_book(pair, maximum)
|
||||
|
||||
def send_msg(self, message: str, *, always_send: bool = False) -> None:
|
||||
"""
|
||||
Send custom RPC Notifications from your bot.
|
||||
Will not send any bot in modes other than Dry-run or Live.
|
||||
:param message: Message to be sent. Must be below 4096.
|
||||
:param always_send: If False, will send the message only once per candle, and surpress
|
||||
identical messages.
|
||||
Careful as this can end up spaming your chat.
|
||||
Defaults to False
|
||||
"""
|
||||
if self.runmode not in (RunMode.DRY_RUN, RunMode.LIVE):
|
||||
return
|
||||
|
||||
if always_send or message not in self.__msg_cache:
|
||||
self._msg_queue.append(message)
|
||||
self.__msg_cache[message] = True
|
||||
|
227
freqtrade/data/entryexitanalysis.py
Executable file
@@ -0,0 +1,227 @@
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
import joblib
|
||||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
|
||||
from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data,
|
||||
load_backtest_stats)
|
||||
from freqtrade.exceptions import OperationalException
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _load_signal_candles(backtest_dir: Path):
|
||||
if backtest_dir.is_dir():
|
||||
scpf = Path(backtest_dir,
|
||||
Path(get_latest_backtest_filename(backtest_dir)).stem + "_signals.pkl"
|
||||
)
|
||||
else:
|
||||
scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_signals.pkl")
|
||||
|
||||
try:
|
||||
scp = open(scpf, "rb")
|
||||
signal_candles = joblib.load(scp)
|
||||
logger.info(f"Loaded signal candles: {str(scpf)}")
|
||||
except Exception as e:
|
||||
logger.error("Cannot load signal candles from pickled results: ", e)
|
||||
|
||||
return signal_candles
|
||||
|
||||
|
||||
def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_candles):
|
||||
analysed_trades_dict = {}
|
||||
analysed_trades_dict[strategy_name] = {}
|
||||
|
||||
try:
|
||||
logger.info(f"Processing {strategy_name} : {len(pairlist)} pairs")
|
||||
|
||||
for pair in pairlist:
|
||||
if pair in signal_candles[strategy_name]:
|
||||
analysed_trades_dict[strategy_name][pair] = _analyze_candles_and_indicators(
|
||||
pair,
|
||||
trades,
|
||||
signal_candles[strategy_name][pair])
|
||||
except Exception as e:
|
||||
print(f"Cannot process entry/exit reasons for {strategy_name}: ", e)
|
||||
|
||||
return analysed_trades_dict
|
||||
|
||||
|
||||
def _analyze_candles_and_indicators(pair, trades, signal_candles):
|
||||
buyf = signal_candles
|
||||
|
||||
if len(buyf) > 0:
|
||||
buyf = buyf.set_index('date', drop=False)
|
||||
trades_red = trades.loc[trades['pair'] == pair].copy()
|
||||
|
||||
trades_inds = pd.DataFrame()
|
||||
|
||||
if trades_red.shape[0] > 0 and buyf.shape[0] > 0:
|
||||
for t, v in trades_red.open_date.items():
|
||||
allinds = buyf.loc[(buyf['date'] < v)]
|
||||
if allinds.shape[0] > 0:
|
||||
tmp_inds = allinds.iloc[[-1]]
|
||||
|
||||
trades_red.loc[t, 'signal_date'] = tmp_inds['date'].values[0]
|
||||
trades_red.loc[t, 'enter_reason'] = trades_red.loc[t, 'enter_tag']
|
||||
tmp_inds.index.rename('signal_date', inplace=True)
|
||||
trades_inds = pd.concat([trades_inds, tmp_inds])
|
||||
|
||||
if 'signal_date' in trades_red:
|
||||
trades_red['signal_date'] = pd.to_datetime(trades_red['signal_date'], utc=True)
|
||||
trades_red.set_index('signal_date', inplace=True)
|
||||
|
||||
try:
|
||||
trades_red = pd.merge(trades_red, trades_inds, on='signal_date', how='outer')
|
||||
except Exception as e:
|
||||
raise e
|
||||
return trades_red
|
||||
else:
|
||||
return pd.DataFrame()
|
||||
|
||||
|
||||
def _do_group_table_output(bigdf, glist):
|
||||
for g in glist:
|
||||
# 0: summary wins/losses grouped by enter tag
|
||||
if g == "0":
|
||||
group_mask = ['enter_reason']
|
||||
wins = bigdf.loc[bigdf['profit_abs'] >= 0] \
|
||||
.groupby(group_mask) \
|
||||
.agg({'profit_abs': ['sum']})
|
||||
|
||||
wins.columns = ['profit_abs_wins']
|
||||
loss = bigdf.loc[bigdf['profit_abs'] < 0] \
|
||||
.groupby(group_mask) \
|
||||
.agg({'profit_abs': ['sum']})
|
||||
loss.columns = ['profit_abs_loss']
|
||||
|
||||
new = bigdf.groupby(group_mask).agg({'profit_abs': [
|
||||
'count',
|
||||
lambda x: sum(x > 0),
|
||||
lambda x: sum(x <= 0)]})
|
||||
new = pd.concat([new, wins, loss], axis=1).fillna(0)
|
||||
|
||||
new['profit_tot'] = new['profit_abs_wins'] - abs(new['profit_abs_loss'])
|
||||
new['wl_ratio_pct'] = (new.iloc[:, 1] / new.iloc[:, 0] * 100).fillna(0)
|
||||
new['avg_win'] = (new['profit_abs_wins'] / new.iloc[:, 1]).fillna(0)
|
||||
new['avg_loss'] = (new['profit_abs_loss'] / new.iloc[:, 2]).fillna(0)
|
||||
|
||||
new.columns = ['total_num_buys', 'wins', 'losses', 'profit_abs_wins', 'profit_abs_loss',
|
||||
'profit_tot', 'wl_ratio_pct', 'avg_win', 'avg_loss']
|
||||
|
||||
sortcols = ['total_num_buys']
|
||||
|
||||
_print_table(new, sortcols, show_index=True)
|
||||
|
||||
else:
|
||||
agg_mask = {'profit_abs': ['count', 'sum', 'median', 'mean'],
|
||||
'profit_ratio': ['sum', 'median', 'mean']}
|
||||
agg_cols = ['num_buys', 'profit_abs_sum', 'profit_abs_median',
|
||||
'profit_abs_mean', 'median_profit_pct', 'mean_profit_pct',
|
||||
'total_profit_pct']
|
||||
sortcols = ['profit_abs_sum', 'enter_reason']
|
||||
|
||||
# 1: profit summaries grouped by enter_tag
|
||||
if g == "1":
|
||||
group_mask = ['enter_reason']
|
||||
|
||||
# 2: profit summaries grouped by enter_tag and exit_tag
|
||||
if g == "2":
|
||||
group_mask = ['enter_reason', 'exit_reason']
|
||||
|
||||
# 3: profit summaries grouped by pair and enter_tag
|
||||
if g == "3":
|
||||
group_mask = ['pair', 'enter_reason']
|
||||
|
||||
# 4: profit summaries grouped by pair, enter_ and exit_tag (this can get quite large)
|
||||
if g == "4":
|
||||
group_mask = ['pair', 'enter_reason', 'exit_reason']
|
||||
if group_mask:
|
||||
new = bigdf.groupby(group_mask).agg(agg_mask).reset_index()
|
||||
new.columns = group_mask + agg_cols
|
||||
new['median_profit_pct'] = new['median_profit_pct'] * 100
|
||||
new['mean_profit_pct'] = new['mean_profit_pct'] * 100
|
||||
new['total_profit_pct'] = new['total_profit_pct'] * 100
|
||||
|
||||
_print_table(new, sortcols)
|
||||
else:
|
||||
logger.warning("Invalid group mask specified.")
|
||||
|
||||
|
||||
def _print_results(analysed_trades, stratname, analysis_groups,
|
||||
enter_reason_list, exit_reason_list,
|
||||
indicator_list, columns=None):
|
||||
if columns is None:
|
||||
columns = ['pair', 'open_date', 'close_date', 'profit_abs', 'enter_reason', 'exit_reason']
|
||||
|
||||
bigdf = pd.DataFrame()
|
||||
for pair, trades in analysed_trades[stratname].items():
|
||||
bigdf = pd.concat([bigdf, trades], ignore_index=True)
|
||||
|
||||
if bigdf.shape[0] > 0 and ('enter_reason' in bigdf.columns):
|
||||
if analysis_groups:
|
||||
_do_group_table_output(bigdf, analysis_groups)
|
||||
|
||||
if enter_reason_list and "all" not in enter_reason_list:
|
||||
bigdf = bigdf.loc[(bigdf['enter_reason'].isin(enter_reason_list))]
|
||||
|
||||
if exit_reason_list and "all" not in exit_reason_list:
|
||||
bigdf = bigdf.loc[(bigdf['exit_reason'].isin(exit_reason_list))]
|
||||
|
||||
if "all" in indicator_list:
|
||||
print(bigdf)
|
||||
elif indicator_list is not None:
|
||||
available_inds = []
|
||||
for ind in indicator_list:
|
||||
if ind in bigdf:
|
||||
available_inds.append(ind)
|
||||
ilist = ["pair", "enter_reason", "exit_reason"] + available_inds
|
||||
_print_table(bigdf[ilist], sortcols=['exit_reason'], show_index=False)
|
||||
else:
|
||||
print("\\_ No trades to show")
|
||||
|
||||
|
||||
def _print_table(df, sortcols=None, show_index=False):
|
||||
if (sortcols is not None):
|
||||
data = df.sort_values(sortcols)
|
||||
else:
|
||||
data = df
|
||||
|
||||
print(
|
||||
tabulate(
|
||||
data,
|
||||
headers='keys',
|
||||
tablefmt='psql',
|
||||
showindex=show_index
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def process_entry_exit_reasons(backtest_dir: Path,
|
||||
pairlist: List[str],
|
||||
analysis_groups: Optional[List[str]] = ["0", "1", "2"],
|
||||
enter_reason_list: Optional[List[str]] = ["all"],
|
||||
exit_reason_list: Optional[List[str]] = ["all"],
|
||||
indicator_list: Optional[List[str]] = []):
|
||||
try:
|
||||
backtest_stats = load_backtest_stats(backtest_dir)
|
||||
for strategy_name, results in backtest_stats['strategy'].items():
|
||||
trades = load_backtest_data(backtest_dir, strategy_name)
|
||||
|
||||
if not trades.empty:
|
||||
signal_candles = _load_signal_candles(backtest_dir)
|
||||
analysed_trades_dict = _process_candles_and_indicators(pairlist, strategy_name,
|
||||
trades, signal_candles)
|
||||
_print_results(analysed_trades_dict,
|
||||
strategy_name,
|
||||
analysis_groups,
|
||||
enter_reason_list,
|
||||
exit_reason_list,
|
||||
indicator_list)
|
||||
|
||||
except ValueError as e:
|
||||
raise OperationalException(e) from e
|
@@ -7,9 +7,8 @@ import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from freqtrade.configuration import TimeRange
|
||||
from freqtrade.constants import (DEFAULT_DATAFRAME_COLUMNS, DEFAULT_TRADES_COLUMNS,
|
||||
ListPairsWithTimeframes, TradeList)
|
||||
from freqtrade.enums import CandleType, TradingMode
|
||||
from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS, DEFAULT_TRADES_COLUMNS, TradeList
|
||||
from freqtrade.enums import CandleType
|
||||
|
||||
from .idatahandler import IDataHandler
|
||||
|
||||
@@ -21,29 +20,6 @@ class HDF5DataHandler(IDataHandler):
|
||||
|
||||
_columns = DEFAULT_DATAFRAME_COLUMNS
|
||||
|
||||
@classmethod
|
||||
def ohlcv_get_available_data(
|
||||
cls, datadir: Path, trading_mode: TradingMode) -> ListPairsWithTimeframes:
|
||||
"""
|
||||
Returns a list of all pairs with ohlcv data available in this datadir
|
||||
:param datadir: Directory to search for ohlcv files
|
||||
:param trading_mode: trading-mode to be used
|
||||
:return: List of Tuples of (pair, timeframe)
|
||||
"""
|
||||
if trading_mode == TradingMode.FUTURES:
|
||||
datadir = datadir.joinpath('futures')
|
||||
_tmp = [
|
||||
re.search(
|
||||
cls._OHLCV_REGEX, p.name
|
||||
) for p in datadir.glob("*.h5")
|
||||
]
|
||||
return [
|
||||
(
|
||||
cls.rebuild_pair_from_filename(match[1]),
|
||||
match[2],
|
||||
CandleType.from_string(match[3])
|
||||
) for match in _tmp if match and len(match.groups()) > 1]
|
||||
|
||||
@classmethod
|
||||
def ohlcv_get_pairs(cls, datadir: Path, timeframe: str, candle_type: CandleType) -> List[str]:
|
||||
"""
|
||||
@@ -109,7 +85,11 @@ class HDF5DataHandler(IDataHandler):
|
||||
)
|
||||
|
||||
if not filename.exists():
|
||||
return pd.DataFrame(columns=self._columns)
|
||||
# Fallback mode for 1M files
|
||||
filename = self._pair_data_filename(
|
||||
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True)
|
||||
if not filename.exists():
|
||||
return pd.DataFrame(columns=self._columns)
|
||||
where = []
|
||||
if timerange:
|
||||
if timerange.starttype == 'date':
|
||||
|
@@ -56,7 +56,7 @@ def load_pair_history(pair: str,
|
||||
fill_missing=fill_up_missing,
|
||||
drop_incomplete=drop_incomplete,
|
||||
startup_candles=startup_candles,
|
||||
candle_type=candle_type
|
||||
candle_type=candle_type,
|
||||
)
|
||||
|
||||
|
||||
@@ -68,7 +68,8 @@ def load_data(datadir: Path,
|
||||
startup_candles: int = 0,
|
||||
fail_without_data: bool = False,
|
||||
data_format: str = 'json',
|
||||
candle_type: CandleType = CandleType.SPOT
|
||||
candle_type: CandleType = CandleType.SPOT,
|
||||
user_futures_funding_rate: int = None,
|
||||
) -> Dict[str, DataFrame]:
|
||||
"""
|
||||
Load ohlcv history data for a list of pairs.
|
||||
@@ -96,10 +97,15 @@ def load_data(datadir: Path,
|
||||
fill_up_missing=fill_up_missing,
|
||||
startup_candles=startup_candles,
|
||||
data_handler=data_handler,
|
||||
candle_type=candle_type
|
||||
candle_type=candle_type,
|
||||
)
|
||||
if not hist.empty:
|
||||
result[pair] = hist
|
||||
else:
|
||||
if candle_type is CandleType.FUNDING_RATE and user_futures_funding_rate is not None:
|
||||
logger.warn(f"{pair} using user specified [{user_futures_funding_rate}]")
|
||||
elif candle_type not in (CandleType.SPOT, CandleType.FUTURES):
|
||||
result[pair] = DataFrame(columns=["date", "open", "close", "high", "low", "volume"])
|
||||
|
||||
if fail_without_data and not result:
|
||||
raise OperationalException("No data found. Terminating.")
|
||||
@@ -139,8 +145,9 @@ def _load_cached_data_for_updating(
timeframe: str,
timerange: Optional[TimeRange],
data_handler: IDataHandler,
candle_type: CandleType
) -> Tuple[DataFrame, Optional[int]]:
candle_type: CandleType,
prepend: bool = False,
) -> Tuple[DataFrame, Optional[int], Optional[int]]:
"""
Load cached data to download more data.
If timerange is passed in, checks whether data before the stored data will be
@@ -150,9 +157,12 @@ def _load_cached_data_for_updating(
Note: Only used by download_pair_history().
"""
start = None
end = None
if timerange:
if timerange.starttype == 'date':
start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc)
if timerange.stoptype == 'date':
end = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc)

# Intentionally don't pass timerange in - since we need to load the full dataset.
data = data_handler.ohlcv_load(pair, timeframe=timeframe,
@@ -160,14 +170,17 @@ def _load_cached_data_for_updating(
drop_incomplete=True, warn_no_data=False,
candle_type=candle_type)
if not data.empty:
if start and start < data.iloc[0]['date']:
if not prepend and start and start < data.iloc[0]['date']:
# Earlier data than existing data requested, redownload all
data = DataFrame(columns=DEFAULT_DATAFRAME_COLUMNS)
else:
start = data.iloc[-1]['date']

if prepend:
end = data.iloc[0]['date']
else:
start = data.iloc[-1]['date']
start_ms = int(start.timestamp() * 1000) if start else None
return data, start_ms
end_ms = int(end.timestamp() * 1000) if end else None
return data, start_ms, end_ms
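
The hunk above extends _load_cached_data_for_updating with a prepend mode: instead of always continuing from the newest cached candle, the function can now return a window that ends at the oldest cached candle so older history can be fetched. A minimal standalone sketch of that window logic (simplified; the function name and column list are illustrative, not the actual freqtrade code):

import pandas as pd

def updating_window(data, start, end, prepend=False):
    """Return (data, since_ms, until_ms) for the next download request."""
    if not data.empty:
        if not prepend and start and start < data.iloc[0]['date']:
            # Earlier data than the cache was requested: drop the cache and redownload all.
            data = pd.DataFrame(columns=['date', 'open', 'high', 'low', 'close', 'volume'])
        elif prepend:
            # Prepend mode: only fetch candles older than the first cached candle.
            end = data.iloc[0]['date']
        else:
            # Default mode: continue downloading from the newest cached candle.
            start = data.iloc[-1]['date']
    since_ms = int(start.timestamp() * 1000) if start else None
    until_ms = int(end.timestamp() * 1000) if end else None
    return data, since_ms, until_ms
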
|
||||
|
||||
|
||||
def _download_pair_history(pair: str, *,
|
||||
@@ -180,6 +193,7 @@ def _download_pair_history(pair: str, *,
|
||||
timerange: Optional[TimeRange] = None,
|
||||
candle_type: CandleType,
|
||||
erase: bool = False,
|
||||
prepend: bool = False,
|
||||
) -> bool:
|
||||
"""
|
||||
Download latest candles from the exchange for the pair and timeframe passed in parameters
|
||||
@@ -187,8 +201,6 @@ def _download_pair_history(pair: str, *,
|
||||
exists in a cache. If timerange starts earlier than the data in the cache,
|
||||
the full data will be redownloaded
|
||||
|
||||
Based on @Rybolov work: https://github.com/rybolov/freqtrade-data
|
||||
|
||||
:param pair: pair to download
|
||||
:param timeframe: Timeframe (e.g "5m")
|
||||
:param timerange: range of time to download
|
||||
@@ -203,14 +215,17 @@ def _download_pair_history(pair: str, *,
|
||||
if data_handler.ohlcv_purge(pair, timeframe, candle_type=candle_type):
|
||||
logger.info(f'Deleting existing data for pair {pair}, {timeframe}, {candle_type}.')
|
||||
|
||||
logger.info(
|
||||
f'Download history data for pair: "{pair}" ({process}), timeframe: {timeframe}, '
|
||||
f'candle type: {candle_type} and store in {datadir}.'
|
||||
)
|
||||
data, since_ms, until_ms = _load_cached_data_for_updating(
|
||||
pair, timeframe, timerange,
|
||||
data_handler=data_handler,
|
||||
candle_type=candle_type,
|
||||
prepend=prepend)
|
||||
|
||||
data, since_ms = _load_cached_data_for_updating(pair, timeframe, timerange,
|
||||
data_handler=data_handler,
|
||||
candle_type=candle_type)
|
||||
logger.info(f'({process}) - Download history data for "{pair}", {timeframe}, '
|
||||
f'{candle_type} and store in {datadir}. '
|
||||
f'From {format_ms_time(since_ms) if since_ms else "start"} to '
|
||||
f'{format_ms_time(until_ms) if until_ms else "now"}'
|
||||
)
|
||||
|
||||
logger.debug("Current Start: %s",
|
||||
f"{data.iloc[0]['date']:%Y-%m-%d %H:%M:%S}" if not data.empty else 'None')
|
||||
@@ -225,6 +240,7 @@ def _download_pair_history(pair: str, *,
|
||||
days=-new_pairs_days).int_timestamp * 1000,
|
||||
is_new_pair=data.empty,
|
||||
candle_type=candle_type,
|
||||
until_ms=until_ms if until_ms else None
|
||||
)
|
||||
# TODO: Maybe move parsing to exchange class (?)
|
||||
new_dataframe = ohlcv_to_dataframe(new_data, timeframe, pair,
|
||||
@@ -257,6 +273,7 @@ def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes
|
||||
timerange: Optional[TimeRange] = None,
|
||||
new_pairs_days: int = 30, erase: bool = False,
|
||||
data_format: str = None,
|
||||
prepend: bool = False,
|
||||
) -> List[str]:
|
||||
"""
|
||||
Refresh stored ohlcv data for backtesting and hyperopt operations.
|
||||
@@ -266,6 +283,7 @@ def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes
|
||||
pairs_not_available = []
|
||||
data_handler = get_datahandler(datadir, data_format)
|
||||
candle_type = CandleType.get_default(trading_mode)
|
||||
process = ''
|
||||
for idx, pair in enumerate(pairs, start=1):
|
||||
if pair not in exchange.markets:
|
||||
pairs_not_available.append(pair)
|
||||
@@ -280,12 +298,12 @@ def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes
|
||||
timerange=timerange, data_handler=data_handler,
|
||||
timeframe=str(timeframe), new_pairs_days=new_pairs_days,
|
||||
candle_type=candle_type,
|
||||
erase=erase)
|
||||
erase=erase, prepend=prepend)
|
||||
if trading_mode == 'futures':
|
||||
# Predefined candletype (and timeframe) depending on exchange
|
||||
# Downloads what is necessary to backtest based on futures data.
|
||||
tf_mark = exchange._ft_has['mark_ohlcv_timeframe']
|
||||
fr_candle_type = CandleType.from_string(exchange._ft_has['mark_ohlcv_price'])
|
||||
tf_mark = exchange.get_option('mark_ohlcv_timeframe')
|
||||
fr_candle_type = CandleType.from_string(exchange.get_option('mark_ohlcv_price'))
|
||||
# All exchanges need FundingRate for futures trading.
|
||||
# The timeframe is aligned to the mark-price timeframe.
|
||||
for funding_candle_type in (CandleType.FUNDING_RATE, fr_candle_type):
|
||||
@@ -294,7 +312,7 @@ def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes
|
||||
timerange=timerange, data_handler=data_handler,
|
||||
timeframe=str(tf_mark), new_pairs_days=new_pairs_days,
|
||||
candle_type=funding_candle_type,
|
||||
erase=erase)
|
||||
erase=erase, prepend=prepend)
|
||||
|
||||
return pairs_not_available
|
||||
|
||||
@@ -312,12 +330,12 @@ def _download_trades_history(exchange: Exchange,
|
||||
try:
|
||||
|
||||
until = None
|
||||
if (timerange and timerange.starttype == 'date'):
|
||||
since = timerange.startts * 1000
|
||||
since = 0
|
||||
if timerange:
|
||||
if timerange.starttype == 'date':
|
||||
since = timerange.startts * 1000
|
||||
if timerange.stoptype == 'date':
|
||||
until = timerange.stopts * 1000
|
||||
else:
|
||||
since = arrow.utcnow().shift(days=-new_pairs_days).int_timestamp * 1000
|
||||
|
||||
trades = data_handler.trades_load(pair)
|
||||
|
||||
@@ -330,6 +348,9 @@ def _download_trades_history(exchange: Exchange,
|
||||
logger.info(f"Start earlier than available data. Redownloading trades for {pair}...")
|
||||
trades = []
|
||||
|
||||
if not since:
|
||||
since = arrow.utcnow().shift(days=-new_pairs_days).int_timestamp * 1000
|
||||
|
||||
from_id = trades[-1][1] if trades else None
|
||||
if trades and since < trades[-1][0]:
|
||||
# Reset since to the last available point
|
||||
|
@@ -5,11 +5,11 @@ It's subclasses handle and storing data from disk.
|
||||
"""
|
||||
import logging
|
||||
import re
|
||||
from abc import ABC, abstractclassmethod, abstractmethod
|
||||
from abc import ABC, abstractmethod
|
||||
from copy import deepcopy
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Type
|
||||
from typing import List, Optional, Tuple, Type
|
||||
|
||||
from pandas import DataFrame
|
||||
|
||||
@@ -26,7 +26,7 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
class IDataHandler(ABC):
|
||||
|
||||
_OHLCV_REGEX = r'^([a-zA-Z_-]+)\-(\d+\S)\-?([a-zA-Z_]*)?(?=\.)'
|
||||
_OHLCV_REGEX = r'^([a-zA-Z_-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)'
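
The tightened regex above captures a one- or two-letter timeframe suffix instead of any non-space character, so filenames using the new "1Mo" month notation split cleanly into pair, timeframe and candle type. A quick standalone check (example filenames assumed):

import re

_OHLCV_REGEX = r'^([a-zA-Z_-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)'

match = re.search(_OHLCV_REGEX, "ETH_USDT-1Mo-futures.json")
print(match.groups())  # ('ETH_USDT', '1Mo', 'futures')

match = re.search(_OHLCV_REGEX, "ETH_USDT-5m.json")
print(match.groups())  # ('ETH_USDT', '5m', '')
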
|
||||
|
||||
def __init__(self, datadir: Path) -> None:
|
||||
self._datadir = datadir
|
||||
@@ -38,17 +38,30 @@ class IDataHandler(ABC):
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
@abstractclassmethod
|
||||
@classmethod
|
||||
def ohlcv_get_available_data(
|
||||
cls, datadir: Path, trading_mode: TradingMode) -> ListPairsWithTimeframes:
|
||||
"""
|
||||
Returns a list of all pairs with ohlcv data available in this datadir
|
||||
:param datadir: Directory to search for ohlcv files
|
||||
:param trading_mode: trading-mode to be used
|
||||
:return: List of Tuples of (pair, timeframe)
|
||||
:return: List of Tuples of (pair, timeframe, CandleType)
|
||||
"""
|
||||
if trading_mode == TradingMode.FUTURES:
|
||||
datadir = datadir.joinpath('futures')
|
||||
_tmp = [
|
||||
re.search(
|
||||
cls._OHLCV_REGEX, p.name
|
||||
) for p in datadir.glob(f"*.{cls._get_file_extension()}")]
|
||||
return [
|
||||
(
|
||||
cls.rebuild_pair_from_filename(match[1]),
|
||||
cls.rebuild_timeframe_from_filename(match[2]),
|
||||
CandleType.from_string(match[3])
|
||||
) for match in _tmp if match and len(match.groups()) > 1]
|
||||
|
||||
@abstractclassmethod
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def ohlcv_get_pairs(cls, datadir: Path, timeframe: str, candle_type: CandleType) -> List[str]:
|
||||
"""
|
||||
Returns a list of all pairs with ohlcv data available in this datadir
|
||||
@@ -71,6 +84,18 @@ class IDataHandler(ABC):
|
||||
:return: None
|
||||
"""
|
||||
|
||||
def ohlcv_data_min_max(self, pair: str, timeframe: str,
|
||||
candle_type: CandleType) -> Tuple[datetime, datetime]:
|
||||
"""
|
||||
Returns the min and max timestamp for the given pair and timeframe.
|
||||
:param pair: Pair to get min/max for
|
||||
:param timeframe: Timeframe to get min/max for
|
||||
:param candle_type: Any of the enum CandleType (must match trading mode!)
|
||||
:return: (min, max)
|
||||
"""
|
||||
data = self._ohlcv_load(pair, timeframe, None, candle_type)
|
||||
return data.iloc[0]['date'].to_pydatetime(), data.iloc[-1]['date'].to_pydatetime()
|
||||
|
||||
@abstractmethod
|
||||
def _ohlcv_load(self, pair: str, timeframe: str, timerange: Optional[TimeRange],
|
||||
candle_type: CandleType
|
||||
@@ -118,7 +143,8 @@ class IDataHandler(ABC):
|
||||
:param candle_type: Any of the enum CandleType (must match trading mode!)
|
||||
"""
|
||||
|
||||
@abstractclassmethod
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def trades_get_pairs(cls, datadir: Path) -> List[str]:
|
||||
"""
|
||||
Returns a list of all pairs for which trade data is available in this
|
||||
@@ -190,10 +216,14 @@ class IDataHandler(ABC):
|
||||
datadir: Path,
|
||||
pair: str,
|
||||
timeframe: str,
|
||||
candle_type: CandleType
|
||||
candle_type: CandleType,
|
||||
no_timeframe_modify: bool = False
|
||||
) -> Path:
|
||||
pair_s = misc.pair_to_filename(pair)
|
||||
candle = ""
|
||||
if not no_timeframe_modify:
|
||||
timeframe = cls.timeframe_to_file(timeframe)
|
||||
|
||||
if candle_type != CandleType.SPOT:
|
||||
datadir = datadir.joinpath('futures')
|
||||
candle = f"-{candle_type}"
|
||||
@@ -207,6 +237,18 @@ class IDataHandler(ABC):
|
||||
filename = datadir.joinpath(f'{pair_s}-trades.{cls._get_file_extension()}')
|
||||
return filename
|
||||
|
||||
@staticmethod
|
||||
def timeframe_to_file(timeframe: str):
|
||||
return timeframe.replace('M', 'Mo')
|
||||
|
||||
@staticmethod
|
||||
def rebuild_timeframe_from_filename(timeframe: str) -> str:
|
||||
"""
|
||||
Converts a timeframe read from a filename back to the original timeframe
|
||||
Replaces mo with M (to avoid problems on case-insensitive filesystems)
|
||||
"""
|
||||
return re.sub('1mo', '1M', timeframe, flags=re.IGNORECASE)
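
Taken together, the two helpers above give a lossless round trip for the month timeframe: "1M" is written to disk as "1Mo" so case-insensitive filesystems cannot confuse it with "1m" (one minute), and it is mapped back when filenames are parsed. A small self-contained check:

import re

def timeframe_to_file(timeframe: str) -> str:
    return timeframe.replace('M', 'Mo')

def rebuild_timeframe_from_filename(timeframe: str) -> str:
    return re.sub('1mo', '1M', timeframe, flags=re.IGNORECASE)

assert timeframe_to_file('1M') == '1Mo'
assert rebuild_timeframe_from_filename('1Mo') == '1M'
assert rebuild_timeframe_from_filename('5m') == '5m'  # other timeframes pass through unchanged
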
|
||||
|
||||
@staticmethod
|
||||
def rebuild_pair_from_filename(pair: str) -> str:
|
||||
"""
|
||||
|
@@ -8,9 +8,9 @@ from pandas import DataFrame, read_json, to_datetime
|
||||
|
||||
from freqtrade import misc
|
||||
from freqtrade.configuration import TimeRange
|
||||
from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS, ListPairsWithTimeframes, TradeList
|
||||
from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS, TradeList
|
||||
from freqtrade.data.converter import trades_dict_to_list
|
||||
from freqtrade.enums import CandleType, TradingMode
|
||||
from freqtrade.enums import CandleType
|
||||
|
||||
from .idatahandler import IDataHandler
|
||||
|
||||
@@ -23,28 +23,6 @@ class JsonDataHandler(IDataHandler):
|
||||
_use_zip = False
|
||||
_columns = DEFAULT_DATAFRAME_COLUMNS
|
||||
|
||||
@classmethod
|
||||
def ohlcv_get_available_data(
|
||||
cls, datadir: Path, trading_mode: TradingMode) -> ListPairsWithTimeframes:
|
||||
"""
|
||||
Returns a list of all pairs with ohlcv data available in this datadir
|
||||
:param datadir: Directory to search for ohlcv files
|
||||
:param trading_mode: trading-mode to be used
|
||||
:return: List of Tuples of (pair, timeframe)
|
||||
"""
|
||||
if trading_mode == 'futures':
|
||||
datadir = datadir.joinpath('futures')
|
||||
_tmp = [
|
||||
re.search(
|
||||
cls._OHLCV_REGEX, p.name
|
||||
) for p in datadir.glob(f"*.{cls._get_file_extension()}")]
|
||||
return [
|
||||
(
|
||||
cls.rebuild_pair_from_filename(match[1]),
|
||||
match[2],
|
||||
CandleType.from_string(match[3])
|
||||
) for match in _tmp if match and len(match.groups()) > 1]
|
||||
|
||||
@classmethod
|
||||
def ohlcv_get_pairs(cls, datadir: Path, timeframe: str, candle_type: CandleType) -> List[str]:
|
||||
"""
|
||||
@@ -103,9 +81,14 @@ class JsonDataHandler(IDataHandler):
|
||||
:param candle_type: Any of the enum CandleType (must match trading mode!)
|
||||
:return: DataFrame with ohlcv data, or empty DataFrame
|
||||
"""
|
||||
filename = self._pair_data_filename(self._datadir, pair, timeframe, candle_type=candle_type)
|
||||
filename = self._pair_data_filename(
|
||||
self._datadir, pair, timeframe, candle_type=candle_type)
|
||||
if not filename.exists():
|
||||
return DataFrame(columns=self._columns)
|
||||
# Fallback mode for 1M files
|
||||
filename = self._pair_data_filename(
|
||||
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True)
|
||||
if not filename.exists():
|
||||
return DataFrame(columns=self._columns)
|
||||
try:
|
||||
pairdata = read_json(filename, orient='values')
|
||||
pairdata.columns = self._columns
|
||||
|
@@ -72,18 +72,28 @@ def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
|
||||
return df
|
||||
|
||||
|
||||
def _calc_drawdown_series(profit_results: pd.DataFrame, *, date_col: str, value_col: str
|
||||
) -> pd.DataFrame:
|
||||
def _calc_drawdown_series(profit_results: pd.DataFrame, *, date_col: str, value_col: str,
|
||||
starting_balance: float) -> pd.DataFrame:
|
||||
max_drawdown_df = pd.DataFrame()
|
||||
max_drawdown_df['cumulative'] = profit_results[value_col].cumsum()
|
||||
max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax()
|
||||
max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value']
|
||||
max_drawdown_df['date'] = profit_results.loc[:, date_col]
|
||||
if starting_balance:
|
||||
cumulative_balance = starting_balance + max_drawdown_df['cumulative']
|
||||
max_balance = starting_balance + max_drawdown_df['high_value']
|
||||
max_drawdown_df['drawdown_relative'] = ((max_balance - cumulative_balance) / max_balance)
|
||||
else:
|
||||
# NOTE: This is not completely accurate,
|
||||
# but might be good enough if starting_balance is not available
|
||||
max_drawdown_df['drawdown_relative'] = (
|
||||
(max_drawdown_df['high_value'] - max_drawdown_df['cumulative'])
|
||||
/ max_drawdown_df['high_value'])
|
||||
return max_drawdown_df
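
A worked example (numbers assumed) of the two drawdown notions computed above: the absolute drawdown comes from cumulative profit alone, while the relative drawdown measures the loss against the account high once a starting balance is known.

import pandas as pd

profits = pd.Series([50.0, -30.0, -40.0, 20.0])   # profit_abs per closed trade
starting_balance = 1000.0

cumulative = profits.cumsum()                     # 50, 20, -20, 0
high_value = cumulative.cummax()                  # 50, 50, 50, 50
drawdown = cumulative - high_value                # 0, -30, -70, -50

max_balance = starting_balance + high_value       # 1050 at the peak
drawdown_relative = (max_balance - (starting_balance + cumulative)) / max_balance

print(abs(drawdown.min()))      # 70.0    -> absolute max drawdown
print(drawdown_relative.max())  # ~0.0667 -> 70 / 1050, relative to the account high
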
|
||||
|
||||
|
||||
def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date',
|
||||
value_col: str = 'profit_ratio'
|
||||
value_col: str = 'profit_ratio', starting_balance: float = 0.0
|
||||
):
|
||||
"""
|
||||
Calculate max drawdown and the corresponding close dates
|
||||
@@ -97,13 +107,18 @@ def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date',
|
||||
if len(trades) == 0:
|
||||
raise ValueError("Trade dataframe empty.")
|
||||
profit_results = trades.sort_values(date_col).reset_index(drop=True)
|
||||
max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col)
|
||||
max_drawdown_df = _calc_drawdown_series(
|
||||
profit_results,
|
||||
date_col=date_col,
|
||||
value_col=value_col,
|
||||
starting_balance=starting_balance)
|
||||
|
||||
return max_drawdown_df
|
||||
|
||||
|
||||
def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date',
|
||||
value_col: str = 'profit_abs', starting_balance: float = 0
|
||||
value_col: str = 'profit_abs', starting_balance: float = 0,
|
||||
relative: bool = False
|
||||
) -> Tuple[float, pd.Timestamp, pd.Timestamp, float, float, float]:
|
||||
"""
|
||||
Calculate max drawdown and the corresponding close dates
|
||||
@@ -119,9 +134,15 @@ def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date'
|
||||
if len(trades) == 0:
|
||||
raise ValueError("Trade dataframe empty.")
|
||||
profit_results = trades.sort_values(date_col).reset_index(drop=True)
|
||||
max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col)
|
||||
max_drawdown_df = _calc_drawdown_series(
|
||||
profit_results,
|
||||
date_col=date_col,
|
||||
value_col=value_col,
|
||||
starting_balance=starting_balance
|
||||
)
|
||||
|
||||
idxmin = max_drawdown_df['drawdown'].idxmin()
|
||||
idxmin = max_drawdown_df['drawdown_relative'].idxmax() if relative \
|
||||
else max_drawdown_df['drawdown'].idxmin()
|
||||
if idxmin == 0:
|
||||
raise ValueError("No losing trade, therefore no drawdown.")
|
||||
high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]['high_value'].idxmax(), date_col]
|
||||
@@ -129,12 +150,10 @@ def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date'
|
||||
high_val = max_drawdown_df.loc[max_drawdown_df.iloc[:idxmin]
|
||||
['high_value'].idxmax(), 'cumulative']
|
||||
low_val = max_drawdown_df.loc[idxmin, 'cumulative']
|
||||
max_drawdown_rel = 0.0
|
||||
if high_val + starting_balance != 0:
|
||||
max_drawdown_rel = (high_val - low_val) / (high_val + starting_balance)
|
||||
max_drawdown_rel = max_drawdown_df.loc[idxmin, 'drawdown_relative']
|
||||
|
||||
return (
|
||||
abs(min(max_drawdown_df['drawdown'])),
|
||||
abs(max_drawdown_df.loc[idxmin, 'drawdown']),
|
||||
high_date,
|
||||
low_date,
|
||||
high_val,
|
||||
|
@@ -15,7 +15,7 @@ from freqtrade.constants import DATETIME_PRINT_FORMAT, UNLIMITED_STAKE_AMOUNT
|
||||
from freqtrade.data.history import get_timerange, load_data, refresh_data
|
||||
from freqtrade.enums import CandleType, ExitType, RunMode
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.exchange.exchange import timeframe_to_seconds
|
||||
from freqtrade.exchange import timeframe_to_seconds
|
||||
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
|
||||
from freqtrade.strategy.interface import IStrategy
|
||||
|
||||
|
@@ -3,6 +3,7 @@ from freqtrade.enums.backteststate import BacktestState
|
||||
from freqtrade.enums.candletype import CandleType
|
||||
from freqtrade.enums.exitchecktuple import ExitCheckTuple
|
||||
from freqtrade.enums.exittype import ExitType
|
||||
from freqtrade.enums.hyperoptstate import HyperoptState
|
||||
from freqtrade.enums.marginmode import MarginMode
|
||||
from freqtrade.enums.ordertypevalue import OrderTypeValues
|
||||
from freqtrade.enums.rpcmessagetype import RPCMessageType
|
||||
|
@@ -15,3 +15,9 @@ class ExitCheckTuple:
|
||||
@property
|
||||
def exit_flag(self):
|
||||
return self.exit_type != ExitType.NONE
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.exit_type == other.exit_type and self.exit_reason == other.exit_reason
|
||||
|
||||
def __repr__(self):
|
||||
return f"ExitCheckTuple({self.exit_type}, {self.exit_reason})"
|
||||
|
@@ -9,10 +9,12 @@ class ExitType(Enum):
|
||||
STOP_LOSS = "stop_loss"
|
||||
STOPLOSS_ON_EXCHANGE = "stoploss_on_exchange"
|
||||
TRAILING_STOP_LOSS = "trailing_stop_loss"
|
||||
LIQUIDATION = "liquidation"
|
||||
EXIT_SIGNAL = "exit_signal"
|
||||
FORCE_EXIT = "force_exit"
|
||||
EMERGENCY_EXIT = "emergency_exit"
|
||||
CUSTOM_EXIT = "custom_exit"
|
||||
PARTIAL_EXIT = "partial_exit"
|
||||
NONE = ""
|
||||
|
||||
def __str__(self):
|
||||
|
freqtrade/enums/hyperoptstate.py (new file, 12 additions)
@@ -0,0 +1,12 @@
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class HyperoptState(Enum):
|
||||
""" Hyperopt states """
|
||||
STARTUP = 1
|
||||
DATALOAD = 2
|
||||
INDICATORS = 3
|
||||
OPTIMIZE = 4
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.name.lower()}"
|
@@ -17,6 +17,8 @@ class RPCMessageType(Enum):
|
||||
PROTECTION_TRIGGER = 'protection_trigger'
|
||||
PROTECTION_TRIGGER_GLOBAL = 'protection_trigger_global'
|
||||
|
||||
STRATEGY_MSG = 'strategy_msg'
|
||||
|
||||
def __repr__(self):
|
||||
return self.value
|
||||
|
||||
|
@@ -9,12 +9,14 @@ from freqtrade.exchange.bitpanda import Bitpanda
|
||||
from freqtrade.exchange.bittrex import Bittrex
|
||||
from freqtrade.exchange.bybit import Bybit
|
||||
from freqtrade.exchange.coinbasepro import Coinbasepro
|
||||
from freqtrade.exchange.exchange import (available_exchanges, ccxt_exchanges,
|
||||
from freqtrade.exchange.exchange import (amount_to_contract_precision, amount_to_contracts,
|
||||
amount_to_precision, available_exchanges, ccxt_exchanges,
|
||||
contracts_to_amount, date_minus_candles,
|
||||
is_exchange_known_ccxt, is_exchange_officially_supported,
|
||||
market_is_active, timeframe_to_minutes, timeframe_to_msecs,
|
||||
timeframe_to_next_date, timeframe_to_prev_date,
|
||||
timeframe_to_seconds, validate_exchange,
|
||||
validate_exchanges)
|
||||
market_is_active, price_to_precision, timeframe_to_minutes,
|
||||
timeframe_to_msecs, timeframe_to_next_date,
|
||||
timeframe_to_prev_date, timeframe_to_seconds,
|
||||
validate_exchange, validate_exchanges)
|
||||
from freqtrade.exchange.ftx import Ftx
|
||||
from freqtrade.exchange.gateio import Gateio
|
||||
from freqtrade.exchange.hitbtc import Hitbtc
|
||||
|
@@ -52,12 +52,17 @@ class Binance(Exchange):
|
||||
|
||||
ordertype = 'stop' if self.trading_mode == TradingMode.FUTURES else 'stop_loss_limit'
|
||||
|
||||
return order['type'] == ordertype and (
|
||||
(side == "sell" and stop_loss > float(order['info']['stopPrice'])) or
|
||||
(side == "buy" and stop_loss < float(order['info']['stopPrice']))
|
||||
)
|
||||
return (
|
||||
order.get('stopPrice', None) is None
|
||||
or (
|
||||
order['type'] == ordertype
|
||||
and (
|
||||
(side == "sell" and stop_loss > float(order['stopPrice'])) or
|
||||
(side == "buy" and stop_loss < float(order['stopPrice']))
|
||||
)
|
||||
))
|
||||
|
||||
def get_tickers(self, symbols: List[str] = None, cached: bool = False) -> Dict:
|
||||
def get_tickers(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Dict:
|
||||
tickers = super().get_tickers(symbols=symbols, cached=cached)
|
||||
if self.trading_mode == TradingMode.FUTURES:
|
||||
# Binance's future result has no bid/ask values.
|
||||
@@ -95,6 +100,7 @@ class Binance(Exchange):
|
||||
async def _async_get_historic_ohlcv(self, pair: str, timeframe: str,
|
||||
since_ms: int, candle_type: CandleType,
|
||||
is_new_pair: bool = False, raise_: bool = False,
|
||||
until_ms: Optional[int] = None
|
||||
) -> Tuple[str, str, str, List]:
|
||||
"""
|
||||
Overwrite to introduce "fast new pair" functionality by detecting the pair's listing date
|
||||
@@ -115,7 +121,8 @@ class Binance(Exchange):
|
||||
since_ms=since_ms,
|
||||
is_new_pair=is_new_pair,
|
||||
raise_=raise_,
|
||||
candle_type=candle_type
|
||||
candle_type=candle_type,
|
||||
until_ms=until_ms,
|
||||
)
|
||||
|
||||
def funding_fee_cutoff(self, open_date: datetime):
|
||||
@@ -130,23 +137,27 @@ class Binance(Exchange):
|
||||
pair: str,
|
||||
open_rate: float, # Entry price of position
|
||||
is_short: bool,
|
||||
position: float, # Absolute value of position size
|
||||
amount: float,
|
||||
stake_amount: float,
|
||||
wallet_balance: float, # Or margin balance
|
||||
mm_ex_1: float = 0.0, # (Binance) Cross only
|
||||
upnl_ex_1: float = 0.0, # (Binance) Cross only
|
||||
) -> Optional[float]:
|
||||
"""
|
||||
Important: Must be fetching data from cached values as this is used by backtesting!
|
||||
MARGIN: https://www.binance.com/en/support/faq/f6b010588e55413aa58b7d63ee0125ed
|
||||
PERPETUAL: https://www.binance.com/en/support/faq/b3c689c1f50a44cabb3a84e663b81d93
|
||||
|
||||
:param exchange_name:
|
||||
:param open_rate: (EP1) Entry price of position
|
||||
:param open_rate: Entry price of position
|
||||
:param is_short: True if the trade is a short, false otherwise
|
||||
:param position: Absolute value of position size (in base currency)
|
||||
:param wallet_balance: (WB)
|
||||
:param amount: Absolute value of position size incl. leverage (in base currency)
|
||||
:param stake_amount: Stake amount - Collateral in settle currency.
|
||||
:param trading_mode: SPOT, MARGIN, FUTURES, etc.
|
||||
:param margin_mode: Either ISOLATED or CROSS
|
||||
:param wallet_balance: Amount of margin_mode in the wallet being used to trade
|
||||
Cross-Margin Mode: crossWalletBalance
|
||||
Isolated-Margin Mode: isolatedWalletBalance
|
||||
:param maintenance_amt:
|
||||
|
||||
# * Only required for Cross
|
||||
:param mm_ex_1: (TMM)
|
||||
@@ -158,12 +169,11 @@ class Binance(Exchange):
|
||||
"""
|
||||
|
||||
side_1 = -1 if is_short else 1
|
||||
position = abs(position)
|
||||
cross_vars = upnl_ex_1 - mm_ex_1 if self.margin_mode == MarginMode.CROSS else 0.0
|
||||
|
||||
# mm_ratio: Binance's formula specifies maintenance margin rate which is mm_ratio * 100%
|
||||
# maintenance_amt: (CUM) Maintenance Amount of position
|
||||
mm_ratio, maintenance_amt = self.get_maintenance_ratio_and_amt(pair, position)
|
||||
mm_ratio, maintenance_amt = self.get_maintenance_ratio_and_amt(pair, stake_amount)
|
||||
|
||||
if (maintenance_amt is None):
|
||||
raise OperationalException(
|
||||
@@ -175,9 +185,9 @@ class Binance(Exchange):
|
||||
return (
|
||||
(
|
||||
(wallet_balance + cross_vars + maintenance_amt) -
|
||||
(side_1 * position * open_rate)
|
||||
(side_1 * amount * open_rate)
|
||||
) / (
|
||||
(position * mm_ratio) - (side_1 * position)
|
||||
(amount * mm_ratio) - (side_1 * amount)
|
||||
)
|
||||
)
|
||||
else:
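
A quick numeric check of the isolated-margin branch above, with assumed values (long 1 contract at 2000, 200 wallet balance, 1% maintenance margin ratio, no maintenance amount, no cross terms):

side_1 = 1               # long position
amount = 1.0             # position size incl. leverage, in base currency
open_rate = 2000.0
wallet_balance = 200.0   # isolated collateral
mm_ratio = 0.01
maintenance_amt = 0.0
cross_vars = 0.0         # only non-zero for cross margin

liq_price = (
    (wallet_balance + cross_vars + maintenance_amt) - (side_1 * amount * open_rate)
) / (
    (amount * mm_ratio) - (side_1 * amount)
)
print(round(liq_price, 2))  # 1818.18 -> liquidation roughly 9% below the entry price
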
|
||||
|
@@ -29,3 +29,17 @@ class Bybit(Exchange):
|
||||
# (TradingMode.FUTURES, MarginMode.CROSS),
|
||||
# (TradingMode.FUTURES, MarginMode.ISOLATED)
|
||||
]
|
||||
|
||||
@property
|
||||
def _ccxt_config(self) -> Dict:
|
||||
# Parameters to add directly to ccxt sync/async initialization.
|
||||
# ccxt defaults to swap mode.
|
||||
config = {}
|
||||
if self.trading_mode == TradingMode.SPOT:
|
||||
config.update({
|
||||
"options": {
|
||||
"defaultType": "spot"
|
||||
}
|
||||
})
|
||||
config.update(super()._ccxt_config)
|
||||
return config
|
||||
|
@@ -2,6 +2,7 @@ import asyncio
|
||||
import logging
|
||||
import time
|
||||
from functools import wraps
|
||||
from typing import Any, Callable, Optional, TypeVar, cast, overload
|
||||
|
||||
from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError
|
||||
from freqtrade.mixins import LoggingMixin
|
||||
@@ -11,6 +12,14 @@ logger = logging.getLogger(__name__)
|
||||
__logging_mixin = None
|
||||
|
||||
|
||||
def _reset_logging_mixin():
|
||||
"""
|
||||
Reset global logging mixin - used in tests only.
|
||||
"""
|
||||
global __logging_mixin
|
||||
__logging_mixin = LoggingMixin(logger)
|
||||
|
||||
|
||||
def _get_logging_mixin():
|
||||
# Logging-mixin to cache kucoin responses
|
||||
# Only to be used in retrier
|
||||
@@ -37,6 +46,7 @@ MAP_EXCHANGE_CHILDCLASS = {
|
||||
'binanceje': 'binance',
|
||||
'binanceusdm': 'binance',
|
||||
'okex': 'okx',
|
||||
'gate': 'gateio',
|
||||
}
|
||||
|
||||
SUPPORTED_EXCHANGES = [
|
||||
@@ -54,17 +64,16 @@ EXCHANGE_HAS_REQUIRED = [
|
||||
'fetchOrder',
|
||||
'cancelOrder',
|
||||
'createOrder',
|
||||
# 'createLimitOrder', 'createMarketOrder',
|
||||
'fetchBalance',
|
||||
|
||||
# Public endpoints
|
||||
'loadMarkets',
|
||||
'fetchOHLCV',
|
||||
]
|
||||
|
||||
EXCHANGE_HAS_OPTIONAL = [
|
||||
# Private
|
||||
'fetchMyTrades', # Trades for order - fee detection
|
||||
'createLimitOrder', 'createMarketOrder', # Either OR for orders
|
||||
# 'setLeverage', # Margin/Futures trading
|
||||
# 'setMarginMode', # Margin/Futures trading
|
||||
# 'fetchFundingHistory', # Futures trading
|
||||
@@ -133,8 +142,22 @@ def retrier_async(f):
|
||||
return wrapper


def retrier(_func=None, retries=API_RETRY_COUNT):
def decorator(f):
F = TypeVar('F', bound=Callable[..., Any])


# Type shenanigans
@overload
def retrier(_func: F) -> F:
...


@overload
def retrier(*, retries=API_RETRY_COUNT) -> Callable[[F], F]:
...


def retrier(_func: Optional[F] = None, *, retries=API_RETRY_COUNT):
def decorator(f: F) -> F:
@wraps(f)
def wrapper(*args, **kwargs):
count = kwargs.pop('count', retries)
@@ -155,7 +178,7 @@ def retrier(_func=None, retries=API_RETRY_COUNT):
else:
logger.warning(msg + 'Giving up.')
raise ex
return wrapper
return cast(F, wrapper)
# Support both @retrier and @retrier(retries=2) syntax
if _func is None:
return decorator
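
The overloads above exist so the type checker accepts both decorator spellings. A generic, standalone sketch of the same "decorator with optional arguments" pattern (simplified retry loop, not the freqtrade implementation):

from functools import wraps
from typing import Any, Callable, Optional, TypeVar, cast, overload

F = TypeVar('F', bound=Callable[..., Any])
DEFAULT_RETRIES = 4  # stand-in for API_RETRY_COUNT


@overload
def retrier(_func: F) -> F: ...
@overload
def retrier(*, retries: int = DEFAULT_RETRIES) -> Callable[[F], F]: ...


def retrier(_func: Optional[F] = None, *, retries: int = DEFAULT_RETRIES):
    def decorator(f: F) -> F:
        @wraps(f)
        def wrapper(*args, **kwargs):
            last_exc: Optional[Exception] = None
            for _ in range(retries + 1):
                try:
                    return f(*args, **kwargs)
                except Exception as exc:   # the real code narrows the exception types
                    last_exc = exc
            raise last_exc
        return cast(F, wrapper)
    # Support both @retrier and @retrier(retries=2)
    return decorator if _func is None else decorator(_func)


@retrier
def fetch_ticker(): ...


@retrier(retries=2)
def fetch_balance(): ...
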
|
||||
|
@@ -1,9 +1,10 @@
|
||||
""" FTX exchange subclass """
|
||||
import logging
|
||||
from typing import Any, Dict, List, Tuple
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
import ccxt
|
||||
|
||||
from freqtrade.constants import BuySell
|
||||
from freqtrade.enums import MarginMode, TradingMode
|
||||
from freqtrade.exceptions import (DDosProtection, InsufficientFundsError, InvalidOrderException,
|
||||
OperationalException, TemporaryError)
|
||||
@@ -44,7 +45,7 @@ class Ftx(Exchange):
|
||||
|
||||
@retrier(retries=0)
|
||||
def stoploss(self, pair: str, amount: float, stop_price: float,
|
||||
order_types: Dict, side: str, leverage: float) -> Dict:
|
||||
order_types: Dict, side: BuySell, leverage: float) -> Dict:
|
||||
"""
|
||||
Creates a stoploss order.
|
||||
depending on order_types.stoploss configuration, uses 'market' or limit order.
|
||||
@@ -103,7 +104,7 @@ class Ftx(Exchange):
|
||||
raise OperationalException(e) from e
|
||||
|
||||
@retrier(retries=API_FETCH_ORDER_RETRY_COUNT)
|
||||
def fetch_stoploss_order(self, order_id: str, pair: str) -> Dict:
|
||||
def fetch_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict:
|
||||
if self._config['dry_run']:
|
||||
return self.fetch_dry_run_order(order_id)
|
||||
|
||||
@@ -115,9 +116,17 @@ class Ftx(Exchange):
|
||||
if len(order) == 1:
|
||||
if order[0].get('status') == 'closed':
|
||||
# Trigger order was triggered ...
|
||||
real_order_id = order[0].get('info', {}).get('orderId')
|
||||
real_order_id: Optional[str] = order[0].get('info', {}).get('orderId')
|
||||
# OrderId may be None for stoploss-market orders
|
||||
# But contains "average" in these cases.
|
||||
# So we need to get it through the endpoint
|
||||
# /conditional_orders/{conditional_order_id}/triggers
|
||||
if not real_order_id:
|
||||
res = self._api.privateGetConditionalOrdersConditionalOrderIdTriggers(
|
||||
params={'conditional_order_id': order_id})
|
||||
self._log_exchange_response('fetch_stoploss_order2', res)
|
||||
real_order_id = res['result'][0]['orderId'] if res.get(
|
||||
'result', []) else None
|
||||
|
||||
if real_order_id:
|
||||
order1 = self._api.fetch_order(real_order_id, pair)
|
||||
self._log_exchange_response('fetch_stoploss_order1', order1)
|
||||
@@ -144,7 +153,7 @@ class Ftx(Exchange):
|
||||
raise OperationalException(e) from e
|
||||
|
||||
@retrier
|
||||
def cancel_stoploss_order(self, order_id: str, pair: str) -> Dict:
|
||||
def cancel_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict:
|
||||
if self._config['dry_run']:
|
||||
return {}
|
||||
try:
|
||||
|
@@ -1,11 +1,13 @@
|
||||
""" Gate.io exchange subclass """
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
from freqtrade.constants import BuySell
|
||||
from freqtrade.enums import MarginMode, TradingMode
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.exchange import Exchange
|
||||
from freqtrade.misc import safe_value_fallback2
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -23,13 +25,16 @@ class Gateio(Exchange):
|
||||
|
||||
_ft_has: Dict = {
|
||||
"ohlcv_candle_limit": 1000,
|
||||
"ohlcv_volume_currency": "quote",
|
||||
"time_in_force_parameter": "timeInForce",
|
||||
"order_time_in_force": ['gtc', 'ioc'],
|
||||
"stoploss_order_types": {"limit": "limit"},
|
||||
"stoploss_on_exchange": True,
|
||||
}
|
||||
|
||||
_ft_has_futures: Dict = {
|
||||
"needs_trading_fees": True
|
||||
"needs_trading_fees": True,
|
||||
"fee_cost_in_contracts": False, # Set explicitly to false for clarity
|
||||
"order_props_in_contracts": ['amount', 'filled', 'remaining'],
|
||||
}
|
||||
|
||||
_supported_trading_mode_margin_pairs: List[Tuple[TradingMode, MarginMode]] = [
|
||||
@@ -40,13 +45,33 @@ class Gateio(Exchange):
|
||||
]
|
||||
|
||||
def validate_ordertypes(self, order_types: Dict) -> None:
|
||||
super().validate_ordertypes(order_types)
|
||||
|
||||
if self.trading_mode != TradingMode.FUTURES:
|
||||
if any(v == 'market' for k, v in order_types.items()):
|
||||
raise OperationalException(
|
||||
f'Exchange {self.name} does not support market orders.')
|
||||
|
||||
def _get_params(
|
||||
self,
|
||||
side: BuySell,
|
||||
ordertype: str,
|
||||
leverage: float,
|
||||
reduceOnly: bool,
|
||||
time_in_force: str = 'gtc',
|
||||
) -> Dict:
|
||||
params = super()._get_params(
|
||||
side=side,
|
||||
ordertype=ordertype,
|
||||
leverage=leverage,
|
||||
reduceOnly=reduceOnly,
|
||||
time_in_force=time_in_force,
|
||||
)
|
||||
if ordertype == 'market' and self.trading_mode == TradingMode.FUTURES:
|
||||
params['type'] = 'market'
|
||||
param = self._ft_has.get('time_in_force_parameter', '')
|
||||
params.update({param: 'ioc'})
|
||||
return params
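
For a futures market order, the override above forces an explicit order type and an IOC time-in-force; the parameter name is taken from _ft_has['time_in_force_parameter']. Illustration with an assumed parent params dict:

params = {'marginMode': 'isolated'}   # assumed result of the parent _get_params call
ordertype = 'market'
is_futures = True

if ordertype == 'market' and is_futures:
    params['type'] = 'market'
    params.update({'timeInForce': 'ioc'})

print(params)  # {'marginMode': 'isolated', 'type': 'market', 'timeInForce': 'ioc'}
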
|
||||
|
||||
def get_trades_for_order(self, order_id: str, pair: str, since: datetime,
|
||||
params: Optional[Dict] = None) -> List:
|
||||
trades = super().get_trades_for_order(order_id, pair, since, params)
|
||||
@@ -61,7 +86,8 @@ class Gateio(Exchange):
|
||||
pair_fees = self._trading_fees.get(pair, {})
|
||||
if pair_fees:
|
||||
for idx, trade in enumerate(trades):
|
||||
if trade.get('fee', {}).get('cost') is None:
|
||||
fee = trade.get('fee', {})
|
||||
if fee and fee.get('cost') is None:
|
||||
takerOrMaker = trade.get('takerOrMaker', 'taker')
|
||||
if pair_fees.get(takerOrMaker) is not None:
|
||||
trades[idx]['fee'] = {
|
||||
@@ -71,14 +97,31 @@ class Gateio(Exchange):
|
||||
}
|
||||
return trades
|
||||
|
||||
def fetch_stoploss_order(self, order_id: str, pair: str, params={}) -> Dict:
|
||||
return self.fetch_order(
|
||||
def get_order_id_conditional(self, order: Dict[str, Any]) -> str:
|
||||
if self.trading_mode == TradingMode.FUTURES:
|
||||
return safe_value_fallback2(order, order, 'id_stop', 'id')
|
||||
return order['id']
|
||||
|
||||
def fetch_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict:
|
||||
order = self.fetch_order(
|
||||
order_id=order_id,
|
||||
pair=pair,
|
||||
params={'stop': True}
|
||||
)
|
||||
if self.trading_mode == TradingMode.FUTURES:
|
||||
if order['status'] == 'closed':
|
||||
# Places a real order - which we need to fetch explicitly.
|
||||
new_orderid = order.get('info', {}).get('trade_id')
|
||||
if new_orderid:
|
||||
order1 = self.fetch_order(order_id=new_orderid, pair=pair, params=params)
|
||||
order1['id_stop'] = order1['id']
|
||||
order1['id'] = order_id
|
||||
order1['stopPrice'] = order.get('stopPrice')
|
||||
|
||||
def cancel_stoploss_order(self, order_id: str, pair: str, params={}) -> Dict:
|
||||
return order1
|
||||
return order
|
||||
|
||||
def cancel_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict:
|
||||
return self.cancel_order(
|
||||
order_id=order_id,
|
||||
pair=pair,
|
||||
@@ -90,5 +133,7 @@ class Gateio(Exchange):
|
||||
Verify stop_loss against stoploss-order value (limit or price)
|
||||
Returns True if adjustment is necessary.
|
||||
"""
|
||||
return ((side == "sell" and stop_loss > float(order['stopPrice'])) or
|
||||
(side == "buy" and stop_loss < float(order['stopPrice'])))
|
||||
return (order.get('stopPrice', None) is None or (
|
||||
side == "sell" and stop_loss > float(order['stopPrice'])) or
|
||||
(side == "buy" and stop_loss < float(order['stopPrice']))
|
||||
)
|
||||
|
@@ -27,7 +27,13 @@ class Huobi(Exchange):
|
||||
Verify stop_loss against stoploss-order value (limit or price)
|
||||
Returns True if adjustment is necessary.
|
||||
"""
|
||||
return order['type'] == 'stop' and stop_loss > float(order['stopPrice'])
|
||||
return (
|
||||
order.get('stopPrice', None) is None
|
||||
or (
|
||||
order['type'] == 'stop'
|
||||
and stop_loss > float(order['stopPrice'])
|
||||
)
|
||||
)
|
||||
|
||||
def _get_stop_params(self, ordertype: str, stop_price: float) -> Dict:
|
||||
|
||||
|
@@ -6,6 +6,7 @@ from typing import Any, Dict, List, Optional, Tuple
|
||||
import ccxt
|
||||
from pandas import DataFrame
|
||||
|
||||
from freqtrade.constants import BuySell
|
||||
from freqtrade.enums import MarginMode, TradingMode
|
||||
from freqtrade.exceptions import (DDosProtection, InsufficientFundsError, InvalidOrderException,
|
||||
OperationalException, TemporaryError)
|
||||
@@ -22,6 +23,7 @@ class Kraken(Exchange):
|
||||
_ft_has: Dict = {
|
||||
"stoploss_on_exchange": True,
|
||||
"ohlcv_candle_limit": 720,
|
||||
"ohlcv_has_history": False,
|
||||
"trades_pagination": "id",
|
||||
"trades_pagination_arg": "since",
|
||||
"mark_ohlcv_timeframe": "4h",
|
||||
@@ -43,7 +45,7 @@ class Kraken(Exchange):
|
||||
return (parent_check and
|
||||
market.get('darkpool', False) is False)
|
||||
|
||||
def get_tickers(self, symbols: List[str] = None, cached: bool = False) -> Dict:
|
||||
def get_tickers(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Dict:
|
||||
# Only fetch tickers for current stake currency
|
||||
# Otherwise the request for kraken becomes too large.
|
||||
symbols = list(self.get_markets(quote_currencies=[self._config['stake_currency']]))
|
||||
@@ -95,7 +97,7 @@ class Kraken(Exchange):
|
||||
|
||||
@retrier(retries=0)
|
||||
def stoploss(self, pair: str, amount: float, stop_price: float,
|
||||
order_types: Dict, side: str, leverage: float) -> Dict:
|
||||
order_types: Dict, side: BuySell, leverage: float) -> Dict:
|
||||
"""
|
||||
Creates a stoploss market order.
|
||||
Stoploss market orders is the only stoploss type supported by kraken.
|
||||
@@ -165,12 +167,14 @@ class Kraken(Exchange):
|
||||
|
||||
def _get_params(
|
||||
self,
|
||||
side: BuySell,
|
||||
ordertype: str,
|
||||
leverage: float,
|
||||
reduceOnly: bool,
|
||||
time_in_force: str = 'gtc'
|
||||
) -> Dict:
|
||||
params = super()._get_params(
|
||||
side=side,
|
||||
ordertype=ordertype,
|
||||
leverage=leverage,
|
||||
reduceOnly=reduceOnly,
|
||||
|
@@ -33,7 +33,10 @@ class Kucoin(Exchange):
|
||||
Verify stop_loss against stoploss-order value (limit or price)
|
||||
Returns True if adjustment is necessary.
|
||||
"""
|
||||
return order['info'].get('stop') is not None and stop_loss > float(order['stopPrice'])
|
||||
return (
|
||||
order.get('stopPrice', None) is None
|
||||
or stop_loss > float(order['stopPrice'])
|
||||
)
|
||||
|
||||
def _get_stop_params(self, ordertype: str, stop_price: float) -> Dict:
|
||||
|
||||
|
@@ -1,11 +1,13 @@
|
||||
import logging
|
||||
from typing import Dict, List, Tuple
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
import ccxt
|
||||
|
||||
from freqtrade.constants import BuySell
|
||||
from freqtrade.enums import MarginMode, TradingMode
|
||||
from freqtrade.enums.candletype import CandleType
|
||||
from freqtrade.exceptions import DDosProtection, OperationalException, TemporaryError
|
||||
from freqtrade.exchange import Exchange
|
||||
from freqtrade.exchange import Exchange, date_minus_candles
|
||||
from freqtrade.exchange.common import retrier
|
||||
|
||||
|
||||
@@ -19,12 +21,13 @@ class Okx(Exchange):
|
||||
"""
|
||||
|
||||
_ft_has: Dict = {
|
||||
"ohlcv_candle_limit": 300,
|
||||
"ohlcv_candle_limit": 100, # Warning, special case with data prior to X months
|
||||
"mark_ohlcv_timeframe": "4h",
|
||||
"funding_fee_timeframe": "8h",
|
||||
}
|
||||
_ft_has_futures: Dict = {
|
||||
"tickers_have_quoteVolume": False,
|
||||
"fee_cost_in_contracts": True,
|
||||
}
|
||||
|
||||
_supported_trading_mode_margin_pairs: List[Tuple[TradingMode, MarginMode]] = [
|
||||
@@ -34,14 +37,71 @@ class Okx(Exchange):
|
||||
(TradingMode.FUTURES, MarginMode.ISOLATED),
|
||||
]
|
||||
|
||||
net_only = True
|
||||
|
||||
_ccxt_params: Dict = {'options': {'brokerId': 'ffb5405ad327SUDE'}}
|
||||
|
||||
def ohlcv_candle_limit(
|
||||
self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None) -> int:
|
||||
"""
|
||||
Exchange ohlcv candle limit
|
||||
OKX has the following behaviour:
|
||||
* 300 candles for up-to-date data
|
||||
* 100 candles for historic data
|
||||
* 100 candles for additional candles (not futures or spot).
|
||||
:param timeframe: Timeframe to check
|
||||
:param candle_type: Candle-type
|
||||
:param since_ms: Starting timestamp
|
||||
:return: Candle limit as integer
|
||||
"""
|
||||
if (
|
||||
candle_type in (CandleType.FUTURES, CandleType.SPOT) and
|
||||
(not since_ms or since_ms > (date_minus_candles(timeframe, 300).timestamp() * 1000))
|
||||
):
|
||||
return 300
|
||||
|
||||
return super().ohlcv_candle_limit(timeframe, candle_type, since_ms)
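
Sketch of the rule implemented above (helper names assumed; the real code uses freqtrade's date_minus_candles): requests for recent spot or futures data may use the 300-candle limit, while anything reaching further back falls to the 100-candle historic limit.

from datetime import datetime, timedelta, timezone
from typing import Optional


def okx_candle_limit(candle_type: str, since_ms: Optional[int],
                     timeframe_minutes: int = 5) -> int:
    cutoff = datetime.now(timezone.utc) - timedelta(minutes=300 * timeframe_minutes)
    if candle_type in ('futures', 'spot') and (
            since_ms is None or since_ms > cutoff.timestamp() * 1000):
        return 300
    return 100


print(okx_candle_limit('spot', None))           # 300 -> up-to-date data
print(okx_candle_limit('spot', 1577836800000))  # 100 -> history starting in 2020
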
|
||||
|
||||
@retrier
|
||||
def additional_exchange_init(self) -> None:
|
||||
"""
|
||||
Additional exchange initialization logic.
|
||||
.api will be available at this point.
|
||||
Must be overridden in child methods if required.
|
||||
"""
|
||||
try:
|
||||
if self.trading_mode == TradingMode.FUTURES and not self._config['dry_run']:
|
||||
accounts = self._api.fetch_accounts()
|
||||
if len(accounts) > 0:
|
||||
self.net_only = accounts[0].get('info', {}).get('posMode') == 'net_mode'
|
||||
except ccxt.DDoSProtection as e:
|
||||
raise DDosProtection(e) from e
|
||||
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
|
||||
raise TemporaryError(
|
||||
f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e
|
||||
except ccxt.BaseError as e:
|
||||
raise OperationalException(e) from e
|
||||
|
||||
def _get_posSide(self, side: BuySell, reduceOnly: bool):
|
||||
if self.net_only:
|
||||
return 'net'
|
||||
if not reduceOnly:
|
||||
# Enter
|
||||
return 'long' if side == 'buy' else 'short'
|
||||
else:
|
||||
# Exit
|
||||
return 'long' if side == 'sell' else 'short'
|
||||
|
||||
def _get_params(
|
||||
self,
|
||||
side: BuySell,
|
||||
ordertype: str,
|
||||
leverage: float,
|
||||
reduceOnly: bool,
|
||||
time_in_force: str = 'gtc',
|
||||
) -> Dict:
|
||||
params = super()._get_params(
|
||||
side=side,
|
||||
ordertype=ordertype,
|
||||
leverage=leverage,
|
||||
reduceOnly=reduceOnly,
|
||||
@@ -49,10 +109,11 @@ class Okx(Exchange):
|
||||
)
|
||||
if self.trading_mode == TradingMode.FUTURES and self.margin_mode:
|
||||
params['tdMode'] = self.margin_mode.value
|
||||
params['posSide'] = self._get_posSide(side, reduceOnly)
|
||||
return params
|
||||
|
||||
@retrier
|
||||
def _lev_prep(self, pair: str, leverage: float, side: str):
|
||||
def _lev_prep(self, pair: str, leverage: float, side: BuySell):
|
||||
if self.trading_mode != TradingMode.SPOT and self.margin_mode is not None:
|
||||
try:
|
||||
# TODO-lev: Test me properly (check mgnMode passed)
|
||||
@@ -61,7 +122,7 @@ class Okx(Exchange):
|
||||
symbol=pair,
|
||||
params={
|
||||
"mgnMode": self.margin_mode.value,
|
||||
# "posSide": "net"",
|
||||
"posSide": self._get_posSide(side, False),
|
||||
})
|
||||
except ccxt.DDoSProtection as e:
|
||||
raise DDosProtection(e) from e
|
||||
@@ -85,4 +146,4 @@ class Okx(Exchange):
|
||||
return float('inf')
|
||||
|
||||
pair_tiers = self._leverage_tiers[pair]
|
||||
return pair_tiers[-1]['max'] / leverage
|
||||
return pair_tiers[-1]['maxNotional'] / leverage
|
||||
|
freqtrade/freqai/__init__.py (new file, 0 additions)
freqtrade/freqai/data_drawer.py (new file, 608 additions)
@@ -0,0 +1,608 @@
|
||||
import collections
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import shutil
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Tuple, TypedDict
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import rapidjson
|
||||
from joblib import dump, load
|
||||
from joblib.externals import cloudpickle
|
||||
from numpy.typing import NDArray
|
||||
from pandas import DataFrame
|
||||
|
||||
from freqtrade.configuration import TimeRange
|
||||
from freqtrade.data.history import load_pair_history
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
from freqtrade.strategy.interface import IStrategy
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class pair_info(TypedDict):
|
||||
model_filename: str
|
||||
first: bool
|
||||
trained_timestamp: int
|
||||
priority: int
|
||||
data_path: str
|
||||
extras: dict
|
||||
|
||||
|
||||
class FreqaiDataDrawer:
|
||||
"""
|
||||
Class aimed at holding all pair models/info in memory for better inferencing/retraining/saving
|
||||
/loading to/from disk.
|
||||
This object remains persistent throughout live/dry.
|
||||
|
||||
Record of contribution:
|
||||
FreqAI was developed by a group of individuals who all contributed specific skillsets to the
|
||||
project.
|
||||
|
||||
Conception and software development:
|
||||
Robert Caulk @robcaulk
|
||||
|
||||
Theoretical brainstorming:
|
||||
Elin Törnquist @th0rntwig
|
||||
|
||||
Code review, software architecture brainstorming:
|
||||
@xmatthias
|
||||
|
||||
Beta testing and bug reporting:
|
||||
@bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm
|
||||
Juha Nykänen @suikula, Wagner Costa @wagnercosta, Johan Vlugt @Jooopieeert
|
||||
"""
|
||||
|
||||
def __init__(self, full_path: Path, config: dict, follow_mode: bool = False):
|
||||
|
||||
self.config = config
|
||||
self.freqai_info = config.get("freqai", {})
|
||||
# dictionary holding all pair metadata necessary to load in from disk
|
||||
self.pair_dict: Dict[str, pair_info] = {}
|
||||
# dictionary holding all actively inferenced models in memory given a model filename
|
||||
self.model_dictionary: Dict[str, Any] = {}
|
||||
self.model_return_values: Dict[str, DataFrame] = {}
|
||||
self.historic_data: Dict[str, Dict[str, DataFrame]] = {}
|
||||
self.historic_predictions: Dict[str, DataFrame] = {}
|
||||
self.follower_dict: Dict[str, pair_info] = {}
|
||||
self.full_path = full_path
|
||||
self.follower_name: str = self.config.get("bot_name", "follower1")
|
||||
self.follower_dict_path = Path(
|
||||
self.full_path / f"follower_dictionary-{self.follower_name}.json"
|
||||
)
|
||||
self.historic_predictions_path = Path(self.full_path / "historic_predictions.pkl")
|
||||
self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json")
|
||||
self.follow_mode = follow_mode
|
||||
if follow_mode:
|
||||
self.create_follower_dict()
|
||||
self.load_drawer_from_disk()
|
||||
self.load_historic_predictions_from_disk()
|
||||
self.training_queue: Dict[str, int] = {}
|
||||
self.history_lock = threading.Lock()
|
||||
self.save_lock = threading.Lock()
|
||||
self.pair_dict_lock = threading.Lock()
|
||||
self.old_DBSCAN_eps: Dict[str, float] = {}
|
||||
self.empty_pair_dict: pair_info = {
|
||||
"model_filename": "", "trained_timestamp": 0,
|
||||
"priority": 1, "first": True, "data_path": "", "extras": {}}
|
||||
|
||||
def load_drawer_from_disk(self):
|
||||
"""
|
||||
Locate and load a previously saved data drawer full of all pair model metadata in
|
||||
present model folder.
|
||||
:return: bool - whether or not the drawer was located
|
||||
"""
|
||||
exists = self.pair_dictionary_path.is_file()
|
||||
if exists:
|
||||
with open(self.pair_dictionary_path, "r") as fp:
|
||||
self.pair_dict = json.load(fp)
|
||||
elif not self.follow_mode:
|
||||
logger.info("Could not find existing datadrawer, starting from scratch")
|
||||
else:
|
||||
logger.warning(
|
||||
f"Follower could not find pair_dictionary at {self.full_path} "
|
||||
"sending null values back to strategy"
|
||||
)
|
||||
|
||||
return exists
|
||||
|
||||
def load_historic_predictions_from_disk(self):
|
||||
"""
|
||||
Locate and load previously saved historic predictions.
|
||||
:return: bool - whether or not the drawer was located
|
||||
"""
|
||||
exists = self.historic_predictions_path.is_file()
|
||||
if exists:
|
||||
with open(self.historic_predictions_path, "rb") as fp:
|
||||
self.historic_predictions = cloudpickle.load(fp)
|
||||
logger.info(
|
||||
f"Found existing historic predictions at {self.full_path}, but beware "
|
||||
"that statistics may be inaccurate if the bot has been offline for "
|
||||
"an extended period of time."
|
||||
)
|
||||
elif not self.follow_mode:
|
||||
logger.info("Could not find existing historic_predictions, starting from scratch")
|
||||
else:
|
||||
logger.warning(
|
||||
f"Follower could not find historic predictions at {self.full_path} "
|
||||
"sending null values back to strategy"
|
||||
)
|
||||
|
||||
return exists
|
||||
|
||||
def save_historic_predictions_to_disk(self):
|
||||
"""
|
||||
Save data drawer full of all pair model metadata in present model folder.
|
||||
"""
|
||||
with open(self.historic_predictions_path, "wb") as fp:
|
||||
cloudpickle.dump(self.historic_predictions, fp, protocol=cloudpickle.DEFAULT_PROTOCOL)
|
||||
|
||||
def save_drawer_to_disk(self):
|
||||
"""
|
||||
Save data drawer full of all pair model metadata in present model folder.
|
||||
"""
|
||||
with self.save_lock:
|
||||
with open(self.pair_dictionary_path, 'w') as fp:
|
||||
rapidjson.dump(self.pair_dict, fp, default=self.np_encoder,
|
||||
number_mode=rapidjson.NM_NATIVE)
|
||||
|
||||
def save_follower_dict_to_disk(self):
|
||||
"""
|
||||
Save follower dictionary to disk (used by strategy for persistent prediction targets)
|
||||
"""
|
||||
with open(self.follower_dict_path, "w") as fp:
|
||||
rapidjson.dump(self.follower_dict, fp, default=self.np_encoder,
|
||||
number_mode=rapidjson.NM_NATIVE)
|
||||
|
||||
def create_follower_dict(self):
|
||||
"""
|
||||
Create a dictionary for each follower to maintain unique persistent prediction targets
|
||||
"""
|
||||
|
||||
whitelist_pairs = self.config.get("exchange", {}).get("pair_whitelist")
|
||||
|
||||
exists = self.follower_dict_path.is_file()
|
||||
|
||||
if exists:
|
||||
logger.info("Found an existing follower dictionary")
|
||||
|
||||
for pair in whitelist_pairs:
|
||||
self.follower_dict[pair] = {}
|
||||
|
||||
self.save_follower_dict_to_disk()
|
||||
|
||||
def np_encoder(self, object):
|
||||
if isinstance(object, np.generic):
|
||||
return object.item()
|
||||
|
||||
def get_pair_dict_info(self, pair: str) -> Tuple[str, int, bool]:
|
||||
"""
|
||||
Locate and load existing model metadata from persistent storage. If not located,
|
||||
create a new one and append the current pair to it and prepare it for its first
|
||||
training
|
||||
:param pair: str: pair to lookup
|
||||
:return:
|
||||
model_filename: str = unique filename used for loading persistent objects from disk
|
||||
trained_timestamp: int = the last time the coin was trained
|
||||
return_null_array: bool = Follower could not find pair metadata
|
||||
"""
|
||||
|
||||
pair_dict = self.pair_dict.get(pair)
|
||||
data_path_set = self.pair_dict.get(pair, self.empty_pair_dict).get("data_path", "")
|
||||
return_null_array = False
|
||||
|
||||
if pair_dict:
|
||||
model_filename = pair_dict["model_filename"]
|
||||
trained_timestamp = pair_dict["trained_timestamp"]
|
||||
elif not self.follow_mode:
|
||||
self.pair_dict[pair] = self.empty_pair_dict.copy()
|
||||
model_filename = ""
|
||||
trained_timestamp = 0
|
||||
self.pair_dict[pair]["priority"] = len(self.pair_dict)
|
||||
|
||||
if not data_path_set and self.follow_mode:
|
||||
logger.warning(
|
||||
f"Follower could not find current pair {pair} in "
|
||||
f"pair_dictionary at path {self.full_path}, sending null values "
|
||||
"back to strategy."
|
||||
)
|
||||
trained_timestamp = 0
|
||||
model_filename = ''
|
||||
return_null_array = True
|
||||
|
||||
return model_filename, trained_timestamp, return_null_array
|
||||
|
||||
def set_pair_dict_info(self, metadata: dict) -> None:
|
||||
pair_in_dict = self.pair_dict.get(metadata["pair"])
|
||||
if pair_in_dict:
|
||||
return
|
||||
else:
|
||||
self.pair_dict[metadata["pair"]] = self.empty_pair_dict.copy()
|
||||
self.pair_dict[metadata["pair"]]["priority"] = len(self.pair_dict)
|
||||
|
||||
return
|
||||
|
||||
def pair_to_end_of_training_queue(self, pair: str) -> None:
|
||||
# march all pairs up in the queue
|
||||
with self.pair_dict_lock:
|
||||
for p in self.pair_dict:
|
||||
self.pair_dict[p]["priority"] -= 1
|
||||
# send pair to end of queue
|
||||
self.pair_dict[pair]["priority"] = len(self.pair_dict)
|
||||
|
||||
def set_initial_return_values(self, pair: str, pred_df: DataFrame) -> None:
|
||||
"""
|
||||
Set the initial return values to the historical predictions dataframe. This avoids needing
|
||||
to repredict on historical candles, and also stores historical predictions despite
|
||||
retrainings (so stored predictions are true predictions, not just inferencing on trained
|
||||
data)
|
||||
"""
|
||||
|
||||
hist_df = self.historic_predictions
|
||||
len_diff = len(hist_df[pair].index) - len(pred_df.index)
|
||||
if len_diff < 0:
|
||||
df_concat = pd.concat([pred_df.iloc[:abs(len_diff)], hist_df[pair]],
|
||||
ignore_index=True, keys=hist_df[pair].keys())
|
||||
else:
|
||||
df_concat = hist_df[pair].tail(len(pred_df.index)).reset_index(drop=True)
|
||||
df_concat = df_concat.fillna(0)
|
||||
self.model_return_values[pair] = df_concat
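
Toy illustration of the alignment rule described in the docstring above: if the strategy dataframe brings more candles than there are stored predictions, the overhang of fresh predictions is prepended; otherwise the stored predictions are simply tailed to the dataframe length.

import pandas as pd

hist = pd.DataFrame({'pred': [1, 2, 3]})         # stored historic predictions
pred = pd.DataFrame({'pred': [0, 0, 0, 0, 0]})   # predictions for the current dataframe

len_diff = len(hist.index) - len(pred.index)     # -2 -> history is two candles short
if len_diff < 0:
    out = pd.concat([pred.iloc[:abs(len_diff)], hist], ignore_index=True)
else:
    out = hist.tail(len(pred.index)).reset_index(drop=True)
print(out['pred'].tolist())  # [0, 0, 1, 2, 3]
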
|
||||
|
||||
def append_model_predictions(self, pair: str, predictions: DataFrame,
|
||||
do_preds: NDArray[np.int_],
|
||||
dk: FreqaiDataKitchen, len_df: int) -> None:
|
||||
"""
|
||||
Append model predictions to historic predictions dataframe, then set the
|
||||
strategy return dataframe to the tail of the historic predictions. The length of
|
||||
the tail is equivalent to the length of the dataframe that entered FreqAI from
|
||||
the strategy originally. Doing this allows FreqUI to always display the correct
|
||||
historic predictions.
|
||||
"""
|
||||
|
||||
index = self.historic_predictions[pair].index[-1:]
|
||||
columns = self.historic_predictions[pair].columns
|
||||
|
||||
nan_df = pd.DataFrame(np.nan, index=index, columns=columns)
|
||||
self.historic_predictions[pair] = pd.concat(
|
||||
[self.historic_predictions[pair], nan_df], ignore_index=True, axis=0)
|
||||
df = self.historic_predictions[pair]
|
||||
|
||||
# model outputs and associated statistics
|
||||
for label in predictions.columns:
|
||||
df[label].iloc[-1] = predictions[label].iloc[-1]
|
||||
if df[label].dtype == object:
|
||||
continue
|
||||
df[f"{label}_mean"].iloc[-1] = dk.data["labels_mean"][label]
|
||||
df[f"{label}_std"].iloc[-1] = dk.data["labels_std"][label]
|
||||
|
||||
# outlier indicators
|
||||
df["do_predict"].iloc[-1] = do_preds[-1]
|
||||
if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0:
|
||||
df["DI_values"].iloc[-1] = dk.DI_values[-1]
|
||||
|
||||
# extra values the user added within custom prediction model
|
||||
if dk.data['extra_returns_per_train']:
|
||||
rets = dk.data['extra_returns_per_train']
|
||||
for return_str in rets:
|
||||
df[return_str].iloc[-1] = rets[return_str]
|
||||
|
||||
self.model_return_values[pair] = df.tail(len_df).reset_index(drop=True)
|
||||
|
||||
def attach_return_values_to_return_dataframe(
|
||||
self, pair: str, dataframe: DataFrame) -> DataFrame:
|
||||
"""
|
||||
Attach the return values to the strat dataframe
|
||||
:param dataframe: DataFrame = strategy dataframe
|
||||
:return: DataFrame = strat dataframe with return values attached
|
||||
"""
|
||||
df = self.model_return_values[pair]
|
||||
to_keep = [col for col in dataframe.columns if not col.startswith("&")]
|
||||
dataframe = pd.concat([dataframe[to_keep], df], axis=1)
|
||||
return dataframe
|
||||
|
||||
def return_null_values_to_strategy(self, dataframe: DataFrame, dk: FreqaiDataKitchen) -> None:
|
||||
"""
|
||||
Build 0 filled dataframe to return to strategy
|
||||
"""
|
||||
|
||||
dk.find_features(dataframe)
|
||||
|
||||
full_labels = dk.label_list + dk.unique_class_list
|
||||
|
||||
for label in full_labels:
|
||||
dataframe[label] = 0
|
||||
dataframe[f"{label}_mean"] = 0
|
||||
dataframe[f"{label}_std"] = 0
|
||||
|
||||
dataframe["do_predict"] = 0
|
||||
|
||||
if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0:
|
||||
dataframe["DI_values"] = 0
|
||||
|
||||
if dk.data['extra_returns_per_train']:
|
||||
rets = dk.data['extra_returns_per_train']
|
||||
for return_str in rets:
|
||||
dataframe[return_str] = 0
|
||||
|
||||
dk.return_dataframe = dataframe
|
||||
|
||||
def purge_old_models(self) -> None:
|
||||
|
||||
model_folders = [x for x in self.full_path.iterdir() if x.is_dir()]
|
||||
|
||||
pattern = re.compile(r"sub-train-(\w+)_(\d{10})")
|
||||
|
||||
delete_dict: Dict[str, Any] = {}
|
||||
|
||||
for dir in model_folders:
|
||||
result = pattern.match(str(dir.name))
|
||||
if result is None:
|
||||
break
|
||||
coin = result.group(1)
|
||||
timestamp = result.group(2)
|
||||
|
||||
if coin not in delete_dict:
|
||||
delete_dict[coin] = {}
|
||||
delete_dict[coin]["num_folders"] = 1
|
||||
delete_dict[coin]["timestamps"] = {int(timestamp): dir}
|
||||
else:
|
||||
delete_dict[coin]["num_folders"] += 1
|
||||
delete_dict[coin]["timestamps"][int(timestamp)] = dir
|
||||
|
||||
for coin in delete_dict:
|
||||
if delete_dict[coin]["num_folders"] > 2:
|
||||
sorted_dict = collections.OrderedDict(
|
||||
sorted(delete_dict[coin]["timestamps"].items())
|
||||
)
|
||||
num_delete = len(sorted_dict) - 2
|
||||
deleted = 0
|
||||
for k, v in sorted_dict.items():
|
||||
if deleted >= num_delete:
|
||||
break
|
||||
logger.info(f"Freqai purging old model file {v}")
|
||||
shutil.rmtree(v)
|
||||
deleted += 1
|
||||
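The purge logic above can be illustrated with a small standalone sketch (the folder names are hypothetical; the pattern is the one used by save_data): the two newest sub-train folders per coin are kept and anything older is deleted.

import re

folders = [
    "sub-train-BTC_1656000000",
    "sub-train-BTC_1656100000",
    "sub-train-BTC_1656200000",
    "sub-train-ETH_1656000000",
]
pattern = re.compile(r"sub-train-(\w+)_(\d{10})")

per_coin: dict = {}
for name in folders:
    match = pattern.match(name)
    if match is None:
        continue
    per_coin.setdefault(match.group(1), {})[int(match.group(2))] = name

for coin, stamps in per_coin.items():
    # keep the two newest sub-train folders per coin; older ones would be removed
    purge = [stamps[ts] for ts in sorted(stamps)[:-2]]
    print(coin, purge)  # BTC ['sub-train-BTC_1656000000'], ETH []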
|
||||
def update_follower_metadata(self):
|
||||
# follower needs to load from disk to get any changes made by leader to pair_dict
|
||||
self.load_drawer_from_disk()
|
||||
if self.config.get("freqai", {}).get("purge_old_models", False):
|
||||
self.purge_old_models()
|
||||
|
||||
# Functions pulled back from FreqaiDataKitchen because they relied on DataDrawer
|
||||
|
||||
def save_data(self, model: Any, coin: str, dk: FreqaiDataKitchen) -> None:
|
||||
"""
|
||||
Saves all data associated with a model for a single sub-train time range
|
||||
:params:
|
||||
:model: User trained model which can be reused for inferencing to generate
|
||||
predictions
|
||||
"""
|
||||
|
||||
if not dk.data_path.is_dir():
|
||||
dk.data_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
save_path = Path(dk.data_path)
|
||||
|
||||
# Save the trained model
|
||||
if not dk.keras:
|
||||
dump(model, save_path / f"{dk.model_filename}_model.joblib")
|
||||
else:
|
||||
model.save(save_path / f"{dk.model_filename}_model.h5")
|
||||
|
||||
if dk.svm_model is not None:
|
||||
dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib")
|
||||
|
||||
dk.data["data_path"] = str(dk.data_path)
|
||||
dk.data["model_filename"] = str(dk.model_filename)
|
||||
dk.data["training_features_list"] = list(dk.data_dictionary["train_features"].columns)
|
||||
dk.data["label_list"] = dk.label_list
|
||||
# store the metadata
|
||||
with open(save_path / f"{dk.model_filename}_metadata.json", "w") as fp:
|
||||
rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)
|
||||
|
||||
# save the train data to file so we can check preds for area of applicability later
|
||||
dk.data_dictionary["train_features"].to_pickle(
|
||||
save_path / f"{dk.model_filename}_trained_df.pkl"
|
||||
)
|
||||
|
||||
dk.data_dictionary["train_dates"].to_pickle(
|
||||
save_path / f"{dk.model_filename}_trained_dates_df.pkl"
|
||||
)
|
||||
|
||||
if self.freqai_info["feature_parameters"].get("principal_component_analysis"):
|
||||
cloudpickle.dump(
|
||||
dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb")
|
||||
)
|
||||
|
||||
# if self.live:
|
||||
self.model_dictionary[coin] = model
|
||||
self.pair_dict[coin]["model_filename"] = dk.model_filename
|
||||
self.pair_dict[coin]["data_path"] = str(dk.data_path)
|
||||
self.save_drawer_to_disk()
|
||||
|
||||
return
|
||||
|
||||
def load_data(self, coin: str, dk: FreqaiDataKitchen) -> Any:
|
||||
"""
|
||||
loads all data required to make a prediction on a sub-train time range
|
||||
:returns:
|
||||
:model: User trained model which can be inferenced for new predictions
|
||||
"""
|
||||
|
||||
if not self.pair_dict[coin]["model_filename"]:
|
||||
return None
|
||||
|
||||
if dk.live:
|
||||
dk.model_filename = self.pair_dict[coin]["model_filename"]
|
||||
dk.data_path = Path(self.pair_dict[coin]["data_path"])
|
||||
if self.freqai_info.get("follow_mode", False):
|
||||
# follower can be on a different system which is rsynced from the leader:
|
||||
dk.data_path = Path(
|
||||
self.config["user_data_dir"]
|
||||
/ "models"
|
||||
/ dk.data_path.parts[-2]
|
||||
/ dk.data_path.parts[-1]
|
||||
)
|
||||
|
||||
with open(dk.data_path / f"{dk.model_filename}_metadata.json", "r") as fp:
|
||||
dk.data = json.load(fp)
|
||||
dk.training_features_list = dk.data["training_features_list"]
|
||||
dk.label_list = dk.data["label_list"]
|
||||
|
||||
dk.data_dictionary["train_features"] = pd.read_pickle(
|
||||
dk.data_path / f"{dk.model_filename}_trained_df.pkl"
|
||||
)
|
||||
|
||||
# try to access model in memory instead of loading object from disk to save time
|
||||
if dk.live and coin in self.model_dictionary:
|
||||
model = self.model_dictionary[coin]
|
||||
elif not dk.keras:
|
||||
model = load(dk.data_path / f"{dk.model_filename}_model.joblib")
|
||||
else:
|
||||
from tensorflow import keras
|
||||
|
||||
model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
|
||||
|
||||
if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
|
||||
dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
|
||||
|
||||
if not model:
|
||||
raise OperationalException(
|
||||
f"Unable to load model, ensure model exists at " f"{dk.data_path} "
|
||||
)
|
||||
|
||||
if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
|
||||
dk.pca = cloudpickle.load(
|
||||
open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "rb")
|
||||
)
|
||||
|
||||
return model
|
||||
|
||||
def update_historic_data(self, strategy: IStrategy, dk: FreqaiDataKitchen) -> None:
|
||||
"""
|
||||
Append new candles to our stored historic data (in memory) so that
|
||||
we do not need to load candle history from disk and we don't need to
|
||||
ping the exchange multiple times for the same candle.
|
||||
:params:
|
||||
dataframe: DataFrame = strategy provided dataframe
|
||||
"""
|
||||
feat_params = self.freqai_info["feature_parameters"]
|
||||
with self.history_lock:
|
||||
history_data = self.historic_data
|
||||
|
||||
for pair in dk.all_pairs:
|
||||
for tf in feat_params.get("include_timeframes"):
|
||||
|
||||
# check if newest candle is already appended
|
||||
df_dp = strategy.dp.get_pair_dataframe(pair, tf)
|
||||
if len(df_dp.index) == 0:
|
||||
continue
|
||||
if str(history_data[pair][tf].iloc[-1]["date"]) == str(
|
||||
df_dp.iloc[-1:]["date"].iloc[-1]
|
||||
):
|
||||
continue
|
||||
|
||||
try:
|
||||
index = (
|
||||
df_dp.loc[
|
||||
df_dp["date"] == history_data[pair][tf].iloc[-1]["date"]
|
||||
].index[0]
|
||||
+ 1
|
||||
)
|
||||
except IndexError:
|
||||
logger.warning(
|
||||
f"Unable to update pair history for {pair}. "
|
||||
"If this does not resolve itself after 1 additional candle, "
|
||||
"please report the error to #freqai discord channel"
|
||||
)
|
||||
return
|
||||
|
||||
history_data[pair][tf] = pd.concat(
|
||||
[
|
||||
history_data[pair][tf],
|
||||
df_dp.iloc[index:],
|
||||
],
|
||||
ignore_index=True,
|
||||
axis=0,
|
||||
)
|
||||
|
||||
def load_all_pair_histories(self, timerange: TimeRange, dk: FreqaiDataKitchen) -> None:
|
||||
"""
|
||||
Load pair histories for all whitelist and corr_pairlist pairs.
|
||||
Only called once upon startup of bot.
|
||||
:params:
|
||||
timerange: TimeRange = full timerange required to populate all indicators
|
||||
for training according to user defined train_period_days
|
||||
"""
|
||||
history_data = self.historic_data
|
||||
|
||||
for pair in dk.all_pairs:
|
||||
if pair not in history_data:
|
||||
history_data[pair] = {}
|
||||
for tf in self.freqai_info["feature_parameters"].get("include_timeframes"):
|
||||
history_data[pair][tf] = load_pair_history(
|
||||
datadir=self.config["datadir"],
|
||||
timeframe=tf,
|
||||
pair=pair,
|
||||
timerange=timerange,
|
||||
data_format=self.config.get("dataformat_ohlcv", "json"),
|
||||
candle_type=self.config.get("trading_mode", "spot"),
|
||||
)
|
||||
|
||||
def get_base_and_corr_dataframes(
|
||||
self, timerange: TimeRange, pair: str, dk: FreqaiDataKitchen
|
||||
) -> Tuple[Dict[Any, Any], Dict[Any, Any]]:
|
||||
"""
|
||||
Searches through our historic_data in memory and returns the dataframes relevant
|
||||
to the present pair.
|
||||
:params:
|
||||
timerange: TimeRange = full timerange required to populate all indicators
|
||||
for training according to user defined train_period_days
|
||||
metadata: dict = strategy furnished pair metadata
|
||||
"""
|
||||
with self.history_lock:
|
||||
corr_dataframes: Dict[Any, Any] = {}
|
||||
base_dataframes: Dict[Any, Any] = {}
|
||||
historic_data = self.historic_data
|
||||
pairs = self.freqai_info["feature_parameters"].get(
|
||||
"include_corr_pairlist", []
|
||||
)
|
||||
|
||||
for tf in self.freqai_info["feature_parameters"].get("include_timeframes"):
|
||||
base_dataframes[tf] = dk.slice_dataframe(timerange, historic_data[pair][tf])
|
||||
if pairs:
|
||||
for p in pairs:
|
||||
if pair in p:
|
||||
continue  # don't repeat anything from whitelist
|
||||
if p not in corr_dataframes:
|
||||
corr_dataframes[p] = {}
|
||||
corr_dataframes[p][tf] = dk.slice_dataframe(
|
||||
timerange, historic_data[p][tf]
|
||||
)
|
||||
|
||||
return corr_dataframes, base_dataframes
|
||||
|
||||
# to be used if we want to send predictions directly to the follower instead of forcing
|
||||
# the follower to load models and run inference
|
||||
# def save_model_return_values_to_disk(self) -> None:
|
||||
# with open(self.full_path / str('model_return_values.json'), "w") as fp:
|
||||
# json.dump(self.model_return_values, fp, default=self.np_encoder)
|
||||
|
||||
# def load_model_return_values_from_disk(self, dk: FreqaiDataKitchen) -> FreqaiDataKitchen:
|
||||
# exists = Path(self.full_path / str('model_return_values.json')).resolve().exists()
|
||||
# if exists:
|
||||
# with open(self.full_path / str('model_return_values.json'), "r") as fp:
|
||||
# self.model_return_values = json.load(fp)
|
||||
# elif not self.follow_mode:
|
||||
# logger.info("Could not find existing datadrawer, starting from scratch")
|
||||
# else:
|
||||
# logger.warning(f'Follower could not find pair_dictionary at {self.full_path} '
|
||||
# 'sending null values back to strategy')
|
||||
|
||||
# return exists, dk
|
1088  freqtrade/freqai/data_kitchen.py  (new file)
684  freqtrade/freqai/freqai_interface.py  (new file)
@@ -0,0 +1,684 @@
|
||||
# import contextlib
|
||||
import datetime
|
||||
import logging
|
||||
import shutil
|
||||
import threading
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from pathlib import Path
|
||||
from threading import Lock
|
||||
from typing import Any, Dict, Tuple
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from numpy.typing import NDArray
|
||||
from pandas import DataFrame
|
||||
|
||||
from freqtrade.configuration import TimeRange
|
||||
from freqtrade.enums import RunMode
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.exchange import timeframe_to_seconds
|
||||
from freqtrade.freqai.data_drawer import FreqaiDataDrawer
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
from freqtrade.strategy.interface import IStrategy
|
||||
|
||||
|
||||
pd.options.mode.chained_assignment = None
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def threaded(fn):
|
||||
def wrapper(*args, **kwargs):
|
||||
threading.Thread(target=fn, args=args, kwargs=kwargs).start()
|
||||
|
||||
return wrapper
|
||||
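A minimal usage sketch of the @threaded decorator above (the decorator is repeated here so the snippet runs standalone); the function name refresh_models is hypothetical.

import threading

def threaded(fn):
    def wrapper(*args, **kwargs):
        threading.Thread(target=fn, args=args, kwargs=kwargs).start()
    return wrapper

@threaded
def refresh_models(pair):
    print(f"checking {pair} for retraining")  # stand-in for the scanning work

refresh_models("BTC/USDT")  # returns immediately; the body runs on its own thread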
|
||||
|
||||
class IFreqaiModel(ABC):
|
||||
"""
|
||||
Class containing all tools for training and prediction in the strategy.
|
||||
Base*PredictionModels inherit from this class.
|
||||
|
||||
Record of contribution:
|
||||
FreqAI was developed by a group of individuals who all contributed specific skillsets to the
|
||||
project.
|
||||
|
||||
Conception and software development:
|
||||
Robert Caulk @robcaulk
|
||||
|
||||
Theoretical brainstorming:
|
||||
Elin Törnquist @th0rntwig
|
||||
|
||||
Code review, software architecture brainstorming:
|
||||
@xmatthias
|
||||
|
||||
Beta testing and bug reporting:
|
||||
@bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm
|
||||
Juha Nykänen @suikula, Wagner Costa @wagnercosta, Johan Vlugt @Jooopieeert
|
||||
"""
|
||||
|
||||
def __init__(self, config: Dict[str, Any]) -> None:
|
||||
|
||||
self.config = config
|
||||
self.assert_config(self.config)
|
||||
self.freqai_info: Dict[str, Any] = config["freqai"]
|
||||
self.data_split_parameters: Dict[str, Any] = config.get("freqai", {}).get(
|
||||
"data_split_parameters", {})
|
||||
self.model_training_parameters: Dict[str, Any] = config.get("freqai", {}).get(
|
||||
"model_training_parameters", {})
|
||||
self.feature_parameters = config.get("freqai", {}).get("feature_parameters")
|
||||
self.retrain = False
|
||||
self.first = True
|
||||
self.set_full_path()
|
||||
self.follow_mode: bool = self.freqai_info.get("follow_mode", False)
|
||||
self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode)
|
||||
self.identifier: str = self.freqai_info.get("identifier", "no_id_provided")
|
||||
self.scanning = False
|
||||
self.keras: bool = self.freqai_info.get("keras", False)
|
||||
if self.keras and self.freqai_info.get("feature_parameters", {}).get("DI_threshold", 0):
|
||||
self.freqai_info["feature_parameters"]["DI_threshold"] = 0
|
||||
logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
|
||||
self.CONV_WIDTH = self.freqai_info.get("conv_width", 2)
|
||||
self.pair_it = 0
|
||||
self.pair_it_train = 0
|
||||
self.total_pairs = len(self.config.get("exchange", {}).get("pair_whitelist"))
|
||||
self.last_trade_database_summary: DataFrame = {}
|
||||
self.current_trade_database_summary: DataFrame = {}
|
||||
self.analysis_lock = Lock()
|
||||
self.inference_time: float = 0
|
||||
self.train_time: float = 0
|
||||
self.begin_time: float = 0
|
||||
self.begin_time_train: float = 0
|
||||
self.base_tf_seconds = timeframe_to_seconds(self.config['timeframe'])
|
||||
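For orientation, a hedged example of the "freqai" configuration section read by __init__ above; only keys referenced in this file are shown and every value is illustrative, not a recommendation.

# illustrative values only; these keys are the ones consumed by IFreqaiModel above
freqai_config = {
    "identifier": "example_id",
    "keras": False,
    "conv_width": 2,
    "follow_mode": False,
    "purge_old_models": True,
    "fit_live_predictions_candles": 100,
    "data_split_parameters": {},
    "model_training_parameters": {},
    "feature_parameters": {
        "include_timeframes": ["5m", "1h"],
        "include_corr_pairlist": ["BTC/USDT", "ETH/USDT"],
        "DI_threshold": 0,
        "principal_component_analysis": False,
        "use_SVM_to_remove_outliers": False,
    },
}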
|
||||
def assert_config(self, config: Dict[str, Any]) -> None:
|
||||
|
||||
if not config.get("freqai", {}):
|
||||
raise OperationalException("No freqai parameters found in configuration file.")
|
||||
|
||||
def start(self, dataframe: DataFrame, metadata: dict, strategy: IStrategy) -> DataFrame:
|
||||
"""
|
||||
Entry point to the FreqaiModel from a specific pair, it will train a new model if
|
||||
necessary before making the prediction.
|
||||
|
||||
:param dataframe: Full dataframe coming from strategy - it contains entire
|
||||
backtesting timerange + additional historical data necessary to train
|
||||
the model.
|
||||
:param metadata: pair metadata coming from strategy.
|
||||
:param strategy: Strategy to train on
|
||||
"""
|
||||
|
||||
self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE)
|
||||
self.dd.set_pair_dict_info(metadata)
|
||||
|
||||
if self.live:
|
||||
self.inference_timer('start')
|
||||
self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
|
||||
dk = self.start_live(dataframe, metadata, strategy, self.dk)
|
||||
|
||||
# For backtesting, each pair enters and then gets trained for each window along the
|
||||
# sliding window defined by "train_period_days" (training window) and "live_retrain_hours"
|
||||
# (backtest window, i.e. window immediately following the training window).
|
||||
# FreqAI slides the window and sequentially builds the backtesting results before returning
|
||||
# the concatenated results for the full backtesting period back to the strategy.
|
||||
elif not self.follow_mode:
|
||||
self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
|
||||
logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
|
||||
with self.analysis_lock:
|
||||
dataframe = self.dk.use_strategy_to_populate_indicators(
|
||||
strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
|
||||
)
|
||||
dk = self.start_backtesting(dataframe, metadata, self.dk)
|
||||
|
||||
dataframe = dk.remove_features_from_df(dk.return_dataframe)
|
||||
self.clean_up()
|
||||
if self.live:
|
||||
self.inference_timer('stop')
|
||||
return dataframe
|
||||
|
||||
def clean_up(self):
|
||||
"""
|
||||
Objects that should be handled by GC already between coins, but
|
||||
are explicitly shown here to help demonstrate the non-persistence of these
|
||||
objects.
|
||||
"""
|
||||
self.model = None
|
||||
self.dk = None
|
||||
|
||||
@threaded
|
||||
def start_scanning(self, strategy: IStrategy) -> None:
|
||||
"""
|
||||
Function designed to constantly scan pairs for retraining on a separate thread (intracandle)
|
||||
to improve model youth. This function is agnostic to data preparation/collection/storage;
|
||||
it simply trains on whatever data is available in self.dd.
|
||||
:param strategy: IStrategy = The user defined strategy class
|
||||
"""
|
||||
while 1:
|
||||
time.sleep(1)
|
||||
for pair in self.config.get("exchange", {}).get("pair_whitelist"):
|
||||
|
||||
(_, trained_timestamp, _) = self.dd.get_pair_dict_info(pair)
|
||||
|
||||
if self.dd.pair_dict[pair]["priority"] != 1:
|
||||
continue
|
||||
dk = FreqaiDataKitchen(self.config, self.live, pair)
|
||||
dk.set_paths(pair, trained_timestamp)
|
||||
(
|
||||
retrain,
|
||||
new_trained_timerange,
|
||||
data_load_timerange,
|
||||
) = dk.check_if_new_training_required(trained_timestamp)
|
||||
dk.set_paths(pair, new_trained_timerange.stopts)
|
||||
|
||||
if retrain:
|
||||
self.train_timer('start')
|
||||
self.train_model_in_series(
|
||||
new_trained_timerange, pair, strategy, dk, data_load_timerange
|
||||
)
|
||||
self.train_timer('stop')
|
||||
|
||||
self.dd.save_historic_predictions_to_disk()
|
||||
|
||||
def start_backtesting(
|
||||
self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen
|
||||
) -> FreqaiDataKitchen:
|
||||
"""
|
||||
The main broad execution for backtesting. For backtesting, each pair enters and then gets
|
||||
trained for each window along the sliding window defined by "train_period_days"
|
||||
(training window) and "backtest_period_days" (backtest window, i.e. window immediately
|
||||
following the training window). FreqAI slides the window and sequentially builds
|
||||
the backtesting results before returning the concatenated results for the full
|
||||
backtesting period back to the strategy.
|
||||
:param dataframe: DataFrame = strategy passed dataframe
|
||||
:param metadata: Dict = pair metadata
|
||||
:param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
|
||||
:return:
|
||||
FreqaiDataKitchen = Data management/analysis tool associated to present pair only
|
||||
"""
|
||||
|
||||
self.pair_it += 1
|
||||
train_it = 0
|
||||
# Loop enforcing the sliding window training/backtesting paradigm
|
||||
# tr_train is the training time range e.g. 1 historical month
|
||||
# tr_backtest is the backtesting time range e.g. the week directly
|
||||
# following tr_train. Both of these windows slide through the
|
||||
# entire backtest
|
||||
for tr_train, tr_backtest in zip(dk.training_timeranges, dk.backtesting_timeranges):
|
||||
(_, _, _) = self.dd.get_pair_dict_info(metadata["pair"])
|
||||
train_it += 1
|
||||
total_trains = len(dk.backtesting_timeranges)
|
||||
self.training_timerange = tr_train
|
||||
dataframe_train = dk.slice_dataframe(tr_train, dataframe)
|
||||
dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe)
|
||||
|
||||
trained_timestamp = tr_train
|
||||
tr_train_startts_str = datetime.datetime.utcfromtimestamp(tr_train.startts).strftime(
|
||||
"%Y-%m-%d %H:%M:%S"
|
||||
)
|
||||
tr_train_stopts_str = datetime.datetime.utcfromtimestamp(tr_train.stopts).strftime(
|
||||
"%Y-%m-%d %H:%M:%S"
|
||||
)
|
||||
logger.info(
|
||||
f"Training {metadata['pair']}, {self.pair_it}/{self.total_pairs} pairs"
|
||||
f" from {tr_train_startts_str} to {tr_train_stopts_str}, {train_it}/{total_trains} "
|
||||
"trains"
|
||||
)
|
||||
|
||||
dk.data_path = Path(
|
||||
dk.full_path
|
||||
/
|
||||
f"sub-train-{metadata['pair'].split('/')[0]}_{int(trained_timestamp.stopts)}"
|
||||
)
|
||||
if not self.model_exists(
|
||||
metadata["pair"], dk, trained_timestamp=int(trained_timestamp.stopts)
|
||||
):
|
||||
dk.find_features(dataframe_train)
|
||||
self.model = self.train(dataframe_train, metadata["pair"], dk)
|
||||
self.dd.pair_dict[metadata["pair"]]["trained_timestamp"] = int(
|
||||
trained_timestamp.stopts)
|
||||
dk.set_new_model_names(metadata["pair"], trained_timestamp)
|
||||
self.dd.save_data(self.model, metadata["pair"], dk)
|
||||
else:
|
||||
self.model = self.dd.load_data(metadata["pair"], dk)
|
||||
|
||||
self.check_if_feature_list_matches_strategy(dataframe_train, dk)
|
||||
|
||||
pred_df, do_preds = self.predict(dataframe_backtest, dk)
|
||||
|
||||
dk.append_predictions(pred_df, do_preds)
|
||||
|
||||
dk.fill_predictions(dataframe)
|
||||
|
||||
return dk
|
||||
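A toy illustration (not from the source) of the train/backtest window pairing iterated in start_backtesting above; the date strings are placeholders for the timeranges built by the data kitchen.

training_timeranges = ["20220101-20220131", "20220108-20220207"]
backtesting_timeranges = ["20220201-20220207", "20220208-20220214"]

# each training window is followed by the backtest window that immediately succeeds it
for tr_train, tr_backtest in zip(training_timeranges, backtesting_timeranges):
    print(f"train on {tr_train}, then backtest {tr_backtest}")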
|
||||
def start_live(
|
||||
self, dataframe: DataFrame, metadata: dict, strategy: IStrategy, dk: FreqaiDataKitchen
|
||||
) -> FreqaiDataKitchen:
|
||||
"""
|
||||
The main broad execution for dry/live. This function will check if a retraining should be
|
||||
performed, and if so, retrain and reset the model.
|
||||
:param dataframe: DataFrame = strategy passed dataframe
|
||||
:param metadata: Dict = pair metadata
|
||||
:param strategy: IStrategy = currently employed strategy
|
||||
dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
|
||||
:returns:
|
||||
dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
|
||||
"""
|
||||
|
||||
# update follower
|
||||
if self.follow_mode:
|
||||
self.dd.update_follower_metadata()
|
||||
|
||||
# get the model metadata associated with the current pair
|
||||
(_, trained_timestamp, return_null_array) = self.dd.get_pair_dict_info(metadata["pair"])
|
||||
|
||||
# if the metadata doesn't exist, the follower returns null arrays to strategy
|
||||
if self.follow_mode and return_null_array:
|
||||
logger.info("Returning null array from follower to strategy")
|
||||
self.dd.return_null_values_to_strategy(dataframe, dk)
|
||||
return dk
|
||||
|
||||
# append the historic data once per round
|
||||
if self.dd.historic_data:
|
||||
self.dd.update_historic_data(strategy, dk)
|
||||
logger.debug(f'Updating historic data on pair {metadata["pair"]}')
|
||||
|
||||
if not self.follow_mode:
|
||||
|
||||
(_, new_trained_timerange, data_load_timerange) = dk.check_if_new_training_required(
|
||||
trained_timestamp
|
||||
)
|
||||
dk.set_paths(metadata["pair"], new_trained_timerange.stopts)
|
||||
|
||||
# download candle history if it is not already in memory
|
||||
if not self.dd.historic_data:
|
||||
logger.info(
|
||||
"Downloading all training data for all pairs in whitelist and "
|
||||
"corr_pairlist, this may take a while if you do not have the "
|
||||
"data saved"
|
||||
)
|
||||
dk.download_all_data_for_training(data_load_timerange, strategy.dp)
|
||||
self.dd.load_all_pair_histories(data_load_timerange, dk)
|
||||
|
||||
if not self.scanning:
|
||||
self.scanning = True
|
||||
self.start_scanning(strategy)
|
||||
|
||||
elif self.follow_mode:
|
||||
dk.set_paths(metadata["pair"], trained_timestamp)
|
||||
logger.info(
|
||||
"FreqAI instance set to follow_mode, finding existing pair "
|
||||
f"using { self.identifier }"
|
||||
)
|
||||
|
||||
# load the model and associated data into the data kitchen
|
||||
self.model = self.dd.load_data(metadata["pair"], dk)
|
||||
|
||||
with self.analysis_lock:
|
||||
dataframe = self.dk.use_strategy_to_populate_indicators(
|
||||
strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
|
||||
)
|
||||
|
||||
if not self.model:
|
||||
logger.warning(
|
||||
f"No model ready for {metadata['pair']}, returning null values to strategy."
|
||||
)
|
||||
self.dd.return_null_values_to_strategy(dataframe, dk)
|
||||
return dk
|
||||
|
||||
# ensure user is feeding the correct indicators to the model
|
||||
self.check_if_feature_list_matches_strategy(dataframe, dk)
|
||||
|
||||
self.build_strategy_return_arrays(dataframe, dk, metadata["pair"], trained_timestamp)
|
||||
|
||||
return dk
|
||||
|
||||
def build_strategy_return_arrays(
|
||||
self, dataframe: DataFrame, dk: FreqaiDataKitchen, pair: str, trained_timestamp: int
|
||||
) -> None:
|
||||
|
||||
# hold the historical predictions in memory so we are sending back
|
||||
# correct array to strategy
|
||||
|
||||
if pair not in self.dd.model_return_values:
|
||||
# first predictions are made on entire historical candle set coming from strategy. This
|
||||
# allows FreqUI to show full return values.
|
||||
pred_df, do_preds = self.predict(dataframe, dk)
|
||||
if pair not in self.dd.historic_predictions:
|
||||
self.set_initial_historic_predictions(pred_df, dk, pair)
|
||||
self.dd.set_initial_return_values(pair, pred_df)
|
||||
|
||||
dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)
|
||||
return
|
||||
elif self.dk.check_if_model_expired(trained_timestamp):
|
||||
pred_df = DataFrame(np.zeros((2, len(dk.label_list))), columns=dk.label_list)
|
||||
do_preds = np.ones(2, dtype=np.int_) * 2
|
||||
dk.DI_values = np.zeros(2)
|
||||
logger.warning(
|
||||
f"Model expired for {pair}, returning null values to strategy. Strategy "
|
||||
"construction should take care to consider this event with "
|
||||
"prediction == 0 and do_predict == 2"
|
||||
)
|
||||
else:
|
||||
# remaining predictions are made only on the most recent candles for performance and
|
||||
# historical accuracy reasons.
|
||||
pred_df, do_preds = self.predict(dataframe.iloc[-self.CONV_WIDTH:], dk, first=False)
|
||||
|
||||
if self.freqai_info.get('fit_live_predictions_candles', 0) and self.live:
|
||||
self.fit_live_predictions(dk, pair)
|
||||
self.dd.append_model_predictions(pair, pred_df, do_preds, dk, len(dataframe))
|
||||
dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)
|
||||
|
||||
return
|
||||
|
||||
def check_if_feature_list_matches_strategy(
|
||||
self, dataframe: DataFrame, dk: FreqaiDataKitchen
|
||||
) -> None:
|
||||
"""
|
||||
Ensure user is passing the proper feature set if they are reusing an `identifier` pointing
|
||||
to a folder holding existing models.
|
||||
:param dataframe: DataFrame = strategy provided dataframe
|
||||
:param dk: FreqaiDataKitchen = non-persistent data container/analyzer for
|
||||
current coin/bot loop
|
||||
"""
|
||||
dk.find_features(dataframe)
|
||||
if "training_features_list_raw" in dk.data:
|
||||
feature_list = dk.data["training_features_list_raw"]
|
||||
else:
|
||||
feature_list = dk.training_features_list
|
||||
if dk.training_features_list != feature_list:
|
||||
raise OperationalException(
|
||||
"Trying to access pretrained model with `identifier` "
|
||||
"but found different features furnished by current strategy."
|
||||
"Change `identifier` to train from scratch, or ensure the"
|
||||
"strategy is furnishing the same features as the pretrained"
|
||||
"model"
|
||||
)
|
||||
|
||||
def data_cleaning_train(self, dk: FreqaiDataKitchen) -> None:
|
||||
"""
|
||||
Base data cleaning method for train
|
||||
Any function inside this method should drop training data points from the filtered_dataframe
|
||||
based on user decided logic. See FreqaiDataKitchen::use_SVM_to_remove_outliers() for an
|
||||
example of how outlier data points are dropped from the dataframe used for training.
|
||||
"""
|
||||
|
||||
if self.freqai_info["feature_parameters"].get(
|
||||
"principal_component_analysis", False
|
||||
):
|
||||
dk.principal_component_analysis()
|
||||
|
||||
if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False):
|
||||
dk.use_SVM_to_remove_outliers(predict=False)
|
||||
|
||||
if self.freqai_info["feature_parameters"].get("DI_threshold", 0):
|
||||
dk.data["avg_mean_dist"] = dk.compute_distances()
|
||||
|
||||
if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False):
|
||||
if dk.pair in self.dd.old_DBSCAN_eps:
|
||||
eps = self.dd.old_DBSCAN_eps[dk.pair]
|
||||
else:
|
||||
eps = None
|
||||
dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps)
|
||||
self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps']
|
||||
|
||||
def data_cleaning_predict(self, dk: FreqaiDataKitchen, dataframe: DataFrame) -> None:
|
||||
"""
|
||||
Base data cleaning method for predict.
|
||||
These functions each modify dk.do_predict, which is a dataframe with length equal
|
||||
to the number of candles coming from and returning to the strategy. Inside do_predict,
|
||||
1 allows prediction and < 0 signals to the strategy that the model is not confident in
|
||||
the prediction.
|
||||
See FreqaiDataKitchen::remove_outliers() for an example
|
||||
of how the do_predict vector is modified. do_predict is ultimately passed back to strategy
|
||||
for buy signals.
|
||||
"""
|
||||
if self.freqai_info["feature_parameters"].get(
|
||||
"principal_component_analysis", False
|
||||
):
|
||||
dk.pca_transform(dataframe)
|
||||
|
||||
if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False):
|
||||
dk.use_SVM_to_remove_outliers(predict=True)
|
||||
|
||||
if self.freqai_info["feature_parameters"].get("DI_threshold", 0):
|
||||
dk.check_if_pred_in_training_spaces()
|
||||
|
||||
if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False):
|
||||
dk.use_DBSCAN_to_remove_outliers(predict=True)
|
||||
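A small sketch of how a strategy might consume the do_predict column described above, assuming the usual convention that 1 marks a confident prediction; column names are illustrative.

import pandas as pd

df = pd.DataFrame({"&-s_close": [0.02, -0.01, 0.03], "do_predict": [1, 0, 1]})
confident = df[df["do_predict"] == 1]  # rows the model is confident about
print(len(confident))  # 2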
|
||||
def model_exists(
|
||||
self,
|
||||
pair: str,
|
||||
dk: FreqaiDataKitchen,
|
||||
trained_timestamp: int = None,
|
||||
model_filename: str = "",
|
||||
scanning: bool = False,
|
||||
) -> bool:
|
||||
"""
|
||||
Given a pair and path, check if a model already exists
|
||||
:param pair: pair e.g. BTC/USD
|
||||
:param path: path to model
|
||||
:return:
|
||||
:boolean: whether the model file exists or not.
|
||||
"""
|
||||
coin, _ = pair.split("/")
|
||||
|
||||
if not self.live:
|
||||
dk.model_filename = model_filename = f"cb_{coin.lower()}_{trained_timestamp}"
|
||||
|
||||
path_to_modelfile = Path(dk.data_path / f"{model_filename}_model.joblib")
|
||||
file_exists = path_to_modelfile.is_file()
|
||||
if file_exists and not scanning:
|
||||
logger.info("Found model at %s", dk.data_path / dk.model_filename)
|
||||
elif not scanning:
|
||||
logger.info("Could not find model at %s", dk.data_path / dk.model_filename)
|
||||
return file_exists
|
||||
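An illustrative reconstruction of the filename convention checked in model_exists above; the identifier path is hypothetical.

from pathlib import Path

coin, trained_timestamp = "BTC", 1656000000
model_filename = f"cb_{coin.lower()}_{trained_timestamp}"
path_to_modelfile = Path("user_data/models/example_id") / f"{model_filename}_model.joblib"
print(path_to_modelfile)  # user_data/models/example_id/cb_btc_1656000000_model.joblib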
|
||||
def set_full_path(self) -> None:
|
||||
self.full_path = Path(
|
||||
self.config["user_data_dir"] / "models" / f"{self.freqai_info['identifier']}"
|
||||
)
|
||||
self.full_path.mkdir(parents=True, exist_ok=True)
|
||||
shutil.copy(
|
||||
self.config["config_files"][0],
|
||||
Path(self.full_path, Path(self.config["config_files"][0]).name),
|
||||
)
|
||||
|
||||
def train_model_in_series(
|
||||
self,
|
||||
new_trained_timerange: TimeRange,
|
||||
pair: str,
|
||||
strategy: IStrategy,
|
||||
dk: FreqaiDataKitchen,
|
||||
data_load_timerange: TimeRange,
|
||||
):
|
||||
"""
|
||||
Retrieve data and train model.
|
||||
:param new_trained_timerange: TimeRange = the timerange to train the model on
|
||||
:param metadata: dict = strategy provided metadata
|
||||
:param strategy: IStrategy = user defined strategy object
|
||||
:param dk: FreqaiDataKitchen = non-persistent data container for current coin/loop
|
||||
:param data_load_timerange: TimeRange = the amount of data to be loaded
|
||||
for populate_any_indicators
|
||||
(larger than new_trained_timerange so that
|
||||
new_trained_timerange does not contain any NaNs)
|
||||
"""
|
||||
|
||||
corr_dataframes, base_dataframes = self.dd.get_base_and_corr_dataframes(
|
||||
data_load_timerange, pair, dk
|
||||
)
|
||||
|
||||
with self.analysis_lock:
|
||||
unfiltered_dataframe = dk.use_strategy_to_populate_indicators(
|
||||
strategy, corr_dataframes, base_dataframes, pair
|
||||
)
|
||||
|
||||
unfiltered_dataframe = dk.slice_dataframe(new_trained_timerange, unfiltered_dataframe)
|
||||
|
||||
# find the features indicated by strategy and store in datakitchen
|
||||
dk.find_features(unfiltered_dataframe)
|
||||
|
||||
model = self.train(unfiltered_dataframe, pair, dk)
|
||||
|
||||
self.dd.pair_dict[pair]["trained_timestamp"] = new_trained_timerange.stopts
|
||||
dk.set_new_model_names(pair, new_trained_timerange)
|
||||
self.dd.pair_dict[pair]["first"] = False
|
||||
if self.dd.pair_dict[pair]["priority"] == 1 and self.scanning:
|
||||
self.dd.pair_to_end_of_training_queue(pair)
|
||||
self.dd.save_data(model, pair, dk)
|
||||
|
||||
if self.freqai_info.get("purge_old_models", False):
|
||||
self.dd.purge_old_models()
|
||||
|
||||
def set_initial_historic_predictions(
|
||||
self, pred_df: DataFrame, dk: FreqaiDataKitchen, pair: str
|
||||
) -> None:
|
||||
"""
|
||||
This function is called only if the datadrawer failed to load an
|
||||
existing set of historic predictions. In this case, it builds
|
||||
the structure and sets fake predictions off the first training
|
||||
data. After that, FreqAI will append new real predictions to the
|
||||
set of historic predictions.
|
||||
|
||||
These values are used to generate live statistics which can be used
|
||||
in the strategy for adaptive values. E.g. &*_mean/std are quantities
|
||||
that can be computed based on live predictions from the set of historical
|
||||
predictions. Those values can be used in the user strategy to better
|
||||
assess prediction rarity, and thus wait for probabilistically favorable
|
||||
entries relative to the live historical predictions.
|
||||
|
||||
If the user reuses an identifier on a subsequent instance,
|
||||
this function will not be called. In that case, "real" predictions
|
||||
will be appended to the loaded set of historic predictions.
|
||||
:param: df: DataFrame = the dataframe containing the training feature data
|
||||
:param: model: Any = A model which was `fit` using a common library such as
|
||||
catboost or lightgbm
|
||||
:param: dk: FreqaiDataKitchen = object containing methods for data analysis
|
||||
:param: pair: str = current pair
|
||||
"""
|
||||
|
||||
self.dd.historic_predictions[pair] = pred_df
|
||||
hist_preds_df = self.dd.historic_predictions[pair]
|
||||
|
||||
for label in hist_preds_df.columns:
|
||||
if hist_preds_df[label].dtype == object:
|
||||
continue
|
||||
hist_preds_df[f'{label}_mean'] = 0
|
||||
hist_preds_df[f'{label}_std'] = 0
|
||||
|
||||
hist_preds_df['do_predict'] = 0
|
||||
|
||||
if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0:
|
||||
hist_preds_df['DI_values'] = 0
|
||||
|
||||
for return_str in dk.data['extra_returns_per_train']:
|
||||
hist_preds_df[return_str] = 0
|
||||
|
||||
# # for keras type models, the conv_window needs to be prepended so
|
||||
# # viewing is correct in frequi
|
||||
if self.freqai_info.get('keras', False):
|
||||
n_lost_points = self.freqai_info.get('conv_width', 2)
|
||||
zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))),
|
||||
columns=hist_preds_df.columns)
|
||||
self.dd.historic_predictions[pair] = pd.concat(
|
||||
[zeros_df, hist_preds_df], axis=0, ignore_index=True)
|
||||
|
||||
def fit_live_predictions(self, dk: FreqaiDataKitchen, pair: str) -> None:
|
||||
"""
|
||||
Fit the labels with a gaussian distribution
|
||||
"""
|
||||
import scipy as spy
|
||||
|
||||
# add classes from classifier label types if used
|
||||
full_labels = dk.label_list + dk.unique_class_list
|
||||
|
||||
num_candles = self.freqai_info.get("fit_live_predictions_candles", 100)
|
||||
dk.data["labels_mean"], dk.data["labels_std"] = {}, {}
|
||||
for label in full_labels:
|
||||
if self.dd.historic_predictions[dk.pair][label].dtype == object:
|
||||
continue
|
||||
f = spy.stats.norm.fit(self.dd.historic_predictions[dk.pair][label].tail(num_candles))
|
||||
dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1]
|
||||
|
||||
return
|
||||
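A standalone sketch of the label fit performed in fit_live_predictions above: scipy's norm.fit returns the (mean, std) of the most recent predictions, which are stored per label; the values here are toy data.

import numpy as np
from scipy.stats import norm

recent_preds = np.array([0.010, -0.020, 0.030, 0.000, 0.015])  # hypothetical label values
labels_mean, labels_std = norm.fit(recent_preds)
print(f"labels_mean={labels_mean:.4f} labels_std={labels_std:.4f}")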
|
||||
def inference_timer(self, do='start'):
|
||||
"""
|
||||
Timer designed to track the cumulative time spent in FreqAI for one pass through
|
||||
the whitelist. This will check if the time spent is more than 1/4 the time
|
||||
of a single candle, and if so, it will warn the user of degraded performance
|
||||
"""
|
||||
if do == 'start':
|
||||
self.pair_it += 1
|
||||
self.begin_time = time.time()
|
||||
elif do == 'stop':
|
||||
end = time.time()
|
||||
self.inference_time += (end - self.begin_time)
|
||||
if self.pair_it == self.total_pairs:
|
||||
logger.info(
|
||||
f'Total time spent inferencing pairlist {self.inference_time:.2f} seconds')
|
||||
if self.inference_time > 0.25 * self.base_tf_seconds:
|
||||
logger.warning('Inference took over 25% of the candle time. Reduce pairlist to'
|
||||
' avoid blinding open trades and degrading performance.')
|
||||
self.pair_it = 0
|
||||
self.inference_time = 0
|
||||
return
|
||||
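A worked example of the threshold logic above, assuming a 5m base timeframe (base_tf_seconds = 300): the warning fires once a full pass through the pairlist spends more than 0.25 * 300 = 75 seconds inferencing.

base_tf_seconds = 300    # 5 minute candles
inference_time = 80.0    # seconds spent inferencing the whole pairlist
if inference_time > 0.25 * base_tf_seconds:
    print("inference is using more than 25% of the candle time")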
|
||||
def train_timer(self, do='start'):
|
||||
"""
|
||||
Timer designed to track the cumulative time spent training the full pairlist in
|
||||
FreqAI.
|
||||
"""
|
||||
if do == 'start':
|
||||
self.pair_it_train += 1
|
||||
self.begin_time_train = time.time()
|
||||
elif do == 'stop':
|
||||
end = time.time()
|
||||
self.train_time += (end - self.begin_time_train)
|
||||
if self.pair_it_train == self.total_pairs:
|
||||
logger.info(
|
||||
f'Total time spent training pairlist {self.train_time:.2f} seconds')
|
||||
self.pair_it_train = 0
|
||||
self.train_time = 0
|
||||
return
|
||||
|
||||
# The following methods are overridden by user-made prediction models.
|
||||
# See freqai/prediction_models/CatboostPredictionModel.py for an example.
|
||||
|
||||
@abstractmethod
|
||||
def train(self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen) -> Any:
|
||||
"""
|
||||
Filter the training data and train a model to it. Train makes heavy use of the datahandler
|
||||
for storing, saving, loading, and analyzing the data.
|
||||
:param unfiltered_dataframe: Full dataframe for the current training period
|
||||
:param metadata: pair metadata from strategy.
|
||||
:return: Trained model which can be used to inference (self.predict)
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def fit(self, data_dictionary: Dict[str, Any]) -> Any:
|
||||
"""
|
||||
Most regressors use the same function names and arguments e.g. user
|
||||
can drop in LGBMRegressor in place of CatBoostRegressor and all data
|
||||
management will be properly handled by Freqai.
|
||||
:param data_dictionary: Dict = the dictionary constructed by DataHandler to hold
|
||||
all the training and test data/labels.
|
||||
"""
|
||||
|
||||
return
|
||||
|
||||
@abstractmethod
|
||||
def predict(
|
||||
self, dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = True
|
||||
) -> Tuple[DataFrame, NDArray[np.int_]]:
|
||||
"""
|
||||
Filter the prediction features data and predict with it.
|
||||
:param unfiltered_dataframe: Full dataframe for the current backtest period.
|
||||
:param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
|
||||
:param first: boolean = whether this is the first prediction or not.
|
||||
:return:
|
||||
:predictions: np.array of predictions
|
||||
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
|
||||
data (NaNs) or felt uncertain about data (i.e. SVM and/or DI index)
|
||||
"""
|
99  freqtrade/freqai/prediction_models/BaseClassifierModel.py  (new file)
@@ -0,0 +1,99 @@
|
||||
import logging
|
||||
from typing import Any, Tuple
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
import pandas as pd
|
||||
from pandas import DataFrame
|
||||
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
from freqtrade.freqai.freqai_interface import IFreqaiModel
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseClassifierModel(IFreqaiModel):
|
||||
"""
|
||||
Base class for classification type models (e.g. Catboost, LightGBM, XGboost etc.).
|
||||
User *must* inherit from this class and set fit() and predict(). See example scripts
|
||||
such as prediction_models/CatboostPredictionModel.py for guidance.
|
||||
"""
|
||||
|
||||
def train(
|
||||
self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
|
||||
) -> Any:
|
||||
"""
|
||||
Filter the training data and train a model to it. Train makes heavy use of the datakitchen
|
||||
for storing, saving, loading, and analyzing the data.
|
||||
:param unfiltered_dataframe: Full dataframe for the current training period
|
||||
:param metadata: pair metadata from strategy.
|
||||
:return:
|
||||
:model: Trained model which can be used to inference (self.predict)
|
||||
"""
|
||||
|
||||
logger.info("-------------------- Starting training " f"{pair} --------------------")
|
||||
|
||||
# filter the features requested by user in the configuration file and elegantly handle NaNs
|
||||
features_filtered, labels_filtered = dk.filter_features(
|
||||
unfiltered_dataframe,
|
||||
dk.training_features_list,
|
||||
dk.label_list,
|
||||
training_filter=True,
|
||||
)
|
||||
|
||||
start_date = unfiltered_dataframe["date"].iloc[0].strftime("%Y-%m-%d")
|
||||
end_date = unfiltered_dataframe["date"].iloc[-1].strftime("%Y-%m-%d")
|
||||
logger.info(f"-------------------- Training on data from {start_date} to "
|
||||
f"{end_date}--------------------")
|
||||
# split data into train/test data.
|
||||
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
|
||||
if not self.freqai_info.get('fit_live_predictions', 0) or not self.live:
|
||||
dk.fit_labels()
|
||||
# normalize all data based on train_dataset only
|
||||
data_dictionary = dk.normalize_data(data_dictionary)
|
||||
|
||||
# optional additional data cleaning/analysis
|
||||
self.data_cleaning_train(dk)
|
||||
|
||||
logger.info(
|
||||
f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features"
|
||||
)
|
||||
logger.info(f'Training model on {len(data_dictionary["train_features"])} data points')
|
||||
|
||||
model = self.fit(data_dictionary)
|
||||
|
||||
logger.info(f"--------------------done training {pair}--------------------")
|
||||
|
||||
return model
|
||||
|
||||
def predict(
|
||||
self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False
|
||||
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
|
||||
"""
|
||||
Filter the prediction features data and predict with it.
|
||||
:param: unfiltered_dataframe: Full dataframe for the current backtest period.
|
||||
:return:
|
||||
:pred_df: dataframe containing the predictions
|
||||
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
|
||||
data (NaNs) or felt uncertain about data (PCA and DI index)
|
||||
"""
|
||||
|
||||
dk.find_features(unfiltered_dataframe)
|
||||
filtered_dataframe, _ = dk.filter_features(
|
||||
unfiltered_dataframe, dk.training_features_list, training_filter=False
|
||||
)
|
||||
filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
|
||||
dk.data_dictionary["prediction_features"] = filtered_dataframe
|
||||
|
||||
self.data_cleaning_predict(dk, filtered_dataframe)
|
||||
|
||||
predictions = self.model.predict(dk.data_dictionary["prediction_features"])
|
||||
pred_df = DataFrame(predictions, columns=dk.label_list)
|
||||
|
||||
predictions_prob = self.model.predict_proba(dk.data_dictionary["prediction_features"])
|
||||
pred_df_prob = DataFrame(predictions_prob, columns=self.model.classes_)
|
||||
|
||||
pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
|
||||
|
||||
return (pred_df, dk.do_predict)
|
96  freqtrade/freqai/prediction_models/BaseRegressionModel.py  (new file)
@@ -0,0 +1,96 @@
|
||||
import logging
|
||||
from typing import Any, Tuple
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
from pandas import DataFrame
|
||||
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
from freqtrade.freqai.freqai_interface import IFreqaiModel
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseRegressionModel(IFreqaiModel):
|
||||
"""
|
||||
Base class for regression type models (e.g. Catboost, LightGBM, XGboost etc.).
|
||||
User *must* inherit from this class and set fit() and predict(). See example scripts
|
||||
such as prediction_models/CatboostPredictionModel.py for guidance.
|
||||
"""
|
||||
|
||||
def train(
|
||||
self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
|
||||
) -> Any:
|
||||
"""
|
||||
Filter the training data and train a model to it. Train makes heavy use of the datakitchen
|
||||
for storing, saving, loading, and analyzing the data.
|
||||
:param unfiltered_dataframe: Full dataframe for the current training period
|
||||
:param metadata: pair metadata from strategy.
|
||||
:return:
|
||||
:model: Trained model which can be used to inference (self.predict)
|
||||
"""
|
||||
|
||||
logger.info("-------------------- Starting training " f"{pair} --------------------")
|
||||
|
||||
# filter the features requested by user in the configuration file and elegantly handle NaNs
|
||||
features_filtered, labels_filtered = dk.filter_features(
|
||||
unfiltered_dataframe,
|
||||
dk.training_features_list,
|
||||
dk.label_list,
|
||||
training_filter=True,
|
||||
)
|
||||
|
||||
start_date = unfiltered_dataframe["date"].iloc[0].strftime("%Y-%m-%d")
|
||||
end_date = unfiltered_dataframe["date"].iloc[-1].strftime("%Y-%m-%d")
|
||||
logger.info(f"-------------------- Training on data from {start_date} to "
|
||||
f"{end_date}--------------------")
|
||||
# split data into train/test data.
|
||||
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
|
||||
if not self.freqai_info.get('fit_live_predictions', 0) or not self.live:
|
||||
dk.fit_labels()
|
||||
# normalize all data based on train_dataset only
|
||||
data_dictionary = dk.normalize_data(data_dictionary)
|
||||
|
||||
# optional additional data cleaning/analysis
|
||||
self.data_cleaning_train(dk)
|
||||
|
||||
logger.info(
|
||||
f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features"
|
||||
)
|
||||
logger.info(f'Training model on {len(data_dictionary["train_features"])} data points')
|
||||
|
||||
model = self.fit(data_dictionary)
|
||||
|
||||
logger.info(f"--------------------done training {pair}--------------------")
|
||||
|
||||
return model
|
||||
|
||||
def predict(
|
||||
self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False
|
||||
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
|
||||
"""
|
||||
Filter the prediction features data and predict with it.
|
||||
:param: unfiltered_dataframe: Full dataframe for the current backtest period.
|
||||
:return:
|
||||
:pred_df: dataframe containing the predictions
|
||||
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
|
||||
data (NaNs) or felt uncertain about data (PCA and DI index)
|
||||
"""
|
||||
|
||||
dk.find_features(unfiltered_dataframe)
|
||||
filtered_dataframe, _ = dk.filter_features(
|
||||
unfiltered_dataframe, dk.training_features_list, training_filter=False
|
||||
)
|
||||
filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
|
||||
dk.data_dictionary["prediction_features"] = filtered_dataframe
|
||||
|
||||
# optional additional data cleaning/analysis
|
||||
self.data_cleaning_predict(dk, filtered_dataframe)
|
||||
|
||||
predictions = self.model.predict(dk.data_dictionary["prediction_features"])
|
||||
pred_df = DataFrame(predictions, columns=dk.label_list)
|
||||
|
||||
pred_df = dk.denormalize_labels_from_metadata(pred_df)
|
||||
|
||||
return (pred_df, dk.do_predict)
|
64  freqtrade/freqai/prediction_models/BaseTensorFlowModel.py  (new file)
@@ -0,0 +1,64 @@
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from pandas import DataFrame
|
||||
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
from freqtrade.freqai.freqai_interface import IFreqaiModel
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseTensorFlowModel(IFreqaiModel):
|
||||
"""
|
||||
Base class for TensorFlow type models.
|
||||
User *must* inherit from this class and set fit() and predict().
|
||||
"""
|
||||
|
||||
def train(
|
||||
self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
|
||||
) -> Any:
|
||||
"""
|
||||
Filter the training data and train a model to it. Train makes heavy use of the datakitchen
|
||||
for storing, saving, loading, and analyzing the data.
|
||||
:param unfiltered_dataframe: Full dataframe for the current training period
|
||||
:param metadata: pair metadata from strategy.
|
||||
:return:
|
||||
:model: Trained model which can be used to inference (self.predict)
|
||||
"""
|
||||
|
||||
logger.info("-------------------- Starting training " f"{pair} --------------------")
|
||||
|
||||
# filter the features requested by user in the configuration file and elegantly handle NaNs
|
||||
features_filtered, labels_filtered = dk.filter_features(
|
||||
unfiltered_dataframe,
|
||||
dk.training_features_list,
|
||||
dk.label_list,
|
||||
training_filter=True,
|
||||
)
|
||||
|
||||
start_date = unfiltered_dataframe["date"].iloc[0].strftime("%Y-%m-%d")
|
||||
end_date = unfiltered_dataframe["date"].iloc[-1].strftime("%Y-%m-%d")
|
||||
logger.info(f"-------------------- Training on data from {start_date} to "
|
||||
f"{end_date}--------------------")
|
||||
# split data into train/test data.
|
||||
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
|
||||
if not self.freqai_info.get('fit_live_predictions', 0) or not self.live:
|
||||
dk.fit_labels()
|
||||
# normalize all data based on train_dataset only
|
||||
data_dictionary = dk.normalize_data(data_dictionary)
|
||||
|
||||
# optional additional data cleaning/analysis
|
||||
self.data_cleaning_train(dk)
|
||||
|
||||
logger.info(
|
||||
f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features"
|
||||
)
|
||||
logger.info(f'Training model on {len(data_dictionary["train_features"])} data points')
|
||||
|
||||
model = self.fit(data_dictionary)
|
||||
|
||||
logger.info(f"--------------------done training {pair}--------------------")
|
||||
|
||||
return model
|
41  freqtrade/freqai/prediction_models/CatboostClassifier.py  (new file)
@@ -0,0 +1,41 @@
|
||||
import logging
|
||||
from typing import Any, Dict
|
||||
|
||||
from catboost import CatBoostClassifier, Pool
|
||||
|
||||
from freqtrade.freqai.prediction_models.BaseClassifierModel import BaseClassifierModel
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CatboostClassifier(BaseClassifierModel):
|
||||
"""
|
||||
User created prediction model. The class inherits BaseClassifierModel, which
|
||||
already implements train() and predict(); only fit() needs to be defined here,
|
||||
where the user sets up the CatBoost classifier and its training data.
|
||||
"""
|
||||
|
||||
def fit(self, data_dictionary: Dict) -> Any:
|
||||
"""
|
||||
User sets up the training and test data to fit their desired model here
|
||||
:params:
|
||||
:data_dictionary: the dictionary constructed by DataHandler to hold
|
||||
all the training and test data/labels.
|
||||
"""
|
||||
|
||||
train_data = Pool(
|
||||
data=data_dictionary["train_features"],
|
||||
label=data_dictionary["train_labels"],
|
||||
weight=data_dictionary["train_weights"],
|
||||
)
|
||||
|
||||
cbr = CatBoostClassifier(
|
||||
allow_writing_files=False,
|
||||
loss_function='MultiClass',
|
||||
**self.model_training_parameters,
|
||||
)
|
||||
|
||||
cbr.fit(train_data)
|
||||
|
||||
return cbr
|
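To make the expected input of fit() above concrete, a hedged sketch of the data_dictionary shape it consumes; the keys mirror the ones used above, while the feature and label columns are toy data only (real frames are built by the data kitchen's make_train_test_datasets).

import pandas as pd

data_dictionary = {
    "train_features": pd.DataFrame({"feat_a": [0.1, 0.2, 0.3], "feat_b": [1.0, 0.5, 0.2]}),
    "train_labels": pd.DataFrame({"&-up_or_down": ["up", "down", "up"]}),
    "train_weights": [1.0, 1.0, 1.0],
}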