Compare commits
225 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
2be3ff6bcb | ||
|
457b8d8761 | ||
|
70dfa1435b | ||
|
98fc5b6e65 | ||
|
c126c26501 | ||
|
2159059b87 | ||
|
f0f4faca71 | ||
|
0bc647dbd9 | ||
|
e3efb72efe | ||
|
a9ef63cb20 | ||
|
3b0daff2a2 | ||
|
67bd4f08e6 | ||
|
4c2d291eaf | ||
|
85df7faa98 | ||
|
8d3ed03184 | ||
|
9cb4832c87 | ||
|
5cfadc689b | ||
|
936ca24482 | ||
|
9c73411ac2 | ||
|
b0eff4160f | ||
|
7dbb78da95 | ||
|
0d67afe15b | ||
|
4edb30bfa8 | ||
|
0e0bda8f13 | ||
|
8c7f478724 | ||
|
52b774b5eb | ||
|
22043deffa | ||
|
ca913fb29d | ||
|
4df533feb0 | ||
|
a1a598dcab | ||
|
5019300d5c | ||
|
3264d7b890 | ||
|
c1d8ade2fa | ||
|
68db0bc647 | ||
|
a6296be2f5 | ||
|
eb8eebe492 | ||
|
016e438468 | ||
|
bc6729f724 | ||
|
7f308c5186 | ||
|
7f475e37d7 | ||
|
dc5c3a0ed2 | ||
|
4c83552f3b | ||
|
f0c04212f2 | ||
|
292d72d593 | ||
|
ca22d857b7 | ||
|
3585742b43 | ||
|
74277c7eff | ||
|
265795824b | ||
|
c2d0eca9d8 | ||
|
6ecd92de4a | ||
|
3921615023 | ||
|
ac7df58447 | ||
|
a78d6a05a6 | ||
|
616d69e0bd | ||
|
ae0a39521b | ||
|
3c789bca63 | ||
|
0af124701b | ||
|
4cf4642a6c | ||
|
f3d4c56b3b | ||
|
6defa62297 | ||
|
9691524ade | ||
|
a6bc00501f | ||
|
373132e135 | ||
|
70d6c27e3e | ||
|
0a7e4d6da5 | ||
|
f722104f7e | ||
|
6f7b75d4b0 | ||
|
b70f18f4c3 | ||
|
1727f99b58 | ||
|
21440eaec2 | ||
|
d0b8c8b1a0 | ||
|
a5bc75b48c | ||
|
e686faf1bc | ||
|
9bb061073d | ||
|
308fa43007 | ||
|
564318415e | ||
|
2c94ed2e59 | ||
|
cf882fa84e | ||
|
ab9d781b06 | ||
|
048cb95bd6 | ||
|
3e34f10e3d | ||
|
84b822dbf1 | ||
|
f4c6b99d63 | ||
|
cd514cf15d | ||
|
f2b875483f | ||
|
51556e08c3 | ||
|
6702a1b219 | ||
|
8f8b5cc28e | ||
|
201bbbcee6 | ||
|
a96aa568bf | ||
|
545d652352 | ||
|
fad9026939 | ||
|
cdc01a0781 | ||
|
47ef99f588 | ||
|
819488c906 | ||
|
c946d30596 | ||
|
649879192b | ||
|
d462f40299 | ||
|
bd664580fb | ||
|
cc06c60fd8 | ||
|
0d8dfc1a92 | ||
|
f6a0d677d2 | ||
|
7dd984e25e | ||
|
561600e98b | ||
|
2d2ff2fff6 | ||
|
2ce265bed3 | ||
|
34951f59d2 | ||
|
be48131185 | ||
|
38aca8e908 | ||
|
09e834fa21 | ||
|
578da343dc | ||
|
b4fb28e4ef | ||
|
00965d8c06 | ||
|
6e74d46660 | ||
|
7ef56e3029 | ||
|
555cc42630 | ||
|
dcf6ebe273 | ||
|
83343dc2f1 | ||
|
772abfc6f0 | ||
|
683b084323 | ||
|
099137adac | ||
|
9e36b0d2ea | ||
|
caa47a2f47 | ||
|
255c748ca2 | ||
|
30a5bb08dd | ||
|
8eda3a45a3 | ||
|
f5870a7540 | ||
|
af59572cb9 | ||
|
bd106b4b8e | ||
|
1bb45a2650 | ||
|
30d51b6939 | ||
|
1c089dcd51 | ||
|
527fd36134 | ||
|
4940fa7be3 | ||
|
0c810868de | ||
|
647200e8a7 | ||
|
77c360b264 | ||
|
9c361f4422 | ||
|
95121550ef | ||
|
f7dd3045f7 | ||
|
f5cd8f62c6 | ||
|
1c56fa034f | ||
|
7295ba0fb2 | ||
|
f6e9753c99 | ||
|
eeebb78a5c | ||
|
ea8e34e192 | ||
|
7b1d409c98 | ||
|
d056d766ed | ||
|
025b98decd | ||
|
3b97b3d5c8 | ||
|
8aac644009 | ||
|
48140bff91 | ||
|
81417cb795 | ||
|
69b3fcfd32 | ||
|
27dce20b29 | ||
|
240b529533 | ||
|
2493e0c8a5 | ||
|
1a8e1362a1 | ||
|
67cddae756 | ||
|
af8f308584 | ||
|
7766350c15 | ||
|
8c313b431d | ||
|
baa4f8e3d0 | ||
|
cdc550da9a | ||
|
d31926efdf | ||
|
3199eb453b | ||
|
05ccebf9a1 | ||
|
94cfc8e63f | ||
|
d1bee29b1e | ||
|
a61821e1c6 | ||
|
bd870e2331 | ||
|
c0cee5df07 | ||
|
b708134c1a | ||
|
b26ed7dea4 | ||
|
280a1dc3f8 | ||
|
f9a49744e6 | ||
|
a2a4bc05db | ||
|
29f0e01c4a | ||
|
d88a0dbf82 | ||
|
8b3a8234ac | ||
|
8cd4daad0a | ||
|
3eb897c2f8 | ||
|
4b9499e321 | ||
|
4baa36bdcf | ||
|
f95602f6bd | ||
|
5d4e5e69fe | ||
|
7962a1439b | ||
|
81b5aa66e8 | ||
|
45218faeb0 | ||
|
d55092ff17 | ||
|
74e4fd0633 | ||
|
b90da46b1b | ||
|
2080ff86ed | ||
|
16cec7dfbd | ||
|
0475b7cb18 | ||
|
d60a166fbf | ||
|
dd382dd370 | ||
|
69d542d3e2 | ||
|
e5df39e891 | ||
|
bf7ceba958 | ||
|
57c488a6f1 | ||
|
48bb51b458 | ||
|
b1fc5a06ca | ||
|
6d8e838a8f | ||
|
acf3484e88 | ||
|
cf0731095f | ||
|
1c81ec6016 | ||
|
13cd18dc9a | ||
|
926023935f | ||
|
096533bcb9 | ||
|
718c9d0440 | ||
|
9c78e6c26f | ||
|
6048f60f13 | ||
|
d4db5c3281 | ||
|
91683e1dca | ||
|
ecd1f55abc | ||
|
70b25461f0 | ||
|
9b895500b3 | ||
|
cd3fe44424 | ||
|
01232e9a1f | ||
|
8eeaab2746 | ||
|
ec813434f5 | ||
|
2f4d73eb06 | ||
|
c1e7db3130 | ||
|
05ed1b544f |
23
.github/workflows/draft-pdf.yml
vendored
Normal file
23
.github/workflows/draft-pdf.yml
vendored
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
on: [push]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
paper:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Paper Draft
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
- name: Build draft PDF
|
||||||
|
uses: openjournals/openjournals-draft-action@master
|
||||||
|
with:
|
||||||
|
journal: joss
|
||||||
|
# This should be the path to the paper within your repo.
|
||||||
|
paper-path: docs/JOSS_paper/paper.md
|
||||||
|
- name: Upload
|
||||||
|
uses: actions/upload-artifact@v1
|
||||||
|
with:
|
||||||
|
name: paper
|
||||||
|
# This is the output path where Pandoc will write the compiled
|
||||||
|
# PDF. Note, this should be the same directory as the input
|
||||||
|
# paper.md
|
||||||
|
path: docs/JOSS_paper/paper.pdf
|
1
.gitignore
vendored
1
.gitignore
vendored
@@ -113,3 +113,4 @@ target/
|
|||||||
!config_examples/config_full.example.json
|
!config_examples/config_full.example.json
|
||||||
!config_examples/config_kraken.example.json
|
!config_examples/config_kraken.example.json
|
||||||
!config_examples/config_freqai.example.json
|
!config_examples/config_freqai.example.json
|
||||||
|
!config_examples/config_freqai-rl.example.json
|
||||||
|
BIN
docs/JOSS_paper/assets/freqai_algo.jpg
Normal file
BIN
docs/JOSS_paper/assets/freqai_algo.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 345 KiB |
BIN
docs/JOSS_paper/assets/freqai_algorithm-diagram.jpg
Normal file
BIN
docs/JOSS_paper/assets/freqai_algorithm-diagram.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 490 KiB |
15
docs/JOSS_paper/note_to_editors.txt
Normal file
15
docs/JOSS_paper/note_to_editors.txt
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
Dear Editors,
|
||||||
|
We present a paper for ``FreqAI`` a machine learning sandbox for researchers and citizen scientists alike.
|
||||||
|
There are a large number of authors, however all have contributed in a significant way to this paper.
|
||||||
|
For clarity the contribution of each author is outlined:
|
||||||
|
|
||||||
|
- Robert Caulk : Conception and software development
|
||||||
|
- Elin Tornquist : Theoretical brainstorming, data analysis, tool dev
|
||||||
|
- Matthias Voppichler : Software architecture and code review
|
||||||
|
- Andrew R. Lawless : Extensive testing, feature brainstorming
|
||||||
|
- Ryan McMullan : Extensive testing, feature brainstorming
|
||||||
|
- Wagner Costa Santos : Major backtesting developments, extensive testing
|
||||||
|
- Pascal Schmidt : Extensive testing, feature brainstorming
|
||||||
|
- Timothy C. Pogue : Webhooks forecast sharing
|
||||||
|
- Stefan P. Gehring : Extensive testing, feature brainstorming
|
||||||
|
- Johan van der Vlugt : Extensive testing, feature brainstorming
|
207
docs/JOSS_paper/paper.bib
Normal file
207
docs/JOSS_paper/paper.bib
Normal file
@@ -0,0 +1,207 @@
|
|||||||
|
@article{scikit-learn,
|
||||||
|
title={Scikit-learn: Machine Learning in {P}ython},
|
||||||
|
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
|
||||||
|
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
|
||||||
|
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
|
||||||
|
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
|
||||||
|
journal={Journal of Machine Learning Research},
|
||||||
|
volume={12},
|
||||||
|
pages={2825--2830},
|
||||||
|
year={2011}
|
||||||
|
}
|
||||||
|
|
||||||
|
@inproceedings{catboost,
|
||||||
|
author = {Prokhorenkova, Liudmila and Gusev, Gleb and Vorobev, Aleksandr and Dorogush, Anna Veronika and Gulin, Andrey},
|
||||||
|
title = {CatBoost: Unbiased Boosting with Categorical Features},
|
||||||
|
year = {2018},
|
||||||
|
publisher = {Curran Associates Inc.},
|
||||||
|
address = {Red Hook, NY, USA},
|
||||||
|
abstract = {This paper presents the key algorithmic techniques behind CatBoost, a new gradient boosting toolkit. Their combination leads to CatBoost outperforming other publicly available boosting implementations in terms of quality on a variety of datasets. Two critical algorithmic advances introduced in CatBoost are the implementation of ordered boosting, a permutation-driven alternative to the classic algorithm, and an innovative algorithm for processing categorical features. Both techniques were created to fight a prediction shift caused by a special kind of target leakage present in all currently existing implementations of gradient boosting algorithms. In this paper, we provide a detailed analysis of this problem and demonstrate that proposed algorithms solve it effectively, leading to excellent empirical results.},
|
||||||
|
booktitle = {Proceedings of the 32nd International Conference on Neural Information Processing Systems},
|
||||||
|
pages = {6639–6649},
|
||||||
|
numpages = {11},
|
||||||
|
location = {Montr\'{e}al, Canada},
|
||||||
|
series = {NIPS'18}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@article{lightgbm,
|
||||||
|
title={Lightgbm: A highly efficient gradient boosting decision tree},
|
||||||
|
author={Ke, Guolin and Meng, Qi and Finley, Thomas and Wang, Taifeng and Chen, Wei and Ma, Weidong and Ye, Qiwei and Liu, Tie-Yan},
|
||||||
|
journal={Advances in neural information processing systems},
|
||||||
|
volume={30},
|
||||||
|
pages={3146--3154},
|
||||||
|
year={2017}
|
||||||
|
}
|
||||||
|
|
||||||
|
@inproceedings{xgboost,
|
||||||
|
author = {Chen, Tianqi and Guestrin, Carlos},
|
||||||
|
title = {{XGBoost}: A Scalable Tree Boosting System},
|
||||||
|
booktitle = {Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
|
||||||
|
series = {KDD '16},
|
||||||
|
year = {2016},
|
||||||
|
isbn = {978-1-4503-4232-2},
|
||||||
|
location = {San Francisco, California, USA},
|
||||||
|
pages = {785--794},
|
||||||
|
numpages = {10},
|
||||||
|
url = {http://doi.acm.org/10.1145/2939672.2939785},
|
||||||
|
doi = {10.1145/2939672.2939785},
|
||||||
|
acmid = {2939785},
|
||||||
|
publisher = {ACM},
|
||||||
|
address = {New York, NY, USA},
|
||||||
|
keywords = {large-scale machine learning},
|
||||||
|
}
|
||||||
|
|
||||||
|
@article{stable-baselines3,
|
||||||
|
author = {Antonin Raffin and Ashley Hill and Adam Gleave and Anssi Kanervisto and Maximilian Ernestus and Noah Dormann},
|
||||||
|
title = {Stable-Baselines3: Reliable Reinforcement Learning Implementations},
|
||||||
|
journal = {Journal of Machine Learning Research},
|
||||||
|
year = {2021},
|
||||||
|
volume = {22},
|
||||||
|
number = {268},
|
||||||
|
pages = {1-8},
|
||||||
|
url = {http://jmlr.org/papers/v22/20-1364.html}
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{openai,
|
||||||
|
title={OpenAI Gym},
|
||||||
|
author={Greg Brockman and Vicki Cheung and Ludwig Pettersson and Jonas Schneider and John Schulman and Jie Tang and Wojciech Zaremba},
|
||||||
|
year={2016},
|
||||||
|
eprint={1606.01540},
|
||||||
|
archivePrefix={arXiv},
|
||||||
|
primaryClass={cs.LG}
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{tensorflow,
|
||||||
|
title={ {TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
|
||||||
|
url={https://www.tensorflow.org/},
|
||||||
|
note={Software available from tensorflow.org},
|
||||||
|
author={
|
||||||
|
Mart\'{i}n~Abadi and
|
||||||
|
Ashish~Agarwal and
|
||||||
|
Paul~Barham and
|
||||||
|
Eugene~Brevdo and
|
||||||
|
Zhifeng~Chen and
|
||||||
|
Craig~Citro and
|
||||||
|
Greg~S.~Corrado and
|
||||||
|
Andy~Davis and
|
||||||
|
Jeffrey~Dean and
|
||||||
|
Matthieu~Devin and
|
||||||
|
Sanjay~Ghemawat and
|
||||||
|
Ian~Goodfellow and
|
||||||
|
Andrew~Harp and
|
||||||
|
Geoffrey~Irving and
|
||||||
|
Michael~Isard and
|
||||||
|
Yangqing Jia and
|
||||||
|
Rafal~Jozefowicz and
|
||||||
|
Lukasz~Kaiser and
|
||||||
|
Manjunath~Kudlur and
|
||||||
|
Josh~Levenberg and
|
||||||
|
Dandelion~Man\'{e} and
|
||||||
|
Rajat~Monga and
|
||||||
|
Sherry~Moore and
|
||||||
|
Derek~Murray and
|
||||||
|
Chris~Olah and
|
||||||
|
Mike~Schuster and
|
||||||
|
Jonathon~Shlens and
|
||||||
|
Benoit~Steiner and
|
||||||
|
Ilya~Sutskever and
|
||||||
|
Kunal~Talwar and
|
||||||
|
Paul~Tucker and
|
||||||
|
Vincent~Vanhoucke and
|
||||||
|
Vijay~Vasudevan and
|
||||||
|
Fernanda~Vi\'{e}gas and
|
||||||
|
Oriol~Vinyals and
|
||||||
|
Pete~Warden and
|
||||||
|
Martin~Wattenberg and
|
||||||
|
Martin~Wicke and
|
||||||
|
Yuan~Yu and
|
||||||
|
Xiaoqiang~Zheng},
|
||||||
|
year={2015},
|
||||||
|
}
|
||||||
|
|
||||||
|
@incollection{pytorch,
|
||||||
|
title = {PyTorch: An Imperative Style, High-Performance Deep Learning Library},
|
||||||
|
author = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and Desmaison, Alban and Kopf, Andreas and Yang, Edward and DeVito, Zachary and Raison, Martin and Tejani, Alykhan and Chilamkurthy, Sasank and Steiner, Benoit and Fang, Lu and Bai, Junjie and Chintala, Soumith},
|
||||||
|
booktitle = {Advances in Neural Information Processing Systems 32},
|
||||||
|
editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
|
||||||
|
pages = {8024--8035},
|
||||||
|
year = {2019},
|
||||||
|
publisher = {Curran Associates, Inc.},
|
||||||
|
url = {http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf}
|
||||||
|
}
|
||||||
|
|
||||||
|
@ARTICLE{scipy,
|
||||||
|
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
|
||||||
|
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
|
||||||
|
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
|
||||||
|
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
|
||||||
|
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
|
||||||
|
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
|
||||||
|
Kern, Robert and Larson, Eric and Carey, C J and
|
||||||
|
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
|
||||||
|
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
|
||||||
|
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
|
||||||
|
Harris, Charles R. and Archibald, Anne M. and
|
||||||
|
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
|
||||||
|
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
|
||||||
|
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
|
||||||
|
Computing in Python}},
|
||||||
|
journal = {Nature Methods},
|
||||||
|
year = {2020},
|
||||||
|
volume = {17},
|
||||||
|
pages = {261--272},
|
||||||
|
adsurl = {https://rdcu.be/b08Wh},
|
||||||
|
doi = {10.1038/s41592-019-0686-2},
|
||||||
|
}
|
||||||
|
|
||||||
|
@Article{numpy,
|
||||||
|
title = {Array programming with {NumPy}},
|
||||||
|
author = {Charles R. Harris and K. Jarrod Millman and St{\'{e}}fan J.
|
||||||
|
van der Walt and Ralf Gommers and Pauli Virtanen and David
|
||||||
|
Cournapeau and Eric Wieser and Julian Taylor and Sebastian
|
||||||
|
Berg and Nathaniel J. Smith and Robert Kern and Matti Picus
|
||||||
|
and Stephan Hoyer and Marten H. van Kerkwijk and Matthew
|
||||||
|
Brett and Allan Haldane and Jaime Fern{\'{a}}ndez del
|
||||||
|
R{\'{i}}o and Mark Wiebe and Pearu Peterson and Pierre
|
||||||
|
G{\'{e}}rard-Marchant and Kevin Sheppard and Tyler Reddy and
|
||||||
|
Warren Weckesser and Hameer Abbasi and Christoph Gohlke and
|
||||||
|
Travis E. Oliphant},
|
||||||
|
year = {2020},
|
||||||
|
month = sep,
|
||||||
|
journal = {Nature},
|
||||||
|
volume = {585},
|
||||||
|
number = {7825},
|
||||||
|
pages = {357--362},
|
||||||
|
doi = {10.1038/s41586-020-2649-2},
|
||||||
|
publisher = {Springer Science and Business Media {LLC}},
|
||||||
|
url = {https://doi.org/10.1038/s41586-020-2649-2}
|
||||||
|
}
|
||||||
|
|
||||||
|
@inproceedings{pandas,
|
||||||
|
title={Data structures for statistical computing in python},
|
||||||
|
author={McKinney, Wes and others},
|
||||||
|
booktitle={Proceedings of the 9th Python in Science Conference},
|
||||||
|
volume={445},
|
||||||
|
pages={51--56},
|
||||||
|
year={2010},
|
||||||
|
organization={Austin, TX},
|
||||||
|
doi={10.25080/Majora-92bf1922-00a}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@online{finrl,
|
||||||
|
title = {AI4Finance-Foundation},
|
||||||
|
year = 2022,
|
||||||
|
url = {https://github.com/AI4Finance-Foundation/FinRL},
|
||||||
|
urldate = {2022-09-30}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@online{tensortrade,
|
||||||
|
title = {tensortrade},
|
||||||
|
year = 2022,
|
||||||
|
url = {https://tensortradex.readthedocs.io/en/latest/L},
|
||||||
|
urldate = {2022-09-30}
|
||||||
|
}
|
941
docs/JOSS_paper/paper.jats
Normal file
941
docs/JOSS_paper/paper.jats
Normal file
@@ -0,0 +1,941 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8" ?>
|
||||||
|
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.2 20190208//EN"
|
||||||
|
"JATS-publishing1.dtd">
|
||||||
|
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="1.2" article-type="other">
|
||||||
|
<front>
|
||||||
|
<journal-meta>
|
||||||
|
<journal-id></journal-id>
|
||||||
|
<journal-title-group>
|
||||||
|
<journal-title>Journal of Open Source Software</journal-title>
|
||||||
|
<abbrev-journal-title>JOSS</abbrev-journal-title>
|
||||||
|
</journal-title-group>
|
||||||
|
<issn publication-format="electronic">2475-9066</issn>
|
||||||
|
<publisher>
|
||||||
|
<publisher-name>Open Journals</publisher-name>
|
||||||
|
</publisher>
|
||||||
|
</journal-meta>
|
||||||
|
<article-meta>
|
||||||
|
<article-id pub-id-type="publisher-id">0</article-id>
|
||||||
|
<article-id pub-id-type="doi">N/A</article-id>
|
||||||
|
<title-group>
|
||||||
|
<article-title><monospace>FreqAI</monospace>: generalizing adaptive
|
||||||
|
modeling for chaotic time-series market forecasts</article-title>
|
||||||
|
</title-group>
|
||||||
|
<contrib-group>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<contrib-id contrib-id-type="orcid">0000-0001-5618-8629</contrib-id>
|
||||||
|
<name>
|
||||||
|
<surname>Ph.D</surname>
|
||||||
|
<given-names>Robert A. Caulk</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-1"/>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<contrib-id contrib-id-type="orcid">0000-0003-3289-8604</contrib-id>
|
||||||
|
<name>
|
||||||
|
<surname>Ph.D</surname>
|
||||||
|
<given-names>Elin Törnquist</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-1"/>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Voppichler</surname>
|
||||||
|
<given-names>Matthias</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Lawless</surname>
|
||||||
|
<given-names>Andrew R.</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>McMullan</surname>
|
||||||
|
<given-names>Ryan</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Santos</surname>
|
||||||
|
<given-names>Wagner Costa</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-1"/>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Pogue</surname>
|
||||||
|
<given-names>Timothy C.</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-1"/>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>van der Vlugt</surname>
|
||||||
|
<given-names>Johan</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Gehring</surname>
|
||||||
|
<given-names>Stefan P.</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Schmidt</surname>
|
||||||
|
<given-names>Pascal</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<aff id="aff-1">
|
||||||
|
<institution-wrap>
|
||||||
|
<institution>Emergent Methods LLC, Arvada Colorado, 80005,
|
||||||
|
USA</institution>
|
||||||
|
</institution-wrap>
|
||||||
|
</aff>
|
||||||
|
<aff id="aff-2">
|
||||||
|
<institution-wrap>
|
||||||
|
<institution>Freqtrade open source project</institution>
|
||||||
|
</institution-wrap>
|
||||||
|
</aff>
|
||||||
|
</contrib-group>
|
||||||
|
<volume>¿VOL?</volume>
|
||||||
|
<issue>¿ISSUE?</issue>
|
||||||
|
<fpage>¿PAGE?</fpage>
|
||||||
|
<permissions>
|
||||||
|
<copyright-statement>Authors of papers retain copyright and release the
|
||||||
|
work under a Creative Commons Attribution 4.0 International License (CC
|
||||||
|
BY 4.0)</copyright-statement>
|
||||||
|
<copyright-year>2022</copyright-year>
|
||||||
|
<copyright-holder>The article authors</copyright-holder>
|
||||||
|
<license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
|
||||||
|
<license-p>Authors of papers retain copyright and release the work under
|
||||||
|
a Creative Commons Attribution 4.0 International License (CC BY
|
||||||
|
4.0)</license-p>
|
||||||
|
</license>
|
||||||
|
</permissions>
|
||||||
|
<kwd-group kwd-group-type="author">
|
||||||
|
<kwd>Python</kwd>
|
||||||
|
<kwd>Machine Learning</kwd>
|
||||||
|
<kwd>adaptive modeling</kwd>
|
||||||
|
<kwd>chaotic systems</kwd>
|
||||||
|
<kwd>time-series forecasting</kwd>
|
||||||
|
</kwd-group>
|
||||||
|
</article-meta>
|
||||||
|
</front>
|
||||||
|
<body>
|
||||||
|
<sec id="statement-of-need">
|
||||||
|
<title>Statement of need</title>
|
||||||
|
<p>Forecasting chaotic time-series based systems, such as
|
||||||
|
equity/cryptocurrency markets, requires a broad set of tools geared
|
||||||
|
toward testing a wide range of hypotheses. Fortunately, a recent
|
||||||
|
maturation of robust machine learning libraries
|
||||||
|
(e.g. <monospace>scikit-learn</monospace>), has opened up a wide range
|
||||||
|
of research possibilities. Scientists from a diverse range of fields
|
||||||
|
can now easily prototype their studies on an abundance of established
|
||||||
|
machine learning algorithms. Similarly, these user-friendly libraries
|
||||||
|
enable “citzen scientists” to use their basic Python skills for
|
||||||
|
data-exploration. However, leveraging these machine learning libraries
|
||||||
|
on historical and live chaotic data sources can be logistically
|
||||||
|
difficult and expensive. Additionally, robust data-collection,
|
||||||
|
storage, and handling presents a disparate challenge.
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/"><monospace>FreqAI</monospace></ext-link>
|
||||||
|
aims to provide a generalized and extensible open-sourced framework
|
||||||
|
geared toward live deployments of adaptive modeling for market
|
||||||
|
forecasting. The <monospace>FreqAI</monospace> framework is
|
||||||
|
effectively a sandbox for the rich world of open-source machine
|
||||||
|
learning libraries. Inside the <monospace>FreqAI</monospace> sandbox,
|
||||||
|
users find they can combine a wide variety of third-party libraries to
|
||||||
|
test creative hypotheses on a free live 24/7 chaotic data source -
|
||||||
|
cryptocurrency exchange data.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="summary">
|
||||||
|
<title>Summary</title>
|
||||||
|
<p><ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/"><monospace>FreqAI</monospace></ext-link>
|
||||||
|
evolved from a desire to test and compare a range of adaptive
|
||||||
|
time-series forecasting methods on chaotic data. Cryptocurrency
|
||||||
|
markets provide a unique data source since they are operational 24/7
|
||||||
|
and the data is freely available. Luckily, an existing open-source
|
||||||
|
software,
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/stable/"><monospace>Freqtrade</monospace></ext-link>,
|
||||||
|
had already matured under a range of talented developers to support
|
||||||
|
robust data collection/storage, as well as robust live environmental
|
||||||
|
interactions for standard algorithmic trading.
|
||||||
|
<monospace>Freqtrade</monospace> also provides a set of data
|
||||||
|
analysis/visualization tools for the evaluation of historical
|
||||||
|
performance as well as live environmental feedback.
|
||||||
|
<monospace>FreqAI</monospace> builds on top of
|
||||||
|
<monospace>Freqtrade</monospace> to include a user-friendly well
|
||||||
|
tested interface for integrating external machine learning libraries
|
||||||
|
for adaptive time-series forecasting. Beyond enabling the integration
|
||||||
|
of existing libraries, <monospace>FreqAI</monospace> hosts a range of
|
||||||
|
custom algorithms and methodologies aimed at improving computational
|
||||||
|
and predictive performances. Thus, <monospace>FreqAI</monospace>
|
||||||
|
contains a range of unique features which can be easily tested in
|
||||||
|
combination with all the existing Python-accessible machine learning
|
||||||
|
libraries to generate novel research on live and historical data.</p>
|
||||||
|
<p>The high-level overview of the software is depicted in Figure
|
||||||
|
1.</p>
|
||||||
|
<p><named-content content-type="image">freqai-algo</named-content>
|
||||||
|
<italic>Abstracted overview of FreqAI algorithm</italic></p>
|
||||||
|
<sec id="connecting-machine-learning-libraries">
|
||||||
|
<title>Connecting machine learning libraries</title>
|
||||||
|
<p>Although the <monospace>FreqAI</monospace> framework is designed
|
||||||
|
to accommodate any Python library in the “Model training” and
|
||||||
|
“Feature set engineering” portions of the software (Figure 1), it
|
||||||
|
already boasts a wide range of well documented examples based on
|
||||||
|
various combinations of:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>scikit-learn
|
||||||
|
(<xref alt="Pedregosa et al., 2011" rid="ref-scikit-learn" ref-type="bibr">Pedregosa
|
||||||
|
et al., 2011</xref>), Catboost
|
||||||
|
(<xref alt="Prokhorenkova et al., 2018" rid="ref-catboost" ref-type="bibr">Prokhorenkova
|
||||||
|
et al., 2018</xref>), LightGBM
|
||||||
|
(<xref alt="Ke et al., 2017" rid="ref-lightgbm" ref-type="bibr">Ke
|
||||||
|
et al., 2017</xref>), XGBoost
|
||||||
|
(<xref alt="Chen & Guestrin, 2016" rid="ref-xgboost" ref-type="bibr">Chen
|
||||||
|
& Guestrin, 2016</xref>), stable_baselines3
|
||||||
|
(<xref alt="Raffin et al., 2021" rid="ref-stable-baselines3" ref-type="bibr">Raffin
|
||||||
|
et al., 2021</xref>), openai gym
|
||||||
|
(<xref alt="Brockman et al., 2016" rid="ref-openai" ref-type="bibr">Brockman
|
||||||
|
et al., 2016</xref>), tensorflow
|
||||||
|
(<xref alt="Abadi et al., 2015" rid="ref-tensorflow" ref-type="bibr">Abadi
|
||||||
|
et al., 2015</xref>), pytorch
|
||||||
|
(<xref alt="Paszke et al., 2019" rid="ref-pytorch" ref-type="bibr">Paszke
|
||||||
|
et al., 2019</xref>), Scipy
|
||||||
|
(<xref alt="Virtanen et al., 2020" rid="ref-scipy" ref-type="bibr">Virtanen
|
||||||
|
et al., 2020</xref>), Numpy
|
||||||
|
(<xref alt="Harris et al., 2020" rid="ref-numpy" ref-type="bibr">Harris
|
||||||
|
et al., 2020</xref>), and pandas
|
||||||
|
(<xref alt="McKinney & others, 2010" rid="ref-pandas" ref-type="bibr">McKinney
|
||||||
|
& others, 2010</xref>).</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>These mature projects contain a wide range of peer-reviewed and
|
||||||
|
industry standard methods, including:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>Regression, Classification, Neural Networks, Reinforcement
|
||||||
|
Learning, Support Vector Machines, Principal Component Analysis,
|
||||||
|
point clustering, and much more.</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>which are all leveraged in <monospace>FreqAI</monospace> for
|
||||||
|
users to use as templates or extend with their own methods.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="furnishing-novel-methods-and-features">
|
||||||
|
<title>Furnishing novel methods and features</title>
|
||||||
|
<p>Beyond the industry standard methods available through external
|
||||||
|
libraries - <monospace>FreqAI</monospace> includes novel methods
|
||||||
|
which are not available anywhere else in the open-source (or
|
||||||
|
scientific) world. For example, <monospace>FreqAI</monospace>
|
||||||
|
provides :</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>a custom algorithm/methodology for adaptive modeling</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>rapid and self-monitored feature engineering tools</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>unique model features/indicators</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>optimized data collection algorithms</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>safely integrated outlier detection methods</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>websocket communicated forecasts</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>Of particular interest for researchers,
|
||||||
|
<monospace>FreqAI</monospace> provides the option of large scale
|
||||||
|
experimentation via an optimized websocket communications
|
||||||
|
interface.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="optimizing-the-back-end">
|
||||||
|
<title>Optimizing the back-end</title>
|
||||||
|
<p><monospace>FreqAI</monospace> aims to make it simple for users to
|
||||||
|
combine all the above tools to run studies based in two distinct
|
||||||
|
modules:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>backtesting studies</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>live-deployments</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>Both of these modules and their respective data management
|
||||||
|
systems are built on top of
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/"><monospace>Freqtrade</monospace></ext-link>,
|
||||||
|
a mature and actively developed cryptocurrency trading software.
|
||||||
|
This means that <monospace>FreqAI</monospace> benefits from a wide
|
||||||
|
range of tangential/disparate feature developments such as:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>FreqUI, a graphical interface for backtesting and live
|
||||||
|
monitoring</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>telegram control</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>robust database handling</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>futures/leverage trading</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>dollar cost averaging</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>trading strategy handling</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>a variety of free data sources via CCXT (FTX, Binance, Kucoin
|
||||||
|
etc.)</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>These features derive from a strong external developer community
|
||||||
|
that shares in the benefit and stability of a communal CI
|
||||||
|
(Continuous Integration) system. Beyond the developer community,
|
||||||
|
<monospace>FreqAI</monospace> benefits strongly from the userbase of
|
||||||
|
<monospace>Freqtrade</monospace>, where most
|
||||||
|
<monospace>FreqAI</monospace> beta-testers/developers originated.
|
||||||
|
This symbiotic relationship between <monospace>Freqtrade</monospace>
|
||||||
|
and <monospace>FreqAI</monospace> ignited a thoroughly tested
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://github.com/freqtrade/freqtrade/pull/6832"><monospace>beta</monospace></ext-link>,
|
||||||
|
which demanded a four-month beta and
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/">comprehensive
|
||||||
|
documentation</ext-link> containing:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>numerous example scripts</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>a full parameter table</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>methodological descriptions</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>high-resolution diagrams/figures</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>detailed parameter setting recommendations</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
</sec>
|
||||||
|
<sec id="providing-a-reproducible-foundation-for-researchers">
|
||||||
|
<title>Providing a reproducible foundation for researchers</title>
|
||||||
|
<p><monospace>FreqAI</monospace> provides an extensible, robust,
|
||||||
|
framework for researchers and citizen data scientists. The
|
||||||
|
<monospace>FreqAI</monospace> sandbox enables rapid conception and
|
||||||
|
testing of exotic hypotheses. From a research perspective,
|
||||||
|
<monospace>FreqAI</monospace> handles the multitude of logistics
|
||||||
|
associated with live deployments, historical backtesting, and
|
||||||
|
feature engineering. With <monospace>FreqAI</monospace>, researchers
|
||||||
|
can focus on their primary interests of feature engineering and
|
||||||
|
hypothesis testing rather than figuring out how to collect and
|
||||||
|
handle data. Further - the well maintained and easily installed
|
||||||
|
open-source framework of <monospace>FreqAI</monospace> enables
|
||||||
|
reproducible scientific studies. This reproducibility component is
|
||||||
|
essential to general scientific advancement in time-series
|
||||||
|
forecasting for chaotic systems.</p>
|
||||||
|
</sec>
|
||||||
|
</sec>
|
||||||
|
<sec id="technical-details">
|
||||||
|
<title>Technical details</title>
|
||||||
|
<p>Typical users configure <monospace>FreqAI</monospace> via two
|
||||||
|
files:</p>
|
||||||
|
<list list-type="order">
|
||||||
|
<list-item>
|
||||||
|
<p>A <monospace>configuration</monospace> file
|
||||||
|
(<monospace>--config</monospace>) which provides access to the
|
||||||
|
full parameter list available
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/">here</ext-link>:</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>control high-level feature engineering</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>customize adaptive modeling techniques</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>set any model training parameters available in third-party
|
||||||
|
libraries</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>manage adaptive modeling parameters (retrain frequency,
|
||||||
|
training window size, continual learning, etc.)</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<list list-type="order">
|
||||||
|
<list-item>
|
||||||
|
<label>2.</label>
|
||||||
|
<p>A strategy file (<monospace>--strategy</monospace>) where
|
||||||
|
users:</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>list the base training features</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>set standard technical-analysis strategies</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>control trade entry/exit criteria</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>With these two files, most users can exploit a wide range of
|
||||||
|
pre-existing integrations in <monospace>Catboost</monospace> and 7
|
||||||
|
other libraries with a simple command:</p>
|
||||||
|
<preformat>freqtrade trade --config config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel CatboostRegressor</preformat>
|
||||||
|
<p>Advanced users will edit one of the existing
|
||||||
|
<monospace>--freqaimodel</monospace> files, which are simply
|
||||||
|
children of the <monospace>IFreqaiModel</monospace> (details below).
|
||||||
|
Within these files, advanced users can customize training procedures,
|
||||||
|
prediction procedures, outlier detection methods, data preparation,
|
||||||
|
data saving methods, etc. This is all configured in a way where they
|
||||||
|
can customize as little or as much as they want. This flexible
|
||||||
|
customization is owed to the foundational architecture in
|
||||||
|
<monospace>FreqAI</monospace>, which is comprised of three distinct
|
||||||
|
Python objects:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p><monospace>IFreqaiModel</monospace></p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>A singular long-lived object containing all the necessary
|
||||||
|
logic to collect data, store data, process data, engineer
|
||||||
|
features, run training, and inference models.</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p><monospace>FreqaiDataKitchen</monospace></p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>A short-lived object which is uniquely created for each
|
||||||
|
asset/model. Beyond metadata, it also contains a variety of
|
||||||
|
data processing tools.</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p><monospace>FreqaiDataDrawer</monospace></p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>Singular long-lived object containing all the historical
|
||||||
|
predictions, models, and save/load methods.</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>These objects interact with one another with one goal in mind - to
|
||||||
|
provide a clean data set to machine learning experts/enthusiasts at
|
||||||
|
the user endpoint. These power-users interact with an inherited
|
||||||
|
<monospace>IFreqaiModel</monospace> that allows them to dig as deep or
|
||||||
|
as shallow as they wish into the inheritance tree. Typical power-users
|
||||||
|
focus their efforts on customizing training procedures and testing
|
||||||
|
exotic functionalities available in third-party libraries. Thus,
|
||||||
|
power-users are freed from the algorithmic weight associated with data
|
||||||
|
management, and can instead focus their energy on testing creative
|
||||||
|
hypotheses. Meanwhile, some users choose to override deeper
|
||||||
|
functionalities within <monospace>IFreqaiModel</monospace> to help
|
||||||
|
them craft unique data structures and training procedures.</p>
|
||||||
|
<p>The class structure and algorithmic details are depicted in the
|
||||||
|
following diagram:</p>
|
||||||
|
<p><named-content content-type="image">image</named-content>
|
||||||
|
<italic>Class diagram summarizing object interactions in
|
||||||
|
FreqAI</italic></p>
|
||||||
|
</sec>
|
||||||
|
<sec id="online-documentation">
|
||||||
|
<title>Online documentation</title>
|
||||||
|
<p>The documentation for
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/"><monospace>FreqAI</monospace></ext-link>
|
||||||
|
is available online at
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/">https://www.freqtrade.io/en/latest/freqai/</ext-link>
|
||||||
|
and covers a wide range of materials:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>Quick-start with a single command and example files -
|
||||||
|
(beginners)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Introduction to the feature engineering interface and basic
|
||||||
|
configurations - (intermediate users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Parameter table with in-depth descriptions and default parameter
|
||||||
|
setting recommendations - (intermediate users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Data analysis and post-processing - (advanced users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Methodological considerations complemented by high resolution
|
||||||
|
figures - (advanced users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Instructions for integrating third party machine learning
|
||||||
|
libraries into custom prediction models - (advanced users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Software architectural description with class diagram -
|
||||||
|
(developers)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>File structure descriptions - (developers)</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>The docs direct users to a variety of pre-made examples which
|
||||||
|
integrate <monospace>Catboost</monospace>,
|
||||||
|
<monospace>LightGBM</monospace>, <monospace>XGBoost</monospace>,
|
||||||
|
<monospace>Sklearn</monospace>,
|
||||||
|
<monospace>stable_baselines3</monospace>,
|
||||||
|
<monospace>torch</monospace>, <monospace>tensorflow</monospace>.
|
||||||
|
Meanwhile, developers will also find thorough docstrings and type
|
||||||
|
hinting throughout the source code to aid in code readability and
|
||||||
|
customization.</p>
|
||||||
|
<p><monospace>FreqAI</monospace> also benefits from a strong support
|
||||||
|
network of users and developers on the
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://discord.gg/w6nDM6cM4y"><monospace>Freqtrade</monospace>
|
||||||
|
discord</ext-link> as well as on the
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://discord.gg/xE4RMg4QYw"><monospace>FreqAI</monospace>
|
||||||
|
discord</ext-link>. Within the <monospace>FreqAI</monospace> discord,
|
||||||
|
users will find a deep and easily searched knowledge base containing
|
||||||
|
common errors. But more importantly, users in the
|
||||||
|
<monospace>FreqAI</monospace> discord share anecdotal and
|
||||||
|
quantitative observations which compare performance between various
|
||||||
|
third-party libraries and methods.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="state-of-the-field">
|
||||||
|
<title>State of the field</title>
|
||||||
|
<p>There are two other open-source tools which are geared toward
|
||||||
|
helping users build models for time-series forecasts on market based
|
||||||
|
data. However, each of these tools suffers from a non-generalized
|

||||||

|

framework that does not permit comparison of methods and libraries.
|
||||||
|
Additionally, they do not permit easy live-deployments or
|
||||||
|
adaptive-modeling methods. For example, two open-sourced projects
|
||||||
|
called
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://tensortradex.readthedocs.io/en/latest/"><monospace>tensortrade</monospace></ext-link>
|
||||||
|
(<xref alt="Tensortrade, 2022" rid="ref-tensortrade" ref-type="bibr"><italic>Tensortrade</italic>,
|
||||||
|
2022</xref>) and
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://github.com/AI4Finance-Foundation/FinRL"><monospace>FinRL</monospace></ext-link>
|
||||||
|
(<xref alt="AI4Finance-Foundation, 2022" rid="ref-finrl" ref-type="bibr"><italic>AI4Finance-Foundation</italic>,
|
||||||
|
2022</xref>) limit users to the exploration of reinforcement learning
|
||||||
|
on historical data. These software packages also do not provide robust live
|
||||||
|
deployments, they do not furnish novel feature engineering algorithms,
|
||||||
|
and they do not provide custom data analysis tools.
|
||||||
|
<monospace>FreqAI</monospace> fills the gap.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="on-going-research">
|
||||||
|
<title>On-going research</title>
|
||||||
|
<p>Emergent Methods, based in Arvada CO, is actively using
|
||||||
|
<monospace>FreqAI</monospace> to perform large scale experiments aimed
|
||||||
|
at comparing machine learning libraries in live and historical
|
||||||
|
environments. Past projects include backtesting parametric sweeps,
|
||||||
|
while active projects include a 3 week live deployment comparison
|
||||||
|
between <monospace>CatboostRegressor</monospace>,
|
||||||
|
<monospace>LightGBMRegressor</monospace>, and
|
||||||
|
<monospace>XGBoostRegressor</monospace>. Results from these studies
|
||||||
|
are on track for publication in scientific journals as well as more
|
||||||
|
general data science blogs (e.g. Medium).</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="installing-and-running-freqai">
|
||||||
|
<title>Installing and running <monospace>FreqAI</monospace></title>
|
||||||
|
<p><monospace>FreqAI</monospace> is automatically installed with
|
||||||
|
<monospace>Freqtrade</monospace> using the following commands on linux
|
||||||
|
systems:</p>
|
||||||
|
<preformat>git clone git@github.com:freqtrade/freqtrade.git
|
||||||
|
cd freqtrade
|
||||||
|
./setup.sh -i</preformat>
|
||||||
|
<p>However, <monospace>FreqAI</monospace> also benefits from
|
||||||
|
<monospace>Freqtrade</monospace> docker distributions, and can be run
|
||||||
|
with docker by pulling the stable or develop images from
|
||||||
|
<monospace>Freqtrade</monospace> distributions.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="funding-sources">
|
||||||
|
<title>Funding sources</title>
|
||||||
|
<p><ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/"><monospace>FreqAI</monospace></ext-link>
|
||||||
|
has had no official sponsors, and is entirely grass roots. All
|
||||||
|
donations into the project (e.g. the GitHub sponsor system) are kept
|
||||||
|
inside the project to help support development of open-sourced and
|
||||||
|
communally beneficial features.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="acknowledgements">
|
||||||
|
<title>Acknowledgements</title>
|
||||||
|
<p>We would like to acknowledge various beta testers of
|
||||||
|
<monospace>FreqAI</monospace>:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>Richárd Józsa</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Juha Nykänen</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Salah Lamkadem</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>As well as various <monospace>Freqtrade</monospace>
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://github.com/freqtrade/freqtrade/graphs/contributors">developers</ext-link>
|
||||||
|
maintaining tangential, yet essential, modules.</p>
|
||||||
|
</sec>
|
||||||
|
</body>
|
||||||
|
<back>
|
||||||
|
<ref-list>
|
||||||
|
<ref id="ref-scikit-learn">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Pedregosa</surname><given-names>F.</given-names></name>
|
||||||
|
<name><surname>Varoquaux</surname><given-names>G.</given-names></name>
|
||||||
|
<name><surname>Gramfort</surname><given-names>A.</given-names></name>
|
||||||
|
<name><surname>Michel</surname><given-names>V.</given-names></name>
|
||||||
|
<name><surname>Thirion</surname><given-names>B.</given-names></name>
|
||||||
|
<name><surname>Grisel</surname><given-names>O.</given-names></name>
|
||||||
|
<name><surname>Blondel</surname><given-names>M.</given-names></name>
|
||||||
|
<name><surname>Prettenhofer</surname><given-names>P.</given-names></name>
|
||||||
|
<name><surname>Weiss</surname><given-names>R.</given-names></name>
|
||||||
|
<name><surname>Dubourg</surname><given-names>V.</given-names></name>
|
||||||
|
<name><surname>Vanderplas</surname><given-names>J.</given-names></name>
|
||||||
|
<name><surname>Passos</surname><given-names>A.</given-names></name>
|
||||||
|
<name><surname>Cournapeau</surname><given-names>D.</given-names></name>
|
||||||
|
<name><surname>Brucher</surname><given-names>M.</given-names></name>
|
||||||
|
<name><surname>Perrot</surname><given-names>M.</given-names></name>
|
||||||
|
<name><surname>Duchesnay</surname><given-names>E.</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Scikit-learn: Machine learning in Python</article-title>
|
||||||
|
<source>Journal of Machine Learning Research</source>
|
||||||
|
<year iso-8601-date="2011">2011</year>
|
||||||
|
<volume>12</volume>
|
||||||
|
<fpage>2825</fpage>
|
||||||
|
<lpage>2830</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-catboost">
|
||||||
|
<element-citation publication-type="paper-conference">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Prokhorenkova</surname><given-names>Liudmila</given-names></name>
|
||||||
|
<name><surname>Gusev</surname><given-names>Gleb</given-names></name>
|
||||||
|
<name><surname>Vorobev</surname><given-names>Aleksandr</given-names></name>
|
||||||
|
<name><surname>Dorogush</surname><given-names>Anna Veronika</given-names></name>
|
||||||
|
<name><surname>Gulin</surname><given-names>Andrey</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>CatBoost: Unbiased boosting with categorical features</article-title>
|
||||||
|
<source>Proceedings of the 32nd international conference on neural information processing systems</source>
|
||||||
|
<publisher-name>Curran Associates Inc.</publisher-name>
|
||||||
|
<publisher-loc>Red Hook, NY, USA</publisher-loc>
|
||||||
|
<year iso-8601-date="2018">2018</year>
|
||||||
|
<fpage>6639</fpage>
|
||||||
|
<lpage>6649</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-lightgbm">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Ke</surname><given-names>Guolin</given-names></name>
|
||||||
|
<name><surname>Meng</surname><given-names>Qi</given-names></name>
|
||||||
|
<name><surname>Finley</surname><given-names>Thomas</given-names></name>
|
||||||
|
<name><surname>Wang</surname><given-names>Taifeng</given-names></name>
|
||||||
|
<name><surname>Chen</surname><given-names>Wei</given-names></name>
|
||||||
|
<name><surname>Ma</surname><given-names>Weidong</given-names></name>
|
||||||
|
<name><surname>Ye</surname><given-names>Qiwei</given-names></name>
|
||||||
|
<name><surname>Liu</surname><given-names>Tie-Yan</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Lightgbm: A highly efficient gradient boosting decision tree</article-title>
|
||||||
|
<source>Advances in neural information processing systems</source>
|
||||||
|
<year iso-8601-date="2017">2017</year>
|
||||||
|
<volume>30</volume>
|
||||||
|
<fpage>3146</fpage>
|
||||||
|
<lpage>3154</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-xgboost">
|
||||||
|
<element-citation publication-type="paper-conference">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Chen</surname><given-names>Tianqi</given-names></name>
|
||||||
|
<name><surname>Guestrin</surname><given-names>Carlos</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>XGBoost: A scalable tree boosting system</article-title>
|
||||||
|
<source>Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining</source>
|
||||||
|
<publisher-name>ACM</publisher-name>
|
||||||
|
<publisher-loc>New York, NY, USA</publisher-loc>
|
||||||
|
<year iso-8601-date="2016">2016</year>
|
||||||
|
<isbn>978-1-4503-4232-2</isbn>
|
||||||
|
<uri>http://doi.acm.org/10.1145/2939672.2939785</uri>
|
||||||
|
<pub-id pub-id-type="doi">10.1145/2939672.2939785</pub-id>
|
||||||
|
<fpage>785</fpage>
|
||||||
|
<lpage>794</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-stable-baselines3">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Raffin</surname><given-names>Antonin</given-names></name>
|
||||||
|
<name><surname>Hill</surname><given-names>Ashley</given-names></name>
|
||||||
|
<name><surname>Gleave</surname><given-names>Adam</given-names></name>
|
||||||
|
<name><surname>Kanervisto</surname><given-names>Anssi</given-names></name>
|
||||||
|
<name><surname>Ernestus</surname><given-names>Maximilian</given-names></name>
|
||||||
|
<name><surname>Dormann</surname><given-names>Noah</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Stable-Baselines3: Reliable reinforcement learning implementations</article-title>
|
||||||
|
<source>Journal of Machine Learning Research</source>
|
||||||
|
<year iso-8601-date="2021">2021</year>
|
||||||
|
<volume>22</volume>
|
||||||
|
<issue>268</issue>
|
||||||
|
<uri>http://jmlr.org/papers/v22/20-1364.html</uri>
|
||||||
|
<fpage>1</fpage>
|
||||||
|
<lpage>8</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-openai">
|
||||||
|
<element-citation>
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Brockman</surname><given-names>Greg</given-names></name>
|
||||||
|
<name><surname>Cheung</surname><given-names>Vicki</given-names></name>
|
||||||
|
<name><surname>Pettersson</surname><given-names>Ludwig</given-names></name>
|
||||||
|
<name><surname>Schneider</surname><given-names>Jonas</given-names></name>
|
||||||
|
<name><surname>Schulman</surname><given-names>John</given-names></name>
|
||||||
|
<name><surname>Tang</surname><given-names>Jie</given-names></name>
|
||||||
|
<name><surname>Zaremba</surname><given-names>Wojciech</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>OpenAI gym</article-title>
|
||||||
|
<year iso-8601-date="2016">2016</year>
|
||||||
|
<uri>https://arxiv.org/abs/1606.01540</uri>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-tensorflow">
|
||||||
|
<element-citation>
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Abadi</surname><given-names>Martín</given-names></name>
|
||||||
|
<name><surname>Agarwal</surname><given-names>Ashish</given-names></name>
|
||||||
|
<name><surname>Barham</surname><given-names>Paul</given-names></name>
|
||||||
|
<name><surname>Brevdo</surname><given-names>Eugene</given-names></name>
|
||||||
|
<name><surname>Chen</surname><given-names>Zhifeng</given-names></name>
|
||||||
|
<name><surname>Citro</surname><given-names>Craig</given-names></name>
|
||||||
|
<name><surname>Corrado</surname><given-names>Greg S.</given-names></name>
|
||||||
|
<name><surname>Davis</surname><given-names>Andy</given-names></name>
|
||||||
|
<name><surname>Dean</surname><given-names>Jeffrey</given-names></name>
|
||||||
|
<name><surname>Devin</surname><given-names>Matthieu</given-names></name>
|
||||||
|
<name><surname>Ghemawat</surname><given-names>Sanjay</given-names></name>
|
||||||
|
<name><surname>Goodfellow</surname><given-names>Ian</given-names></name>
|
||||||
|
<name><surname>Harp</surname><given-names>Andrew</given-names></name>
|
||||||
|
<name><surname>Irving</surname><given-names>Geoffrey</given-names></name>
|
||||||
|
<name><surname>Isard</surname><given-names>Michael</given-names></name>
|
||||||
|
<name><surname>Jia</surname><given-names>Yangqing</given-names></name>
|
||||||
|
<name><surname>Jozefowicz</surname><given-names>Rafal</given-names></name>
|
||||||
|
<name><surname>Kaiser</surname><given-names>Lukasz</given-names></name>
|
||||||
|
<name><surname>Kudlur</surname><given-names>Manjunath</given-names></name>
|
||||||
|
<name><surname>Levenberg</surname><given-names>Josh</given-names></name>
|
||||||
|
<name><surname>Mané</surname><given-names>Dandelion</given-names></name>
|
||||||
|
<name><surname>Monga</surname><given-names>Rajat</given-names></name>
|
||||||
|
<name><surname>Moore</surname><given-names>Sherry</given-names></name>
|
||||||
|
<name><surname>Murray</surname><given-names>Derek</given-names></name>
|
||||||
|
<name><surname>Olah</surname><given-names>Chris</given-names></name>
|
||||||
|
<name><surname>Schuster</surname><given-names>Mike</given-names></name>
|
||||||
|
<name><surname>Shlens</surname><given-names>Jonathon</given-names></name>
|
||||||
|
<name><surname>Steiner</surname><given-names>Benoit</given-names></name>
|
||||||
|
<name><surname>Sutskever</surname><given-names>Ilya</given-names></name>
|
||||||
|
<name><surname>Talwar</surname><given-names>Kunal</given-names></name>
|
||||||
|
<name><surname>Tucker</surname><given-names>Paul</given-names></name>
|
||||||
|
<name><surname>Vanhoucke</surname><given-names>Vincent</given-names></name>
|
||||||
|
<name><surname>Vasudevan</surname><given-names>Vijay</given-names></name>
|
||||||
|
<name><surname>Viégas</surname><given-names>Fernanda</given-names></name>
|
||||||
|
<name><surname>Vinyals</surname><given-names>Oriol</given-names></name>
|
||||||
|
<name><surname>Warden</surname><given-names>Pete</given-names></name>
|
||||||
|
<name><surname>Wattenberg</surname><given-names>Martin</given-names></name>
|
||||||
|
<name><surname>Wicke</surname><given-names>Martin</given-names></name>
|
||||||
|
<name><surname>Yu</surname><given-names>Yuan</given-names></name>
|
||||||
|
<name><surname>Zheng</surname><given-names>Xiaoqiang</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>TensorFlow: Large-scale machine learning on heterogeneous systems</article-title>
|
||||||
|
<year iso-8601-date="2015">2015</year>
|
||||||
|
<uri>https://www.tensorflow.org/</uri>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-pytorch">
|
||||||
|
<element-citation publication-type="chapter">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Paszke</surname><given-names>Adam</given-names></name>
|
||||||
|
<name><surname>Gross</surname><given-names>Sam</given-names></name>
|
||||||
|
<name><surname>Massa</surname><given-names>Francisco</given-names></name>
|
||||||
|
<name><surname>Lerer</surname><given-names>Adam</given-names></name>
|
||||||
|
<name><surname>Bradbury</surname><given-names>James</given-names></name>
|
||||||
|
<name><surname>Chanan</surname><given-names>Gregory</given-names></name>
|
||||||
|
<name><surname>Killeen</surname><given-names>Trevor</given-names></name>
|
||||||
|
<name><surname>Lin</surname><given-names>Zeming</given-names></name>
|
||||||
|
<name><surname>Gimelshein</surname><given-names>Natalia</given-names></name>
|
||||||
|
<name><surname>Antiga</surname><given-names>Luca</given-names></name>
|
||||||
|
<name><surname>Desmaison</surname><given-names>Alban</given-names></name>
|
||||||
|
<name><surname>Kopf</surname><given-names>Andreas</given-names></name>
|
||||||
|
<name><surname>Yang</surname><given-names>Edward</given-names></name>
|
||||||
|
<name><surname>DeVito</surname><given-names>Zachary</given-names></name>
|
||||||
|
<name><surname>Raison</surname><given-names>Martin</given-names></name>
|
||||||
|
<name><surname>Tejani</surname><given-names>Alykhan</given-names></name>
|
||||||
|
<name><surname>Chilamkurthy</surname><given-names>Sasank</given-names></name>
|
||||||
|
<name><surname>Steiner</surname><given-names>Benoit</given-names></name>
|
||||||
|
<name><surname>Fang</surname><given-names>Lu</given-names></name>
|
||||||
|
<name><surname>Bai</surname><given-names>Junjie</given-names></name>
|
||||||
|
<name><surname>Chintala</surname><given-names>Soumith</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>PyTorch: An imperative style, high-performance deep learning library</article-title>
|
||||||
|
<source>Advances in neural information processing systems 32</source>
|
||||||
|
<person-group person-group-type="editor">
|
||||||
|
<name><surname>Wallach</surname><given-names>H.</given-names></name>
|
||||||
|
<name><surname>Larochelle</surname><given-names>H.</given-names></name>
|
||||||
|
<name><surname>Beygelzimer</surname><given-names>A.</given-names></name>
|
||||||
|
<name><surname>dAlché-Buc</surname><given-names>F.</given-names></name>
|
||||||
|
<name><surname>Fox</surname><given-names>E.</given-names></name>
|
||||||
|
<name><surname>Garnett</surname><given-names>R.</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<publisher-name>Curran Associates, Inc.</publisher-name>
|
||||||
|
<year iso-8601-date="2019">2019</year>
|
||||||
|
<uri>http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf</uri>
|
||||||
|
<fpage>8024</fpage>
|
||||||
|
<lpage>8035</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-scipy">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Virtanen</surname><given-names>Pauli</given-names></name>
|
||||||
|
<name><surname>Gommers</surname><given-names>Ralf</given-names></name>
|
||||||
|
<name><surname>Oliphant</surname><given-names>Travis E.</given-names></name>
|
||||||
|
<name><surname>Haberland</surname><given-names>Matt</given-names></name>
|
||||||
|
<name><surname>Reddy</surname><given-names>Tyler</given-names></name>
|
||||||
|
<name><surname>Cournapeau</surname><given-names>David</given-names></name>
|
||||||
|
<name><surname>Burovski</surname><given-names>Evgeni</given-names></name>
|
||||||
|
<name><surname>Peterson</surname><given-names>Pearu</given-names></name>
|
||||||
|
<name><surname>Weckesser</surname><given-names>Warren</given-names></name>
|
||||||
|
<name><surname>Bright</surname><given-names>Jonathan</given-names></name>
|
||||||
|
<name><surname>van der Walt</surname><given-names>Stéfan J.</given-names></name>
|
||||||
|
<name><surname>Brett</surname><given-names>Matthew</given-names></name>
|
||||||
|
<name><surname>Wilson</surname><given-names>Joshua</given-names></name>
|
||||||
|
<name><surname>Millman</surname><given-names>K. Jarrod</given-names></name>
|
||||||
|
<name><surname>Mayorov</surname><given-names>Nikolay</given-names></name>
|
||||||
|
<name><surname>Nelson</surname><given-names>Andrew R. J.</given-names></name>
|
||||||
|
<name><surname>Jones</surname><given-names>Eric</given-names></name>
|
||||||
|
<name><surname>Kern</surname><given-names>Robert</given-names></name>
|
||||||
|
<name><surname>Larson</surname><given-names>Eric</given-names></name>
|
||||||
|
<name><surname>Carey</surname><given-names>C J</given-names></name>
|
||||||
|
<name><surname>Polat</surname><given-names>İlhan</given-names></name>
|
||||||
|
<name><surname>Feng</surname><given-names>Yu</given-names></name>
|
||||||
|
<name><surname>Moore</surname><given-names>Eric W.</given-names></name>
|
||||||
|
<name><surname>VanderPlas</surname><given-names>Jake</given-names></name>
|
||||||
|
<name><surname>Laxalde</surname><given-names>Denis</given-names></name>
|
||||||
|
<name><surname>Perktold</surname><given-names>Josef</given-names></name>
|
||||||
|
<name><surname>Cimrman</surname><given-names>Robert</given-names></name>
|
||||||
|
<name><surname>Henriksen</surname><given-names>Ian</given-names></name>
|
||||||
|
<name><surname>Quintero</surname><given-names>E. A.</given-names></name>
|
||||||
|
<name><surname>Harris</surname><given-names>Charles R.</given-names></name>
|
||||||
|
<name><surname>Archibald</surname><given-names>Anne M.</given-names></name>
|
||||||
|
<name><surname>Ribeiro</surname><given-names>Antônio H.</given-names></name>
|
||||||
|
<name><surname>Pedregosa</surname><given-names>Fabian</given-names></name>
|
||||||
|
<name><surname>van Mulbregt</surname><given-names>Paul</given-names></name>
|
||||||
|
<string-name>SciPy 1.0 Contributors</string-name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>SciPy 1.0: Fundamental Algorithms for Scientific Computing in Python</article-title>
|
||||||
|
<source>Nature Methods</source>
|
||||||
|
<year iso-8601-date="2020">2020</year>
|
||||||
|
<volume>17</volume>
|
||||||
|
<pub-id pub-id-type="doi">10.1038/s41592-019-0686-2</pub-id>
|
||||||
|
<fpage>261</fpage>
|
||||||
|
<lpage>272</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-numpy">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Harris</surname><given-names>Charles R.</given-names></name>
|
||||||
|
<name><surname>Millman</surname><given-names>K. Jarrod</given-names></name>
|
||||||
|
<name><surname>Walt</surname><given-names>Stéfan J. van der</given-names></name>
|
||||||
|
<name><surname>Gommers</surname><given-names>Ralf</given-names></name>
|
||||||
|
<name><surname>Virtanen</surname><given-names>Pauli</given-names></name>
|
||||||
|
<name><surname>Cournapeau</surname><given-names>David</given-names></name>
|
||||||
|
<name><surname>Wieser</surname><given-names>Eric</given-names></name>
|
||||||
|
<name><surname>Taylor</surname><given-names>Julian</given-names></name>
|
||||||
|
<name><surname>Berg</surname><given-names>Sebastian</given-names></name>
|
||||||
|
<name><surname>Smith</surname><given-names>Nathaniel J.</given-names></name>
|
||||||
|
<name><surname>Kern</surname><given-names>Robert</given-names></name>
|
||||||
|
<name><surname>Picus</surname><given-names>Matti</given-names></name>
|
||||||
|
<name><surname>Hoyer</surname><given-names>Stephan</given-names></name>
|
||||||
|
<name><surname>Kerkwijk</surname><given-names>Marten H. van</given-names></name>
|
||||||
|
<name><surname>Brett</surname><given-names>Matthew</given-names></name>
|
||||||
|
<name><surname>Haldane</surname><given-names>Allan</given-names></name>
|
||||||
|
<name><surname>Río</surname><given-names>Jaime Fernández del</given-names></name>
|
||||||
|
<name><surname>Wiebe</surname><given-names>Mark</given-names></name>
|
||||||
|
<name><surname>Peterson</surname><given-names>Pearu</given-names></name>
|
||||||
|
<name><surname>Gérard-Marchant</surname><given-names>Pierre</given-names></name>
|
||||||
|
<name><surname>Sheppard</surname><given-names>Kevin</given-names></name>
|
||||||
|
<name><surname>Reddy</surname><given-names>Tyler</given-names></name>
|
||||||
|
<name><surname>Weckesser</surname><given-names>Warren</given-names></name>
|
||||||
|
<name><surname>Abbasi</surname><given-names>Hameer</given-names></name>
|
||||||
|
<name><surname>Gohlke</surname><given-names>Christoph</given-names></name>
|
||||||
|
<name><surname>Oliphant</surname><given-names>Travis E.</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Array programming with NumPy</article-title>
|
||||||
|
<source>Nature</source>
|
||||||
|
<publisher-name>Springer Science; Business Media LLC</publisher-name>
|
||||||
|
<year iso-8601-date="2020-09">2020</year><month>09</month>
|
||||||
|
<volume>585</volume>
|
||||||
|
<issue>7825</issue>
|
||||||
|
<uri>https://doi.org/10.1038/s41586-020-2649-2</uri>
|
||||||
|
<pub-id pub-id-type="doi">10.1038/s41586-020-2649-2</pub-id>
|
||||||
|
<fpage>357</fpage>
|
||||||
|
<lpage>362</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-pandas">
|
||||||
|
<element-citation publication-type="paper-conference">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>McKinney</surname><given-names>Wes</given-names></name>
|
||||||
|
<name><surname>others</surname></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Data structures for statistical computing in python</article-title>
|
||||||
|
<source>Proceedings of the 9th python in science conference</source>
|
||||||
|
<publisher-name>Austin, TX</publisher-name>
|
||||||
|
<year iso-8601-date="2010">2010</year>
|
||||||
|
<volume>445</volume>
|
||||||
|
<fpage>51</fpage>
|
||||||
|
<lpage>56</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-finrl">
|
||||||
|
<element-citation publication-type="webpage">
|
||||||
|
<article-title>AI4Finance-foundation</article-title>
|
||||||
|
<year iso-8601-date="2022">2022</year>
|
||||||
|
<date-in-citation content-type="access-date"><year iso-8601-date="2022-09-30">2022</year><month>09</month><day>30</day></date-in-citation>
|
||||||
|
<uri>https://github.com/AI4Finance-Foundation/FinRL</uri>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-tensortrade">
|
||||||
|
<element-citation publication-type="webpage">
|
||||||
|
<article-title>Tensortrade</article-title>
|
||||||
|
<year iso-8601-date="2022">2022</year>
|
||||||
|
<date-in-citation content-type="access-date"><year iso-8601-date="2022-09-30">2022</year><month>09</month><day>30</day></date-in-citation>
|
||||||
|
<uri>https://tensortradex.readthedocs.io/en/latest/</uri>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
</ref-list>
|
||||||
|
</back>
|
||||||
|
</article>
|
212
docs/JOSS_paper/paper.md
Normal file
212
docs/JOSS_paper/paper.md
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
---
|
||||||
|
title: '`FreqAI`: generalizing adaptive modeling for chaotic time-series market forecasts'
|
||||||
|
tags:
|
||||||
|
- Python
|
||||||
|
- Machine Learning
|
||||||
|
- adaptive modeling
|
||||||
|
- chaotic systems
|
||||||
|
- time-series forecasting
|
||||||
|
authors:
|
||||||
|
- name: Robert A. Caulk
|
||||||
|
orcid: 0000-0001-5618-8629
|
||||||
|
affiliation: 1, 2
|
||||||
|
- name: Elin Törnquist
|
||||||
|
orcid: 0000-0003-3289-8604
|
||||||
|
affiliation: 1, 2
|
||||||
|
- name: Matthias Voppichler
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Andrew R. Lawless
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Ryan McMullan
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Wagner Costa Santos
|
||||||
|
orcid:
|
||||||
|
affiliation: 1, 2
|
||||||
|
- name: Timothy C. Pogue
|
||||||
|
orcid:
|
||||||
|
affiliation: 1, 2
|
||||||
|
- name: Johan van der Vlugt
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Stefan P. Gehring
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Pascal Schmidt
|
||||||
|
orcid: 0000-0001-9328-4345
|
||||||
|
affiliation: 2
|
||||||
|
|
||||||
|
<!-- affiliation: "1, 2" # (Multiple affiliations must be quoted) -->
|
||||||
|
affiliations:
|
||||||
|
- name: Emergent Methods LLC, Arvada Colorado, 80005, USA
|
||||||
|
index: 1
|
||||||
|
- name: Freqtrade open source project
|
||||||
|
index: 2
|
||||||
|
date: October 2022
|
||||||
|
bibliography: paper.bib
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
# Statement of need
|
||||||
|
|
||||||
|
Forecasting chaotic time-series based systems, such as equity/cryptocurrency markets, requires a broad set of tools geared toward testing a wide range of hypotheses. Fortunately, a recent maturation of robust machine learning libraries (e.g. `scikit-learn`), has opened up a wide range of research possibilities. Scientists from a diverse range of fields can now easily prototype their studies on an abundance of established machine learning algorithms. Similarly, these user-friendly libraries enable "citizen scientists" to use their basic Python skills for data-exploration. However, leveraging these machine learning libraries on historical and live chaotic data sources can be logistically difficult and expensive. Additionally, robust data-collection, storage, and handling presents a disparate challenge. [`FreqAI`](https://www.freqtrade.io/en/latest/freqai/) aims to provide a generalized and extensible open-sourced framework geared toward live deployments of adaptive modeling for market forecasting. The `FreqAI` framework is effectively a sandbox for the rich world of open-source machine learning libraries. Inside the `FreqAI` sandbox, users find they can combine a wide variety of third-party libraries to test creative hypotheses on a free live 24/7 chaotic data source - cryptocurrency exchange data.
|
||||||
|
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
|
||||||
|
[`FreqAI`](https://www.freqtrade.io/en/latest/freqai/) evolved from a desire to test and compare a range of adaptive time-series forecasting methods on chaotic data. Cryptocurrency markets provide a unique data source since they are operational 24/7 and the data is freely available via a variety of open-sourced [exchange APIs](https://docs.ccxt.com/en/latest/manual.html#exchange-structure). Luckily, an existing open-source software, [`Freqtrade`](https://www.freqtrade.io/en/stable/), had already matured under a range of talented developers to support robust data collection/storage, as well as robust live environmental interactions for standard algorithmic trading. `Freqtrade` also provides a set of data analysis/visualization tools for the evaluation of historical performance as well as live environmental feedback. `FreqAI` builds on top of `Freqtrade` to include a user-friendly well tested interface for integrating external machine learning libraries for adaptive time-series forecasting. Beyond enabling the integration of existing libraries, `FreqAI` hosts a range of custom algorithms and methodologies aimed at improving computational and predictive performances. Thus, `FreqAI` contains a range of unique features which can be easily tested in combination with all the existing Python-accessible machine learning libraries to generate novel research on live and historical data.
|
||||||
|
|
||||||
|
The high-level overview of the software is depicted in Figure 1.
|
||||||
|
|
||||||
|

|
||||||
|
*Abstracted overview of FreqAI algorithm*
|
||||||
|
|
||||||
|
## Connecting machine learning libraries
|
||||||
|
|
||||||
|
Although the `FreqAI` framework is designed to accommodate any Python library in the "Model training" and "Feature set engineering" portions of the software (Figure 1), it already boasts a wide range of well documented examples based on various combinations of:
|
||||||
|
|
||||||
|
* scikit-learn [@scikit-learn], Catboost [@catboost], LightGBM [@lightgbm], XGBoost [@xgboost], stable_baselines3 [@stable-baselines3], openai gym [@openai], tensorflow [@tensorflow], pytorch [@pytorch], Scipy [@scipy], Numpy [@numpy], and pandas [@pandas].
|
||||||
|
|
||||||
|
These mature projects contain a wide range of peer-reviewed and industry standard methods, including:
|
||||||
|
|
||||||
|
* Regression, Classification, Neural Networks, Reinforcement Learning, Support Vector Machines, Principal Component Analysis, point clustering, and much more.
|
||||||
|
|
||||||
|
which are all leveraged in `FreqAI` for users to use as templates or extend with their own methods.
|
||||||
|
|
||||||
|
## Furnishing novel methods and features
|
||||||
|
|
||||||
|
Beyond the industry standard methods available through external libraries - `FreqAI` includes novel methods which are not available anywhere else in the open-source (or scientific) world. For example, `FreqAI` provides:
|
||||||
|
|
||||||
|
* a custom algorithm/methodology for adaptive modeling, detailed [here](https://www.freqtrade.io/en/stable/freqai/#general-approach) and [here](https://www.freqtrade.io/en/stable/freqai-developers/#project-architecture)
|
||||||
|
* rapid and self-monitored feature engineering tools, details [here](https://www.freqtrade.io/en/stable/freqai-feature-engineering/#feature-engineering)
|
||||||
|
* unique model features/indicators, such as the [inlier metric](https://www.freqtrade.io/en/stable/freqai-feature-engineering/#inlier-metric)
|
||||||
|
* optimized data collection/storage algorithms, all code shown [here](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/freqai/data_drawer.py)
|
||||||
|
* safely integrated outlier detection methods, details [here](https://www.freqtrade.io/en/stable/freqai-feature-engineering/#outlier-detection)
|
||||||
|
* websocket communicated forecasts, details [here](https://www.freqtrade.io/en/stable/producer-consumer/)
|
||||||
|
|
||||||
|
Of particular interest for researchers, `FreqAI` provides the option of large scale experimentation via an optimized [websocket communications interface](https://www.freqtrade.io/en/stable/producer-consumer/).
|
||||||
|
|
||||||
|
## Optimizing the back-end
|
||||||
|
|
||||||
|
`FreqAI` aims to make it simple for users to combine all the above tools to run studies based in two distinct modules:
|
||||||
|
|
||||||
|
* backtesting studies
|
||||||
|
* live-deployments
|
||||||
|
|
||||||
|
Both of these modules and their respective data management systems are built on top of [`Freqtrade`](https://www.freqtrade.io/en/latest/), a mature and actively developed cryptocurrency trading software. This means that `FreqAI` benefits from a wide range of tangential/disparate feature developments such as:
|
||||||
|
|
||||||
|
* FreqUI, a graphical interface for backtesting and live monitoring
|
||||||
|
* telegram control
|
||||||
|
* robust database handling
|
||||||
|
* futures/leverage trading
|
||||||
|
* dollar cost averaging
|
||||||
|
* trading strategy handling
|
||||||
|
* a variety of free data sources via [CCXT](https://docs.ccxt.com/en/latest/manual.html#exchange-structure) (FTX, Binance, Kucoin etc.)
|
||||||
|
|
||||||
|
These features derive from a strong external developer community that shares in the benefit and stability of a communal CI (Continuous Integration) system. Beyond the developer community, `FreqAI` benefits strongly from the userbase of `Freqtrade`, where most `FreqAI` beta-testers/developers originated. This symbiotic relationship between `Freqtrade` and `FreqAI` ignited a thoroughly tested [`beta`](https://github.com/freqtrade/freqtrade/pull/6832), which demanded a four month beta and [comprehensive documentation](https://www.freqtrade.io/en/latest/freqai/) containing:
|
||||||
|
|
||||||
|
* numerous example scripts
|
||||||
|
* a full parameter table
|
||||||
|
* methodological descriptions
|
||||||
|
* high-resolution diagrams/figures
|
||||||
|
* detailed parameter setting recommendations
|
||||||
|
|
||||||
|
## Providing a reproducible foundation for researchers
|
||||||
|
|
||||||
|
`FreqAI` provides an extensible, robust, framework for researchers and citizen data scientists. The `FreqAI` sandbox enables rapid conception and testing of exotic hypotheses. From a research perspective, `FreqAI` handles the multitude of logistics associated with live deployments, historical backtesting, and feature engineering. With `FreqAI`, researchers can focus on their primary interests of feature engineering and hypothesis testing rather than figuring out how to collect and handle data. Further - the well maintained and easily installed open-source framework of `FreqAI` enables reproducible scientific studies. This reproducibility component is essential to general scientific advancement in time-series forecasting for chaotic systems.
|
||||||
|
|
||||||
|
# Technical details
|
||||||
|
|
||||||
|
Typical users configure `FreqAI` via two files:
|
||||||
|
|
||||||
|
1. A `configuration` file (`--config`) which provides access to the full parameter list available [here](https://www.freqtrade.io/en/latest/freqai/):
|
||||||
|
* control high-level feature engineering
|
||||||
|
* customize adaptive modeling techniques
|
||||||
|
* set any model training parameters available in third-party libraries
|
||||||
|
* manage adaptive modeling parameters (retrain frequency, training window size, continual learning, etc.)
|
||||||
|
|
||||||
|
2. A strategy file (`--strategy`) where users:
|
||||||
|
    * list the base training features
|
||||||
|
* set standard technical-analysis strategies
|
||||||
|
* control trade entry/exit criteria
|
||||||
|
|
||||||
|
With these two files, most users can exploit a wide range of pre-existing integrations in `Catboost` and 7 other libraries with a simple command:
|
||||||
|
|
||||||
|
```
|
||||||
|
freqtrade trade --config config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel CatboostRegressor
|
||||||
|
```
|
||||||
|
|
||||||
|
Advanced users will edit one of the existing `--freqaimodel` files, which are simply children of the `IFreqaiModel` (details below). Within these files, advanced users can customize training procedures, prediction procedures, outlier detection methods, data preparation, data saving methods, etc. This is all configured in a way where they can customize as little or as much as they want. This flexible customization is owed to the foundational architecture in `FreqAI`, which is composed of three distinct Python objects:
|
||||||
|
|
||||||
|
* `IFreqaiModel`
|
||||||
|
* A singular long-lived object containing all the necessary logic to collect data, store data, process data, engineer features, run training, and inference models.
|
||||||
|
* `FreqaiDataKitchen`
|
||||||
|
* A short-lived object which is uniquely created for each asset/model. Beyond metadata, it also contains a variety of data processing tools.
|
||||||
|
* `FreqaiDataDrawer`
|
||||||
|
* Singular long-lived object containing all the historical predictions, models, and save/load methods.
|
||||||
|
|
||||||
|
These objects interact with one another with one goal in mind - to provide a clean data set to machine learning experts/enthusiasts at the user endpoint. These power-users interact with an inherited `IFreqaiModel` that allows them to dig as deep or as shallow as they wish into the inheritance tree. Typical power-users focus their efforts on customizing training procedures and testing exotic functionalities available in third-party libraries. Thus, power-users are freed from the algorithmic weight associated with data management, and can instead focus their energy on testing creative hypotheses. Meanwhile, some users choose to override deeper functionalities within `IFreqaiModel` to help them craft unique data structures and training procedures.
|
||||||
|
|
||||||
|
The class structure and algorithmic details are depicted in the following diagram:
|
||||||
|
|
||||||
|

|
||||||
|
*Class diagram summarizing object interactions in FreqAI*
|
||||||
|
|
||||||
|
# Online documentation
|
||||||
|
|
||||||
|
The documentation for [`FreqAI`](https://www.freqtrade.io/en/latest/freqai/) is available online at [https://www.freqtrade.io/en/latest/freqai/](https://www.freqtrade.io/en/latest/freqai/) and covers a wide range of materials:
|
||||||
|
|
||||||
|
* Quick-start with a single command and example files - (beginners)
|
||||||
|
* Introduction to the feature engineering interface and basic configurations - (intermediate users)
|
||||||
|
* Parameter table with in-depth descriptions and default parameter setting recommendations - (intermediate users)
|
||||||
|
* Data analysis and post-processing - (advanced users)
|
||||||
|
* Methodological considerations complemented by high resolution figures - (advanced users)
|
||||||
|
* Instructions for integrating third party machine learning libraries into custom prediction models - (advanced users)
|
||||||
|
* Software architectural description with class diagram - (developers)
|
||||||
|
* File structure descriptions - (developers)
|
||||||
|
|
||||||
|
The docs direct users to a variety of pre-made examples which integrate `Catboost`, `LightGBM`, `XGBoost`, `Sklearn`, `stable_baselines3`, `torch`, `tensorflow`. Meanwhile, developers will also find thorough docstrings and type hinting throughout the source code to aid in code readability and customization.
|
||||||
|
|
||||||
|
`FreqAI` also benefits from a strong support network of users and developers on the [`Freqtrade` discord](https://discord.gg/w6nDM6cM4y) as well as on the [`FreqAI` discord](https://discord.gg/xE4RMg4QYw). Within the `FreqAI` discord, users will find a deep and easily searched knowledge base containing common errors. But more importantly, users in the `FreqAI` discord share anecdotal and quantitative observations which compare performance between various third-party libraries and methods.
|
||||||
|
|
||||||
|
# State of the field
|
||||||
|
|
||||||
|
There are two other open-source tools which are geared toward helping users build models for time-series forecasts on market-based data. However, each of these tools suffers from a non-generalized framework that does not permit comparison of methods and libraries. Additionally, they do not permit easy live-deployments or adaptive-modeling methods. For example, two open-sourced projects called [`tensortrade`](https://tensortradex.readthedocs.io/en/latest/) [@tensortrade] and [`FinRL`](https://github.com/AI4Finance-Foundation/FinRL) [@finrl] limit users to the exploration of reinforcement learning on historical data. These software packages also do not provide robust live deployments, they do not furnish novel feature engineering algorithms, and they do not provide custom data analysis tools. `FreqAI` fills the gap.
|
||||||
|
|
||||||
|
# On-going research
|
||||||
|
|
||||||
|
Emergent Methods, based in Arvada CO, is actively using `FreqAI` to perform large scale experiments aimed at comparing machine learning libraries in live and historical environments. Past projects include backtesting parametric sweeps, while active projects include a 3 week live deployment comparison between `CatboostRegressor`, `LightGBMRegressor`, and `XGBoostRegressor`. Results from these studies are planned for submission to scientific journals as well as more general data science blogs (e.g. Medium).
|
||||||
|
|
||||||
|
# Installing and running `FreqAI`
|
||||||
|
|
||||||
|
`FreqAI` is automatically installed with `Freqtrade` using the following commands on linux systems:
|
||||||
|
|
||||||
|
```
|
||||||
|
git clone git@github.com:freqtrade/freqtrade.git
|
||||||
|
cd freqtrade
|
||||||
|
./setup.sh -i
|
||||||
|
```
|
||||||
|
|
||||||
|
However, `FreqAI` also benefits from `Freqtrade` docker distributions, and can be run with docker by pulling the stable or develop images from `Freqtrade` distributions.
|
||||||
|
|
||||||
|
# Funding sources
|
||||||
|
|
||||||
|
[`FreqAI`](https://www.freqtrade.io/en/latest/freqai/) has had no official sponsors, and is entirely grass roots. All donations into the project (e.g. the GitHub sponsor system) are kept inside the project to help support development of open-sourced and communally beneficial features.
|
||||||
|
|
||||||
|
# Acknowledgements
|
||||||
|
|
||||||
|
We would like to acknowledge various beta testers of `FreqAI`:
|
||||||
|
|
||||||
|
- Longlong Yu (lolongcovas)
|
||||||
|
- Richárd Józsa (richardjozsa)
|
||||||
|
- Juha Nykänen (suikula)
|
||||||
|
- Emre Suzen (aemr3)
|
||||||
|
- Salah Lamkadem (ikonx)
|
||||||
|
|
||||||
|
As well as various `Freqtrade` [developers](https://github.com/freqtrade/freqtrade/graphs/contributors) maintaining tangential, yet essential, modules.
|
||||||
|
|
||||||
|
# References
|
BIN
docs/JOSS_paper/paper.pdf
Normal file
BIN
docs/JOSS_paper/paper.pdf
Normal file
Binary file not shown.
BIN
docs/assets/binance_futures_settings.png
Normal file
BIN
docs/assets/binance_futures_settings.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 80 KiB |
BIN
docs/assets/tensorboard.jpg
Normal file
BIN
docs/assets/tensorboard.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 362 KiB |
@@ -60,11 +60,18 @@ Binance supports [time_in_force](configuration.md#understand-order_time_in_force
|
|||||||
Binance supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
|
Binance supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
|
||||||
On futures, Binance supports both `stop-limit` as well as `stop-market` orders. You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type to use.
|
On futures, Binance supports both `stop-limit` as well as `stop-market` orders. You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type to use.
|
||||||
|
|
||||||
### Binance Blacklist
|
### Binance Blacklist recommendation
|
||||||
|
|
||||||
For Binance, it is suggested to add `"BNB/<STAKE>"` to your blacklist to avoid issues, unless you are willing to maintain enough extra `BNB` on the account or unless you're willing to disable using `BNB` for fees.
|
For Binance, it is suggested to add `"BNB/<STAKE>"` to your blacklist to avoid issues, unless you are willing to maintain enough extra `BNB` on the account or unless you're willing to disable using `BNB` for fees.
|
||||||
Binance accounts may use `BNB` for fees, and if a trade happens to be on `BNB`, further trades may consume this position and make the initial BNB trade unsellable as the expected amount is not there anymore.
|
Binance accounts may use `BNB` for fees, and if a trade happens to be on `BNB`, further trades may consume this position and make the initial BNB trade unsellable as the expected amount is not there anymore.
|
||||||
|
|
||||||
|
### Binance sites
|
||||||
|
|
||||||
|
Binance has been split into 2, and users must use the correct ccxt exchange ID for their exchange, otherwise API keys are not recognized.
|
||||||
|
|
||||||
|
* [binance.com](https://www.binance.com/) - International users. Use exchange id: `binance`.
|
||||||
|
* [binance.us](https://www.binance.us/) - US based users. Use exchange id: `binanceus`.
|
||||||
|
|
||||||
### Binance Futures
|
### Binance Futures
|
||||||
|
|
||||||
Binance has specific (unfortunately complex) [Futures Trading Quantitative Rules](https://www.binance.com/en/support/faq/4f462ebe6ff445d4a170be7d9e897272) which need to be followed, and which prohibit a too low stake-amount (among others) for too many orders.
|
Binance has specific (unfortunately complex) [Futures Trading Quantitative Rules](https://www.binance.com/en/support/faq/4f462ebe6ff445d4a170be7d9e897272) which need to be followed, and which prohibit a too low stake-amount (among others) for too many orders.
|
||||||
@@ -87,12 +94,14 @@ When trading on Binance Futures market, orderbook must be used because there is
|
|||||||
},
|
},
|
||||||
```
|
```
|
||||||
|
|
||||||
### Binance sites
|
#### Binance futures settings
|
||||||
|
|
||||||
Binance has been split into 2, and users must use the correct ccxt exchange ID for their exchange, otherwise API keys are not recognized.
|
Users will also have to have the futures-setting "Position Mode" set to "One-way Mode", and "Asset Mode" set to "Single-Asset Mode".
|
||||||
|
These settings will be checked on startup, and freqtrade will show an error if this setting is wrong.
|
||||||
|
|
||||||
* [binance.com](https://www.binance.com/) - International users. Use exchange id: `binance`.
|

|
||||||
* [binance.us](https://www.binance.us/) - US based users. Use exchange id: `binanceus`.
|
|
||||||
|
Freqtrade will not attempt to change these settings.
|
||||||
|
|
||||||
## Kraken
|
## Kraken
|
||||||
|
|
||||||
|
@@ -1,10 +1,10 @@
|
|||||||
# Configuration
|
# Configuration
|
||||||
|
|
||||||
`FreqAI` is configured through the typical [Freqtrade config file](configuration.md) and the standard [Freqtrade strategy](strategy-customization.md). Examples of `FreqAI` config and strategy files can be found in `config_examples/config_freqai.example.json` and `freqtrade/templates/FreqaiExampleStrategy.py`, respectively.
|
FreqAI is configured through the typical [Freqtrade config file](configuration.md) and the standard [Freqtrade strategy](strategy-customization.md). Examples of FreqAI config and strategy files can be found in `config_examples/config_freqai.example.json` and `freqtrade/templates/FreqaiExampleStrategy.py`, respectively.
|
||||||
|
|
||||||
## Setting up the configuration file
|
## Setting up the configuration file
|
||||||
|
|
||||||
Although there are plenty of additional parameters to choose from, as highlighted in the [parameter table](freqai-parameter-table.md#parameter-table), a `FreqAI` config must at minimum include the following parameters (the parameter values are only examples):
|
Although there are plenty of additional parameters to choose from, as highlighted in the [parameter table](freqai-parameter-table.md#parameter-table), a FreqAI config must at minimum include the following parameters (the parameter values are only examples):
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"freqai": {
|
"freqai": {
|
||||||
@@ -35,9 +35,9 @@
|
|||||||
|
|
||||||
A full example config is available in `config_examples/config_freqai.example.json`.
|
A full example config is available in `config_examples/config_freqai.example.json`.
|
||||||
|
|
||||||
## Building a `FreqAI` strategy
|
## Building a FreqAI strategy
|
||||||
|
|
||||||
The `FreqAI` strategy requires including the following lines of code in the standard [Freqtrade strategy](strategy-customization.md):
|
The FreqAI strategy requires including the following lines of code in the standard [Freqtrade strategy](strategy-customization.md):
|
||||||
|
|
||||||
```python
|
```python
|
||||||
# user should define the maximum startup candle count (the largest number of candles
|
# user should define the maximum startup candle count (the largest number of candles
|
||||||
@@ -129,7 +129,7 @@ Notice also the location of the labels under `if set_generalized_indicators:` at
|
|||||||
The `self.freqai.start()` function cannot be called outside the `populate_indicators()`.
|
The `self.freqai.start()` function cannot be called outside the `populate_indicators()`.
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
Features **must** be defined in `populate_any_indicators()`. Defining `FreqAI` features in `populate_indicators()`
|
Features **must** be defined in `populate_any_indicators()`. Defining FreqAI features in `populate_indicators()`
|
||||||
will cause the algorithm to fail in live/dry mode. In order to add generalized features that are not associated with a specific pair or timeframe, the following structure inside `populate_any_indicators()` should be used
|
will cause the algorithm to fail in live/dry mode. In order to add generalized features that are not associated with a specific pair or timeframe, the following structure inside `populate_any_indicators()` should be used
|
||||||
(as exemplified in `freqtrade/templates/FreqaiExampleStrategy.py`):
|
(as exemplified in `freqtrade/templates/FreqaiExampleStrategy.py`):
|
||||||
|
|
||||||
@@ -166,15 +166,15 @@ Below are the values you can expect to include/use inside a typical strategy dat
|
|||||||
|
|
||||||
| DataFrame Key | Description |
|
| DataFrame Key | Description |
|
||||||
|------------|-------------|
|
|------------|-------------|
|
||||||
| `df['&*']` | Any dataframe column prepended with `&` in `populate_any_indicators()` is treated as a training target (label) inside `FreqAI` (typically following the naming convention `&-s*`). The names of these dataframe columns are fed back as the predictions. For example, to predict the price change in the next 40 candles (similar to `templates/FreqaiExampleStrategy.py`), you would set `df['&-s_close']`. `FreqAI` makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`. <br> **Datatype:** Depends on the output of the model.
|
| `df['&*']` | Any dataframe column prepended with `&` in `populate_any_indicators()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). For example, to predict the close price 40 candles into the future, you would set `df['&-s_close'] = df['close'].shift(-self.freqai_info["feature_parameters"]["label_period_candles"])` with `"label_period_candles": 40` in the config. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`. <br> **Datatype:** Depends on the output of the model.
|
||||||
| `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float.
|
| `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float.
|
||||||
| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -1 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, `FreqAI` will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -1 and 2.
|
| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
|
||||||
| `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence `FreqAI` has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Float.
|
| `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Float.
|
||||||
| `df['%*']` | Any dataframe column prepended with `%` in `populate_any_indicators()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality described in the `feature_parameters` table shown above), these features are removed from the dataframe upon return from `FreqAI`. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`. <br> **Datatype:** Depends on the output of the model.
|
| `df['%*']` | Any dataframe column prepended with `%` in `populate_any_indicators()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`. <br> **Datatype:** Depends on the output of the model.
|
||||||
|
|
||||||
## Setting the `startup_candle_count`
|
## Setting the `startup_candle_count`
|
||||||
|
|
||||||
The `startup_candle_count` in the `FreqAI` strategy needs to be set up in the same way as in the standard Freqtrade strategy (see details [here](strategy-customization.md#strategy-startup-period)). This value is used by Freqtrade to ensure that a sufficient amount of data is provided when calling the `dataprovider`, to avoid any NaNs at the beginning of the first training. You can easily set this value by identifying the longest period (in candle units) which is passed to the indicator creation functions (e.g., Ta-Lib functions). In the presented example, `startup_candle_count` is 20 since this is the maximum value in `indicators_periods_candles`.
|
The `startup_candle_count` in the FreqAI strategy needs to be set up in the same way as in the standard Freqtrade strategy (see details [here](strategy-customization.md#strategy-startup-period)). This value is used by Freqtrade to ensure that a sufficient amount of data is provided when calling the `dataprovider`, to avoid any NaNs at the beginning of the first training. You can easily set this value by identifying the longest period (in candle units) which is passed to the indicator creation functions (e.g., Ta-Lib functions). In the presented example, `startup_candle_count` is 20 since this is the maximum value in `indicators_periods_candles`.
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
There are instances where the Ta-Lib functions actually require more data than just the passed `period` or else the feature dataset gets populated with NaNs. Anecdotally, multiplying the `startup_candle_count` by 2 always leads to a fully NaN free training dataset. Hence, it is typically safest to multiply the expected `startup_candle_count` by 2. Look out for this log message to confirm that the data is clean:
|
There are instances where the Ta-Lib functions actually require more data than just the passed `period` or else the feature dataset gets populated with NaNs. Anecdotally, multiplying the `startup_candle_count` by 2 always leads to a fully NaN free training dataset. Hence, it is typically safest to multiply the expected `startup_candle_count` by 2. Look out for this log message to confirm that the data is clean:
|
||||||
@@ -185,7 +185,7 @@ The `startup_candle_count` in the `FreqAI` strategy needs to be set up in the sa
|
|||||||
|
|
||||||
## Creating a dynamic target threshold
|
## Creating a dynamic target threshold
|
||||||
|
|
||||||
Deciding when to enter or exit a trade can be done in a dynamic way to reflect current market conditions. `FreqAI` allows you to return additional information from the training of a model (more info [here](freqai-feature-engineering.md#returning-additional-info-from-training)). For example, the `&*_std/mean` return values describe the statistical distribution of the target/label *during the most recent training*. Comparing a given prediction to these values allows you to know the rarity of the prediction. In `templates/FreqaiExampleStrategy.py`, the `target_roi` and `sell_roi` are defined to be 1.25 z-scores away from the mean which causes predictions that are closer to the mean to be filtered out.
|
Deciding when to enter or exit a trade can be done in a dynamic way to reflect current market conditions. FreqAI allows you to return additional information from the training of a model (more info [here](freqai-feature-engineering.md#returning-additional-info-from-training)). For example, the `&*_std/mean` return values describe the statistical distribution of the target/label *during the most recent training*. Comparing a given prediction to these values allows you to know the rarity of the prediction. In `templates/FreqaiExampleStrategy.py`, the `target_roi` and `sell_roi` are defined to be 1.25 z-scores away from the mean which causes predictions that are closer to the mean to be filtered out.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
|
dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
|
||||||
@@ -200,15 +200,15 @@ To consider the population of *historical predictions* for creating the dynamic
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
If this value is set, `FreqAI` will initially use the predictions from the training data and subsequently begin introducing real prediction data as it is generated. `FreqAI` will save this historical data to be reloaded if you stop and restart a model with the same `identifier`.
|
If this value is set, FreqAI will initially use the predictions from the training data and subsequently begin introducing real prediction data as it is generated. FreqAI will save this historical data to be reloaded if you stop and restart a model with the same `identifier`.
|
||||||
|
|
||||||
## Using different prediction models
|
## Using different prediction models
|
||||||
|
|
||||||
`FreqAI` has multiple example prediction model libraries that are ready to be used as is via the flag `--freqaimodel`. These libraries include `Catboost`, `LightGBM`, and `XGBoost` regression, classification, and multi-target models, and can be found in `freqai/prediction_models/`. However, it is possible to customize and create your own prediction models using the `IFreqaiModel` class. You are encouraged to inherit `fit()`, `train()`, and `predict()` to let these customize various aspects of the training procedures.
|
FreqAI has multiple example prediction model libraries that are ready to be used as is via the flag `--freqaimodel`. These libraries include `Catboost`, `LightGBM`, and `XGBoost` regression, classification, and multi-target models, and can be found in `freqai/prediction_models/`. However, it is possible to customize and create your own prediction models using the `IFreqaiModel` class. You are encouraged to inherit `fit()`, `train()`, and `predict()` to let these customize various aspects of the training procedures.
|
||||||
|
|
||||||
### Setting classifier targets
|
### Setting classifier targets
|
||||||
|
|
||||||
`FreqAI` includes a variety of classifiers, such as the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. If you elect to use a classifier, the classes need to be set using strings. For example:
|
FreqAI includes a variety of classifiers, such as the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. If you elect to use a classifier, the classes need to be set using strings. For example:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
|
df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
|
||||||
|
@@ -2,13 +2,13 @@
|
|||||||
|
|
||||||
## Project architecture
|
## Project architecture
|
||||||
|
|
||||||
The architecture and functions of `FreqAI` are generalized to encourage development of unique features, functions, models, etc.
|
The architecture and functions of FreqAI are generalized to encourage development of unique features, functions, models, etc.
|
||||||
|
|
||||||
The class structure and a detailed algorithmic overview is depicted in the following diagram:
|
The class structure and a detailed algorithmic overview is depicted in the following diagram:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
As shown, there are three distinct objects comprising `FreqAI`:
|
As shown, there are three distinct objects comprising FreqAI:
|
||||||
|
|
||||||
* **IFreqaiModel** - A singular persistent object containing all the necessary logic to collect, store, and process data, engineer features, run training, and inference models.
|
* **IFreqaiModel** - A singular persistent object containing all the necessary logic to collect, store, and process data, engineer features, run training, and inference models.
|
||||||
* **FreqaiDataKitchen** - A non-persistent object which is created uniquely for each unique asset/model. Beyond metadata, it also contains a variety of data processing tools.
|
* **FreqaiDataKitchen** - A non-persistent object which is created uniquely for each unique asset/model. Beyond metadata, it also contains a variety of data processing tools.
|
||||||
@@ -18,7 +18,7 @@ There are a variety of built-in [prediction models](freqai-configuration.md#usin
|
|||||||
|
|
||||||
## Data handling
|
## Data handling
|
||||||
|
|
||||||
`FreqAI` aims to organize model files, prediction data, and metadata in a way that simplifies post-processing and enhances crash resilience by automatic data reloading. The data is saved in a file structure, `user_data_dir/models/`, which contains all the data associated with the trainings and backtests. The `FreqaiDataKitchen()` relies heavily on the file structure for proper training and inferencing and should therefore not be manually modified.
|
FreqAI aims to organize model files, prediction data, and metadata in a way that simplifies post-processing and enhances crash resilience by automatic data reloading. The data is saved in a file structure, `user_data_dir/models/`, which contains all the data associated with the trainings and backtests. The `FreqaiDataKitchen()` relies heavily on the file structure for proper training and inferencing and should therefore not be manually modified.
|
||||||
|
|
||||||
### File structure
|
### File structure
|
||||||
|
|
||||||
@@ -27,13 +27,13 @@ The file structure is automatically generated based on the model `identifier` se
|
|||||||
| Structure | Description |
|
| Structure | Description |
|
||||||
|-----------|-------------|
|
|-----------|-------------|
|
||||||
| `config_*.json` | A copy of the model specific configuration file. |
|
| `config_*.json` | A copy of the model specific configuration file. |
|
||||||
| `historic_predictions.pkl` | A file containing all historic predictions generated during the lifetime of the `identifier` model during live deployment. `historic_predictions.pkl` is used to reload the model after a crash or a config change. A backup file is always held in case of corruption on the main file. **`FreqAI` automatically detects corruption and replaces the corrupted file with the backup**. |
|
| `historic_predictions.pkl` | A file containing all historic predictions generated during the lifetime of the `identifier` model during live deployment. `historic_predictions.pkl` is used to reload the model after a crash or a config change. A backup file is always held in case of corruption on the main file. FreqAI **automatically** detects corruption and replaces the corrupted file with the backup. |
|
||||||
| `pair_dictionary.json` | A file containing the training queue as well as the on disk location of the most recently trained model. |
|
| `pair_dictionary.json` | A file containing the training queue as well as the on disk location of the most recently trained model. |
|
||||||
| `sub-train-*_TIMESTAMP` | A folder containing all the files associated with a single model, such as: <br>
|
| `sub-train-*_TIMESTAMP` | A folder containing all the files associated with a single model, such as: <br>
|
||||||
|| `*_metadata.json` - Metadata for the model, such as normalization max/mins, expected training feature list, etc. <br>
|
|| `*_metadata.json` - Metadata for the model, such as normalization max/min, expected training feature list, etc. <br>
|
||||||
|| `*_model.*` - The model file saved to disk for reloading from a crash. Can be `joblib` (typical boosting libs), `zip` (stable_baselines), `hd5` (keras type), etc. <br>
|
|| `*_model.*` - The model file saved to disk for reloading from a crash. Can be `joblib` (typical boosting libs), `zip` (stable_baselines), `hd5` (keras type), etc. <br>
|
||||||
|| `*_pca_object.pkl` - The [Principal component analysis (PCA)](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis) transform (if `principal_component_analysis: true` is set in the config) which will be used to transform unseen prediction features. <br>
|
|| `*_pca_object.pkl` - The [Principal component analysis (PCA)](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis) transform (if `principal_component_analysis: True` is set in the config) which will be used to transform unseen prediction features. <br>
|
||||||
|| `*_svm_model.pkl` - The [Support Vector Machine (SVM)](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm) model which is used to detect outliers in unseen prediction features. <br>
|
|| `*_svm_model.pkl` - The [Support Vector Machine (SVM)](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm) model (if `use_SVM_to_remove_outliers: True` is set in the config) which is used to detect outliers in unseen prediction features. <br>
|
||||||
|| `*_trained_df.pkl` - The dataframe containing all the training features used to train the `identifier` model. This is used for computing the [Dissimilarity Index (DI)](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di) and can also be used for post-processing. <br>
|
|| `*_trained_df.pkl` - The dataframe containing all the training features used to train the `identifier` model. This is used for computing the [Dissimilarity Index (DI)](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di) and can also be used for post-processing. <br>
|
||||||
|| `*_trained_dates.df.pkl` - The dates associated with the `trained_df.pkl`, which is useful for post-processing. |
|
|| `*_trained_dates.df.pkl` - The dates associated with the `trained_df.pkl`, which is useful for post-processing. |
|
||||||
|
|
||||||
|
@@ -4,7 +4,7 @@
|
|||||||
|
|
||||||
Low level feature engineering is performed in the user strategy within a function called `populate_any_indicators()`. That function sets the `base features` such as `RSI`, `MFI`, `EMA`, `SMA`, time of day, volume, etc. The `base features` can be custom indicators or they can be imported from any technical-analysis library that you can find. One important syntax rule is that all `base features` string names are prepended with `%`, while labels/targets are prepended with `&`.
|
Low level feature engineering is performed in the user strategy within a function called `populate_any_indicators()`. That function sets the `base features` such as `RSI`, `MFI`, `EMA`, `SMA`, time of day, volume, etc. The `base features` can be custom indicators or they can be imported from any technical-analysis library that you can find. One important syntax rule is that all `base features` string names are prepended with `%`, while labels/targets are prepended with `&`.
|
||||||
|
|
||||||
Meanwhile, high level feature engineering is handled within `"feature_parameters":{}` in the `FreqAI` config. Within this file, it is possible to decide large scale feature expansions on top of the `base_features` such as "including correlated pairs" or "including informative timeframes" or even "including recent candles."
|
Meanwhile, high level feature engineering is handled within `"feature_parameters":{}` in the FreqAI config. Within this file, it is possible to decide large scale feature expansions on top of the `base_features` such as "including correlated pairs" or "including informative timeframes" or even "including recent candles."
|
||||||
|
|
||||||
It is advisable to start from the template `populate_any_indicators()` in the source provided example strategy (found in `templates/FreqaiExampleStrategy.py`) to ensure that the feature definitions are following the correct conventions. Here is an example of how to set the indicators and labels in the strategy:
|
It is advisable to start from the template `populate_any_indicators()` in the source provided example strategy (found in `templates/FreqaiExampleStrategy.py`) to ensure that the feature definitions are following the correct conventions. Here is an example of how to set the indicators and labels in the strategy:
|
||||||
|
|
||||||
@@ -122,7 +122,7 @@ The `include_timeframes` in the config above are the timeframes (`tf`) of each c
|
|||||||
|
|
||||||
You can ask for each of the defined features to be included also for informative pairs using the `include_corr_pairlist`. This means that the feature set will include all the features from `populate_any_indicators` on all the `include_timeframes` for each of the correlated pairs defined in the config (`ETH/USD`, `LINK/USD`, and `BNB/USD` in the presented example).
|
You can ask for each of the defined features to be included also for informative pairs using the `include_corr_pairlist`. This means that the feature set will include all the features from `populate_any_indicators` on all the `include_timeframes` for each of the correlated pairs defined in the config (`ETH/USD`, `LINK/USD`, and `BNB/USD` in the presented example).
|
||||||
|
|
||||||
`include_shifted_candles` indicates the number of previous candles to include in the feature set. For example, `include_shifted_candles: 2` tells `FreqAI` to include the past 2 candles for each of the features in the feature set.
|
`include_shifted_candles` indicates the number of previous candles to include in the feature set. For example, `include_shifted_candles: 2` tells FreqAI to include the past 2 candles for each of the features in the feature set.
|
||||||
|
|
||||||
In total, the number of features the user of the presented example strategy has created is: length of `include_timeframes` * no. features in `populate_any_indicators()` * length of `include_corr_pairlist` * no. `include_shifted_candles` * length of `indicator_periods_candles`
|
In total, the number of features the user of the presented example strategy has created is: length of `include_timeframes` * no. features in `populate_any_indicators()` * length of `include_corr_pairlist` * no. `include_shifted_candles` * length of `indicator_periods_candles`
|
||||||
$= 3 * 3 * 3 * 2 * 2 = 108$.
|
$= 3 * 3 * 3 * 2 * 2 = 108$.
|
||||||
@@ -131,7 +131,7 @@ In total, the number of features the user of the presented example strat has cre
|
|||||||
|
|
||||||
Important metrics can be returned to the strategy at the end of each model training by assigning them to `dk.data['extra_returns_per_train']['my_new_value'] = XYZ` inside the custom prediction model class.
|
Important metrics can be returned to the strategy at the end of each model training by assigning them to `dk.data['extra_returns_per_train']['my_new_value'] = XYZ` inside the custom prediction model class.
|
||||||
|
|
||||||
`FreqAI` takes the `my_new_value` assigned in this dictionary and expands it to fit the dataframe that is returned to the strategy. You can then use the returned metrics in your strategy through `dataframe['my_new_value']`. An example of how return values can be used in `FreqAI` are the `&*_mean` and `&*_std` values that are used to [create a dynamic target threshold](freqai-configuration.md#creating-a-dynamic-target-threshold).
|
FreqAI takes the `my_new_value` assigned in this dictionary and expands it to fit the dataframe that is returned to the strategy. You can then use the returned metrics in your strategy through `dataframe['my_new_value']`. An example of how return values can be used in FreqAI are the `&*_mean` and `&*_std` values that are used to [create a dynamic target threshold](freqai-configuration.md#creating-a-dynamic-target-threshold).
|
||||||
|
|
||||||
Another example, where the user wants to use live metrics from the trade database, is shown below:
|
Another example, where the user wants to use live metrics from the trade database, is shown below:
|
||||||
|
|
||||||
@@ -141,15 +141,15 @@ Another example, where the user wants to use live metrics from the trade databas
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
You need to set the standard dictionary in the config so that `FreqAI` can return proper dataframe shapes. These values will likely be overridden by the prediction model, but in the case where the model has yet to set them, or needs a default initial value, the preset values are what will be returned.
|
You need to set the standard dictionary in the config so that FreqAI can return proper dataframe shapes. These values will likely be overridden by the prediction model, but in the case where the model has yet to set them, or needs a default initial value, the pre-set values are what will be returned.
|
||||||
|
|
||||||
## Feature normalization
|
## Feature normalization
|
||||||
|
|
||||||
`FreqAI` is strict when it comes to data normalization. The train features, $X^{train}$, are always normalized to [-1, 1] using a shifted min-max normalization:
|
FreqAI is strict when it comes to data normalization. The train features, $X^{train}$, are always normalized to [-1, 1] using a shifted min-max normalization:
|
||||||
|
|
||||||
$$X^{train}_{norm} = 2 * \frac{X^{train} - X^{train}.min()}{X^{train}.max() - X^{train}.min()} - 1$$
|
$$X^{train}_{norm} = 2 * \frac{X^{train} - X^{train}.min()}{X^{train}.max() - X^{train}.min()} - 1$$
|
||||||
|
|
||||||
All other data (test data and unseen prediction data in dry/live/backtest) is always automatically normalized to the training feature space according to industry standards. `FreqAI` stores all the metadata required to ensure that test and prediction features will be properly normalized and that predictions are properly denormalized. For this reason, it is not recommended to eschew industry standards and modify `FreqAI` internals - however - advanced users can do so by inheriting `train()` in their custom `IFreqaiModel` and using their own normalization functions.
|
All other data (test data and unseen prediction data in dry/live/backtest) is always automatically normalized to the training feature space according to industry standards. FreqAI stores all the metadata required to ensure that test and prediction features will be properly normalized and that predictions are properly denormalized. For this reason, it is not recommended to eschew industry standards and modify FreqAI internals - however - advanced users can do so by inheriting `train()` in their custom `IFreqaiModel` and using their own normalization functions.
|
||||||
|
|
||||||
## Data dimensionality reduction with Principal Component Analysis
|
## Data dimensionality reduction with Principal Component Analysis
|
||||||
|
|
||||||
@@ -169,17 +169,17 @@ This will perform PCA on the features and reduce their dimensionality so that th
|
|||||||
|
|
||||||
The `inlier_metric` is a metric aimed at quantifying how similar the features of a data point are to the most recent historic data points.
|
The `inlier_metric` is a metric aimed at quantifying how similar the features of a data point are to the most recent historic data points.
|
||||||
|
|
||||||
You define the lookback window by setting `inlier_metric_window` and `FreqAI` computes the distance between the present time point and each of the previous `inlier_metric_window` lookback points. A Weibull function is fit to each of the lookback distributions and its cumulative distribution function (CDF) is used to produce a quantile for each lookback point. The `inlier_metric` is then computed for each time point as the average of the corresponding lookback quantiles. The figure below explains the concept for an `inlier_metric_window` of 5.
|
You define the lookback window by setting `inlier_metric_window` and FreqAI computes the distance between the present time point and each of the previous `inlier_metric_window` lookback points. A Weibull function is fit to each of the lookback distributions and its cumulative distribution function (CDF) is used to produce a quantile for each lookback point. The `inlier_metric` is then computed for each time point as the average of the corresponding lookback quantiles. The figure below explains the concept for an `inlier_metric_window` of 5.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
`FreqAI` adds the `inlier_metric` to the training features and hence gives the model access to a novel type of temporal information.
|
FreqAI adds the `inlier_metric` to the training features and hence gives the model access to a novel type of temporal information.
|
||||||
|
|
||||||
This function does **not** remove outliers from the data set.
|
This function does **not** remove outliers from the data set.
|
||||||
|
|
||||||
## Weighting features for temporal importance
|
## Weighting features for temporal importance
|
||||||
|
|
||||||
`FreqAI` allows you to set a `weight_factor` to weight recent data more strongly than past data via an exponential function:
|
FreqAI allows you to set a `weight_factor` to weight recent data more strongly than past data via an exponential function:
|
||||||
|
|
||||||
$$ W_i = \exp(\frac{-i}{\alpha*n}) $$
|
$$ W_i = \exp(\frac{-i}{\alpha*n}) $$
|
||||||
|
|
||||||
@@ -189,13 +189,13 @@ where $W_i$ is the weight of data point $i$ in a total set of $n$ data points. B
|
|||||||
|
|
||||||
## Outlier detection
|
## Outlier detection
|
||||||
|
|
||||||
Equity and crypto markets suffer from a high level of non-patterned noise in the form of outlier data points. `FreqAI` implements a variety of methods to identify such outliers and hence mitigate risk.
|
Equity and crypto markets suffer from a high level of non-patterned noise in the form of outlier data points. FreqAI implements a variety of methods to identify such outliers and hence mitigate risk.
|
||||||
|
|
||||||
### Identifying outliers with the Dissimilarity Index (DI)
|
### Identifying outliers with the Dissimilarity Index (DI)
|
||||||
|
|
||||||
The Dissimilarity Index (DI) aims to quantify the uncertainty associated with each prediction made by the model.
|
The Dissimilarity Index (DI) aims to quantify the uncertainty associated with each prediction made by the model.
|
||||||
|
|
||||||
You can tell `FreqAI` to remove outlier data points from the training/test data sets using the DI by including the following statement in the config:
|
You can tell FreqAI to remove outlier data points from the training/test data sets using the DI by including the following statement in the config:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"freqai": {
|
"freqai": {
|
||||||
@@ -205,7 +205,7 @@ You can tell `FreqAI` to remove outlier data points from the training/test data
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, `FreqAI` measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points:
|
The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points:
|
||||||
|
|
||||||
$$ d_{ab} = \sqrt{\sum_{j=1}^p(X_{a,j}-X_{b,j})^2} $$
|
$$ d_{ab} = \sqrt{\sum_{j=1}^p(X_{a,j}-X_{b,j})^2} $$
|
||||||
|
|
||||||
@@ -229,7 +229,7 @@ Below is a figure that describes the DI for a 3D data set.
|
|||||||
|
|
||||||
### Identifying outliers using a Support Vector Machine (SVM)
|
### Identifying outliers using a Support Vector Machine (SVM)
|
||||||
|
|
||||||
You can tell `FreqAI` to remove outlier data points from the training/test data sets using a Support Vector Machine (SVM) by including the following statement in the config:
|
You can tell FreqAI to remove outlier data points from the training/test data sets using a Support Vector Machine (SVM) by including the following statement in the config:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"freqai": {
|
"freqai": {
|
||||||
@@ -241,7 +241,7 @@ You can tell `FreqAI` to remove outlier data points from the training/test data
|
|||||||
|
|
||||||
The SVM will be trained on the training data and any data point that the SVM deems to be beyond the feature space will be removed.
|
The SVM will be trained on the training data and any data point that the SVM deems to be beyond the feature space will be removed.
|
||||||
|
|
||||||
`FreqAI` uses `sklearn.linear_model.SGDOneClassSVM` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDOneClassSVM.html) (external website)) and you can elect to provide additional parameters for the SVM, such as `shuffle`, and `nu`.
|
FreqAI uses `sklearn.linear_model.SGDOneClassSVM` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDOneClassSVM.html) (external website)) and you can elect to provide additional parameters for the SVM, such as `shuffle`, and `nu`.
|
||||||
|
|
||||||
The parameter `shuffle` is by default set to `False` to ensure consistent results. If it is set to `True`, running the SVM multiple times on the same data set might result in different outcomes due to `max_iter` being too low for the algorithm to reach the demanded `tol`. Increasing `max_iter` solves this issue but causes the procedure to take a longer time.
|
The parameter `shuffle` is by default set to `False` to ensure consistent results. If it is set to `True`, running the SVM multiple times on the same data set might result in different outcomes due to `max_iter` being too low for the algorithm to reach the demanded `tol`. Increasing `max_iter` solves this issue but causes the procedure to take a longer time.
|
||||||
|
|
||||||
@@ -249,7 +249,7 @@ The parameter `nu`, *very* broadly, is the amount of data points that should be
|
|||||||
|
|
||||||
### Identifying outliers with DBSCAN
|
### Identifying outliers with DBSCAN
|
||||||
|
|
||||||
You can configure `FreqAI` to use DBSCAN to cluster and remove outliers from the training/test data set or incoming outliers from predictions, by activating `use_DBSCAN_to_remove_outliers` in the config:
|
You can configure FreqAI to use DBSCAN to cluster and remove outliers from the training/test data set or incoming outliers from predictions, by activating `use_DBSCAN_to_remove_outliers` in the config:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"freqai": {
|
"freqai": {
|
||||||
@@ -265,4 +265,4 @@ Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
`FreqAI` uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html) (external website)) with `min_samples` ($N$) taken as 1/4 of the no. of time points in the feature set. `eps` ($\varepsilon$) is computed automatically as the elbow point in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.
|
FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html) (external website)) with `min_samples` ($N$) taken as 1/4 of the no. of time points (candles) in the feature set. `eps` ($\varepsilon$) is computed automatically as the elbow point in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.
|
||||||
|
@@ -1,18 +1,18 @@
|
|||||||
# Parameter table
|
# Parameter table
|
||||||
|
|
||||||
The table below will list all configuration parameters available for `FreqAI`. Some of the parameters are exemplified in `config_examples/config_freqai.example.json`.
|
The table below will list all configuration parameters available for FreqAI. Some of the parameters are exemplified in `config_examples/config_freqai.example.json`.
|
||||||
|
|
||||||
Mandatory parameters are marked as **Required** and have to be set in one of the suggested ways.
|
Mandatory parameters are marked as **Required** and have to be set in one of the suggested ways.
|
||||||
|
|
||||||
| Parameter | Description |
|
| Parameter | Description |
|
||||||
|------------|-------------|
|
|------------|-------------|
|
||||||
| | **General configuration parameters**
|
| | **General configuration parameters**
|
||||||
| `freqai` | **Required.** <br> The parent dictionary containing all the parameters for controlling `FreqAI`. <br> **Datatype:** Dictionary.
|
| `freqai` | **Required.** <br> The parent dictionary containing all the parameters for controlling FreqAI. <br> **Datatype:** Dictionary.
|
||||||
| `train_period_days` | **Required.** <br> Number of days to use for the training data (width of the sliding window). <br> **Datatype:** Positive integer.
|
| `train_period_days` | **Required.** <br> Number of days to use for the training data (width of the sliding window). <br> **Datatype:** Positive integer.
|
||||||
| `backtest_period_days` | **Required.** <br> Number of days to inference from the trained model before sliding the `train_period_days` window defined above, and retraining the model during backtesting (more info [here](freqai-running.md#backtesting)). This can be fractional days, but beware that the provided `timerange` will be divided by this number to yield the number of trainings necessary to complete the backtest. <br> **Datatype:** Float.
|
| `backtest_period_days` | **Required.** <br> Number of days to inference from the trained model before sliding the `train_period_days` window defined above, and retraining the model during backtesting (more info [here](freqai-running.md#backtesting)). This can be fractional days, but beware that the provided `timerange` will be divided by this number to yield the number of trainings necessary to complete the backtest. <br> **Datatype:** Float.
|
||||||
| `identifier` | **Required.** <br> A unique ID for the current model. If models are saved to disk, the `identifier` allows for reloading specific pre-trained models/data. <br> **Datatype:** String.
|
| `identifier` | **Required.** <br> A unique ID for the current model. If models are saved to disk, the `identifier` allows for reloading specific pre-trained models/data. <br> **Datatype:** String.
|
||||||
| `live_retrain_hours` | Frequency of retraining during dry/live runs. <br> **Datatype:** Float > 0. <br> Default: 0 (models retrain as often as possible).
|
| `live_retrain_hours` | Frequency of retraining during dry/live runs. <br> **Datatype:** Float > 0. <br> Default: `0` (models retrain as often as possible).
|
||||||
| `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old. <br> **Datatype:** Positive integer. <br> Default: 0 (models never expire).
|
| `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old. <br> **Datatype:** Positive integer. <br> Default: `0` (models never expire).
|
||||||
| `purge_old_models` | Delete obsolete models. <br> **Datatype:** Boolean. <br> Default: `False` (all historic models remain on disk).
|
| `purge_old_models` | Delete obsolete models. <br> **Datatype:** Boolean. <br> Default: `False` (all historic models remain on disk).
|
||||||
| `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
|
| `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
|
||||||
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
|
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
|
||||||
@@ -21,32 +21,41 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
|
|||||||
| | **Feature parameters**
|
| | **Feature parameters**
|
||||||
| `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md). <br> **Datatype:** Dictionary.
|
| `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md). <br> **Datatype:** Dictionary.
|
||||||
| `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset. <br> **Datatype:** List of timeframes (strings).
|
| `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset. <br> **Datatype:** List of timeframes (strings).
|
||||||
| `include_corr_pairlist` | A list of correlated coins that `FreqAI` will add as additional features to all `pair_whitelist` coins. All indicators set in `populate_any_indicators` during feature engineering (see details [here](freqai-feature-engineering.md)) will be created for each correlated coin. The correlated coins features are added to the base indicators dataset. <br> **Datatype:** List of assets (strings).
|
| `include_corr_pairlist` | A list of correlated coins that FreqAI will add as additional features to all `pair_whitelist` coins. All indicators set in `populate_any_indicators` during feature engineering (see details [here](freqai-feature-engineering.md)) will be created for each correlated coin. The correlated coins features are added to the base indicators dataset. <br> **Datatype:** List of assets (strings).
|
||||||
| `label_period_candles` | Number of candles into the future that the labels are created for. This is used in `populate_any_indicators` (see `templates/FreqaiExampleStrategy.py` for detailed usage). You can create custom labels and choose whether to make use of this parameter or not. <br> **Datatype:** Positive integer.
|
| `label_period_candles` | Number of candles into the future that the labels are created for. This is used in `populate_any_indicators` (see `templates/FreqaiExampleStrategy.py` for detailed usage). You can create custom labels and choose whether to make use of this parameter or not. <br> **Datatype:** Positive integer.
|
||||||
| `include_shifted_candles` | Add features from previous candles to subsequent candles with the intent of adding historical information. If used, `FreqAI` will duplicate and shift all features from the `include_shifted_candles` previous candles so that the information is available for the subsequent candle. <br> **Datatype:** Positive integer.
|
| `include_shifted_candles` | Add features from previous candles to subsequent candles with the intent of adding historical information. If used, FreqAI will duplicate and shift all features from the `include_shifted_candles` previous candles so that the information is available for the subsequent candle. <br> **Datatype:** Positive integer.
|
||||||
| `weight_factor` | Weight training data points according to their recency (see details [here](freqai-feature-engineering.md#weighting-features-for-temporal-importance)). <br> **Datatype:** Positive float (typically < 1).
|
| `weight_factor` | Weight training data points according to their recency (see details [here](freqai-feature-engineering.md#weighting-features-for-temporal-importance)). <br> **Datatype:** Positive float (typically < 1).
|
||||||
| `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `populate_any_indicators()` for indicator creation. `FreqAI` uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN <br> **Datatype:** Positive integer.
|
| `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `populate_any_indicators()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN. <br> **Datatype:** Positive integer.
|
||||||
| `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset. <br> **Datatype:** List of positive integers.
|
| `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset. <br> **Datatype:** List of positive integers.
|
||||||
| `stratify_training_data` | Split the feature set into training and testing datasets. For example, `stratify_training_data: 2` would set every 2nd data point into a separate dataset to be pulled from during training/testing. See details about how it works [here](freqai-running.md#data-stratification-for-training-and-testing-the-model). <br> **Datatype:** Positive integer.
|
| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis) <br> **Datatype:** Boolean. <br> Default: `False`.
|
||||||
| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis) <br> **Datatype:** Boolean. defaults to `false`.
|
| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. <br> **Datatype:** Integer. <br> Default: `0`.
|
||||||
| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features.<br> **Datatype:** Integer, defaults to `0`.
|
|
||||||
| `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Positive float (typically < 1).
|
| `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Positive float (typically < 1).
|
||||||
| `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
|
| `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
|
||||||
| `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.
|
| `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.
|
||||||
| `use_DBSCAN_to_remove_outliers` | Cluster data using the DBSCAN algorithm to identify and remove outliers from training and prediction data. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan). <br> **Datatype:** Boolean.
|
| `use_DBSCAN_to_remove_outliers` | Cluster data using the DBSCAN algorithm to identify and remove outliers from training and prediction data. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan). <br> **Datatype:** Boolean.
|
||||||
| `inlier_metric_window` | If set, `FreqAI` adds an `inlier_metric` to the training feature set and sets the lookback to be the `inlier_metric_window`, i.e., the number of previous time points to compare the current candle to. Details of how the `inlier_metric` is computed can be found [here](freqai-feature-engineering.md#inlier-metric). <br> **Datatype:** Integer. <br> Default: 0.
|
| `inlier_metric_window` | If set, FreqAI adds an `inlier_metric` to the training feature set and sets the lookback to be the `inlier_metric_window`, i.e., the number of previous time points to compare the current candle to. Details of how the `inlier_metric` is computed can be found [here](freqai-feature-engineering.md#inlier-metric). <br> **Datatype:** Integer. <br> Default: `0`.
|
||||||
| `noise_standard_deviation` | If set, `FreqAI` adds noise to the training features with the aim of preventing overfitting. `FreqAI` generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in `FreqAI` is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation). <br> **Datatype:** Integer. <br> Default: 0.
|
| `noise_standard_deviation` | If set, FreqAI adds noise to the training features with the aim of preventing overfitting. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in FreqAI is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation). <br> **Datatype:** Integer. <br> Default: `0`.
|
||||||
| `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, `FreqAI` will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset. <br> **Datatype:** Float. <br> Default: `30`.
|
| `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset. <br> **Datatype:** Float. <br> Default: `30`.
|
||||||
| `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. <br> Default: `False` (no reversal).
|
| `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. <br> Default: `False` (no reversal).
|
||||||
| | **Data split parameters**
|
| | **Data split parameters**
|
||||||
| `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
|
| `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
|
||||||
| `test_size` | The fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
|
| `test_size` | The fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
|
||||||
| `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`. <br> **Datatype:** Boolean.
|
| `shuffle` | Shuffle the training data points during training. Typically, to not remove the chronological order of data in time-series forecasting, this is set to `False`. <br> **Datatype:** Boolean. <br> Default: `False`.
|
||||||
| | **Model training parameters**
|
| | **Model training parameters**
|
||||||
| `model_training_parameters` | A flexible dictionary that includes all parameters available by the selected model library. For example, if you use `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If you select a different model, this dictionary can contain any parameter from that model. <br> **Datatype:** Dictionary.
|
| `model_training_parameters` | A flexible dictionary that includes all parameters available by the selected model library. For example, if you use `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If you select a different model, this dictionary can contain any parameter from that model. <br> **Datatype:** Dictionary.
|
||||||
| `n_estimators` | The number of boosted trees to fit in regression. <br> **Datatype:** Integer.
|
| `n_estimators` | The number of boosted trees to fit in the training of the model. <br> **Datatype:** Integer.
|
||||||
| `learning_rate` | Boosting learning rate during regression. <br> **Datatype:** Float.
|
| `learning_rate` | Boosting learning rate during training of the model. <br> **Datatype:** Float.
|
||||||
| `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float.
|
| `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float.
|
||||||
|
| | **Reinforcement Learning Parameters**
|
||||||
|
| `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model. <br> **Datatype:** Dictionary.
|
||||||
|
| `train_cycles` | Training time steps will be set based on the `train_cycles` * number of training data points. <br> **Datatype:** Integer.
|
||||||
|
| `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process. <br> **Datatype:** int.
|
||||||
|
| `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()` <br> **Datatype:** int.
|
||||||
|
| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website) <br> **Datatype:** string.
|
||||||
|
| `policy_type` | One of the available policy types from stable_baselines3 <br> **Datatype:** string.
|
||||||
|
| `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training. <br> **Datatype:** float. <br> Default: 0.8
|
||||||
|
| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not). <br> **Datatype:** int.
|
||||||
|
| `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py` <br> **Datatype:** int.
|
||||||
| | **Extraneous parameters**
|
| | **Extraneous parameters**
|
||||||
| `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`.
|
| `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`.
|
||||||
| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: 2.
|
| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
|
202
docs/freqai-reinforcement-learning.md
Normal file
202
docs/freqai-reinforcement-learning.md
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
# Reinforcement Learning
|
||||||
|
|
||||||
|
!!! Note
|
||||||
|
Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`.
|
||||||
|
|
||||||
|
Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
freqtrade trade --freqaimodel ReinforcementLearner --strategy MyRLStrategy --config config.json
|
||||||
|
```
|
||||||
|
|
||||||
|
where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner`. The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `populate_any_indicators` as a typical Regressor:
|
||||||
|
|
||||||
|
```python
|
||||||
|
def populate_any_indicators(
|
||||||
|
self, pair, df, tf, informative=None, set_generalized_indicators=False
|
||||||
|
):
|
||||||
|
|
||||||
|
coin = pair.split('/')[0]
|
||||||
|
|
||||||
|
if informative is None:
|
||||||
|
informative = self.dp.get_pair_dataframe(pair, tf)
|
||||||
|
|
||||||
|
# first loop is automatically duplicating indicators for time periods
|
||||||
|
for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
|
||||||
|
|
||||||
|
t = int(t)
|
||||||
|
informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||||
|
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||||
|
informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
|
||||||
|
|
||||||
|
# The following features are necessary for RL models
|
||||||
|
informative[f"%-{coin}raw_close"] = informative["close"]
|
||||||
|
informative[f"%-{coin}raw_open"] = informative["open"]
|
||||||
|
informative[f"%-{coin}raw_high"] = informative["high"]
|
||||||
|
informative[f"%-{coin}raw_low"] = informative["low"]
|
||||||
|
|
||||||
|
indicators = [col for col in informative if col.startswith("%")]
|
||||||
|
# This loop duplicates and shifts all indicators to add a sense of recency to data
|
||||||
|
for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
|
||||||
|
if n == 0:
|
||||||
|
continue
|
||||||
|
informative_shift = informative[indicators].shift(n)
|
||||||
|
informative_shift = informative_shift.add_suffix("_shift-" + str(n))
|
||||||
|
informative = pd.concat((informative, informative_shift), axis=1)
|
||||||
|
|
||||||
|
df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
|
||||||
|
skip_columns = [
|
||||||
|
(s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
|
||||||
|
]
|
||||||
|
df = df.drop(columns=skip_columns)
|
||||||
|
|
||||||
|
# Add generalized indicators here (because in live, it will call this
|
||||||
|
# function to populate indicators during training). Notice how we ensure not to
|
||||||
|
# add them multiple times
|
||||||
|
if set_generalized_indicators:
|
||||||
|
|
||||||
|
# For RL, there are no direct targets to set. This is filler (neutral)
|
||||||
|
# until the agent sends an action.
|
||||||
|
df["&-action"] = 0
|
||||||
|
|
||||||
|
return df
|
||||||
|
```
|
||||||
|
|
||||||
|
Most of the function remains the same as for typical Regressors, however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# The following features are necessary for RL models
|
||||||
|
informative[f"%-{coin}raw_close"] = informative["close"]
|
||||||
|
informative[f"%-{coin}raw_open"] = informative["open"]
|
||||||
|
informative[f"%-{coin}raw_high"] = informative["high"]
|
||||||
|
informative[f"%-{coin}raw_low"] = informative["low"]
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally, there is no explicit "label" to make - instead you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the user set the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action.
|
||||||
|
|
||||||
|
After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy:
|
||||||
|
|
||||||
|
```python
|
||||||
|
def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
|
||||||
|
|
||||||
|
enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1]
|
||||||
|
|
||||||
|
if enter_long_conditions:
|
||||||
|
df.loc[
|
||||||
|
reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
|
||||||
|
] = (1, "long")
|
||||||
|
|
||||||
|
enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3]
|
||||||
|
|
||||||
|
if enter_short_conditions:
|
||||||
|
df.loc[
|
||||||
|
reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
|
||||||
|
] = (1, "short")
|
||||||
|
|
||||||
|
return df
|
||||||
|
|
||||||
|
def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
|
||||||
|
exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2]
|
||||||
|
if exit_long_conditions:
|
||||||
|
df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1
|
||||||
|
|
||||||
|
exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4]
|
||||||
|
if exit_short_conditions:
|
||||||
|
df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1
|
||||||
|
|
||||||
|
return df
|
||||||
|
```
|
||||||
|
|
||||||
|
It is important to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short.
|
||||||
|
|
||||||
|
## Configuring the Reinforcement Learner
|
||||||
|
|
||||||
|
In order to configure the `Reinforcement Learner`, the user needs to add the following dictionary to their `freqai` config:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"rl_config": {
|
||||||
|
"train_cycles": 25,
|
||||||
|
"max_trade_duration_candles": 300,
|
||||||
|
"max_training_drawdown_pct": 0.02,
|
||||||
|
"cpu_count": 8,
|
||||||
|
"model_type": "PPO",
|
||||||
|
"policy_type": "MlpPolicy",
|
||||||
|
"model_reward_parameters": {
|
||||||
|
"rr": 1,
|
||||||
|
"profit_aim": 0.025
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link).
|
||||||
|
|
||||||
|
## Creating the reward
|
||||||
|
|
||||||
|
As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
|
||||||
|
|
||||||
|
```python
|
||||||
|
class MyRLEnv(Base5ActionRLEnv):
|
||||||
|
"""
|
||||||
|
User made custom environment. This class inherits from BaseEnvironment and gym.env.
|
||||||
|
Users can override any functions from those parent classes. Here is an example
|
||||||
|
of a user customized `calculate_reward()` function.
|
||||||
|
"""
|
||||||
|
def calculate_reward(self, action):
|
||||||
|
# first, penalize if the action is not valid
|
||||||
|
if not self._is_valid(action):
|
||||||
|
return -2
|
||||||
|
pnl = self.get_unrealized_profit()
|
||||||
|
|
||||||
|
factor = 100
|
||||||
|
# reward agent for entering trades
|
||||||
|
if action in (Actions.Long_enter.value, Actions.Short_enter.value) \
|
||||||
|
and self._position == Positions.Neutral:
|
||||||
|
return 25
|
||||||
|
# discourage agent from not entering trades
|
||||||
|
if action == Actions.Neutral.value and self._position == Positions.Neutral:
|
||||||
|
return -1
|
||||||
|
max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
|
||||||
|
trade_duration = self._current_tick - self._last_trade_tick
|
||||||
|
if trade_duration <= max_trade_duration:
|
||||||
|
factor *= 1.5
|
||||||
|
elif trade_duration > max_trade_duration:
|
||||||
|
factor *= 0.5
|
||||||
|
# discourage sitting in position
|
||||||
|
if self._position in (Positions.Short, Positions.Long) and \
|
||||||
|
action == Actions.Neutral.value:
|
||||||
|
return -1 * trade_duration / max_trade_duration
|
||||||
|
# close long
|
||||||
|
if action == Actions.Long_exit.value and self._position == Positions.Long:
|
||||||
|
if pnl > self.profit_aim * self.rr:
|
||||||
|
factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
|
||||||
|
return float(pnl * factor)
|
||||||
|
# close short
|
||||||
|
if action == Actions.Short_exit.value and self._position == Positions.Short:
|
||||||
|
if pnl > self.profit_aim * self.rr:
|
||||||
|
factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
|
||||||
|
return float(pnl * factor)
|
||||||
|
return 0.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Creating a custom agent
|
||||||
|
|
||||||
|
Users can inherit from `stable_baselines3` and customize anything they wish about their agent. Doing this is for advanced users only, an example is presented in `freqai/RL/ReinforcementLearnerCustomAgent.py`
|
||||||
|
|
||||||
|
### Using Tensorboard
|
||||||
|
|
||||||
|
Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip3 install tensorboard
|
||||||
|
```
|
||||||
|
|
||||||
|
Next, the user can activate Tensorboard with the following command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd freqtrade
|
||||||
|
tensorboard --logdir user_data/models/unique-id
|
||||||
|
```
|
||||||
|
|
||||||
|
where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if the user wishes to view the output in their browser at 127.0.0.1:6006 (6006 is the default port used by Tensorboard).
|
||||||
|
|
||||||
|

|
@@ -1,6 +1,6 @@
|
|||||||
# Running FreqAI
|
# Running FreqAI
|
||||||
|
|
||||||
There are two ways to train and deploy an adaptive machine learning model - live deployment and historical backtesting. In both cases, `FreqAI` runs/simulates periodic retraining of models as shown in the following figure:
|
There are two ways to train and deploy an adaptive machine learning model - live deployment and historical backtesting. In both cases, FreqAI runs/simulates periodic retraining of models as shown in the following figure:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
@@ -33,7 +33,7 @@ FreqAI automatically downloads the proper amount of data needed to ensure traini
|
|||||||
|
|
||||||
### Saving prediction data
|
### Saving prediction data
|
||||||
|
|
||||||
All predictions made during the lifetime of a specific `identifier` model are stored in `historical_predictions.pkl` to allow for reloading after a crash or changes made to the config.
|
All predictions made during the lifetime of a specific `identifier` model are stored in `historic_predictions.pkl` to allow for reloading after a crash or changes made to the config.
|
||||||
|
|
||||||
### Purging old model data
|
### Purging old model data
|
||||||
|
|
||||||
@@ -75,19 +75,19 @@ To allow for tweaking your strategy (**not** the features!), FreqAI will automat
|
|||||||
|
|
||||||
An additional directory called `predictions`, which contains all the predictions stored in `hdf` format, will be created in the `unique-id` folder.
|
An additional directory called `predictions`, which contains all the predictions stored in `hdf` format, will be created in the `unique-id` folder.
|
||||||
|
|
||||||
To change your **features**, you **must** set a new `identifier` in the config to signal to `FreqAI` to train new models.
|
To change your **features**, you **must** set a new `identifier` in the config to signal to FreqAI to train new models.
|
||||||
|
|
||||||
To save the models generated during a particular backtest so that you can start a live deployment from one of them instead of training a new model, you must set `save_backtest_models` to `True` in the config.
|
To save the models generated during a particular backtest so that you can start a live deployment from one of them instead of training a new model, you must set `save_backtest_models` to `True` in the config.
|
||||||
|
|
||||||
### Downloading data to cover the full backtest period
|
### Downloading data to cover the full backtest period
|
||||||
|
|
||||||
For live/dry deployments, FreqAI will download the necessary data automatically. However, to use backtesting functionality, you need to download the necessary data using `download-data` (details [here](data-download.md#data-downloading)). You need to pay careful attention to understanding how much *additional* data needs to be downloaded to ensure that there is a sufficient amount of training data *before* the start of the backtesting timerange. The amount of additional data can be roughly estimated by moving the start date of the timerange backwards by `train_period_days` and the `startup_candle_count` (see the [parameter table](freqai-parameter-table.md) for detailed descriptions of these parameters) from the beginning of the desired backtesting timerange.
|
For live/dry deployments, FreqAI will download the necessary data automatically. However, to use backtesting functionality, you need to download the necessary data using `download-data` (details [here](data-download.md#data-downloading)). You need to pay careful attention to understanding how much *additional* data needs to be downloaded to ensure that there is a sufficient amount of training data *before* the start of the backtesting time range. The amount of additional data can be roughly estimated by moving the start date of the time range backwards by `train_period_days` and the `startup_candle_count` (see the [parameter table](freqai-parameter-table.md) for detailed descriptions of these parameters) from the beginning of the desired backtesting time range.
|
||||||
|
|
||||||
As an example, to backtest the `--timerange 20210501-20210701` using the [example config](freqai-configuration.md#setting-up-the-configuration-file) which sets `train_period_days` to 30, together with `startup_candle_count: 40` on a maximum `include_timeframes` of 1h, the start date for the downloaded data needs to be `20210501` - 30 days - 40 * 1h / 24 hours = 20210330 (31.7 days earlier than the start of the desired training timerange).
|
As an example, to backtest the `--timerange 20210501-20210701` using the [example config](freqai-configuration.md#setting-up-the-configuration-file) which sets `train_period_days` to 30, together with `startup_candle_count: 40` on a maximum `include_timeframes` of 1h, the start date for the downloaded data needs to be `20210501` - 30 days - 40 * 1h / 24 hours = 20210330 (31.7 days earlier than the start of the desired training time range).
|
||||||
|
|
||||||
### Deciding the size of the sliding training window and backtesting duration
|
### Deciding the size of the sliding training window and backtesting duration
|
||||||
|
|
||||||
The backtesting timerange is defined with the typical `--timerange` parameter in the configuration file. The duration of the sliding training window is set by `train_period_days`, whilst `backtest_period_days` is the sliding backtesting window, both in number of days (`backtest_period_days` can be
|
The backtesting time range is defined with the typical `--timerange` parameter in the configuration file. The duration of the sliding training window is set by `train_period_days`, whilst `backtest_period_days` is the sliding backtesting window, both in number of days (`backtest_period_days` can be
|
||||||
a float to indicate sub-daily retraining in live/dry mode). In the presented [example config](freqai-configuration.md#setting-up-the-configuration-file) (found in `config_examples/config_freqai.example.json`), the user is asking FreqAI to use a training period of 30 days and backtest on the subsequent 7 days. After the training of the model, FreqAI will backtest the subsequent 7 days. The "sliding window" then moves one week forward (emulating FreqAI retraining once per week in live mode) and the new model uses the previous 30 days (including the 7 days used for backtesting by the previous model) to train. This is repeated until the end of `--timerange`. This means that if you set `--timerange 20210501-20210701`, FreqAI will have trained 8 separate models at the end of `--timerange` (because the full range comprises 8 weeks).
|
a float to indicate sub-daily retraining in live/dry mode). In the presented [example config](freqai-configuration.md#setting-up-the-configuration-file) (found in `config_examples/config_freqai.example.json`), the user is asking FreqAI to use a training period of 30 days and backtest on the subsequent 7 days. After the training of the model, FreqAI will backtest the subsequent 7 days. The "sliding window" then moves one week forward (emulating FreqAI retraining once per week in live mode) and the new model uses the previous 30 days (including the 7 days used for backtesting by the previous model) to train. This is repeated until the end of `--timerange`. This means that if you set `--timerange 20210501-20210701`, FreqAI will have trained 8 separate models at the end of `--timerange` (because the full range comprises 8 weeks).
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
@@ -105,23 +105,6 @@ During dry/live mode, FreqAI trains each coin pair sequentially (on separate thr
|
|||||||
|
|
||||||
In the presented example config, the user will only allow predictions on models that are less than 1/2 hours old.
|
In the presented example config, the user will only allow predictions on models that are less than 1/2 hours old.
|
||||||
|
|
||||||
## Data stratification for training and testing the model
|
|
||||||
|
|
||||||
You can stratify (group) the training/testing data using:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"freqai": {
|
|
||||||
"feature_parameters" : {
|
|
||||||
"stratify_training_data": 3
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This will split the data chronologically so that every Xth data point is used to test the model after training. In the example above, the user is asking for every third data point in the dataframe to be used for
|
|
||||||
testing; the other points are used for training.
|
|
||||||
|
|
||||||
The test data is used to evaluate the performance of the model after training. If the test score is high, the model is able to capture the behavior of the data well. If the test score is low, either the model does not capture the complexity of the data, the test data is significantly different from the train data, or a different type of model should be used.
|
|
||||||
|
|
||||||
## Controlling the model learning process
|
## Controlling the model learning process
|
||||||
|
|
||||||
Model training parameters are unique to the selected machine learning library. FreqAI allows you to set any parameter for any library using the `model_training_parameters` dictionary in the config. The example config (found in `config_examples/config_freqai.example.json`) shows some of the example parameters associated with `Catboost` and `LightGBM`, but you can add any parameters available in those libraries or any other machine learning library you choose to implement.
|
Model training parameters are unique to the selected machine learning library. FreqAI allows you to set any parameter for any library using the `model_training_parameters` dictionary in the config. The example config (found in `config_examples/config_freqai.example.json`) shows some of the example parameters associated with `Catboost` and `LightGBM`, but you can add any parameters available in those libraries or any other machine learning library you choose to implement.
|
||||||
@@ -132,7 +115,7 @@ The FreqAI specific parameter `label_period_candles` defines the offset (number
|
|||||||
|
|
||||||
## Continual learning
|
## Continual learning
|
||||||
|
|
||||||
You can choose to adopt a continual learning scheme by setting `"continual_learning": true` in the config. By enabling `continual_learning`, after training an initial model from scratch, subsequent trainings will start from the final model state of the preceding training. This gives the new model a "memory" of the previous state. By default, this is set to `false` which means that all new models are trained from scratch, without input from previous models.
|
You can choose to adopt a continual learning scheme by setting `"continual_learning": true` in the config. By enabling `continual_learning`, after training an initial model from scratch, subsequent trainings will start from the final model state of the preceding training. This gives the new model a "memory" of the previous state. By default, this is set to `False` which means that all new models are trained from scratch, without input from previous models.
|
||||||
|
|
||||||
## Hyperopt
|
## Hyperopt
|
||||||
|
|
||||||
|
@@ -1,10 +1,10 @@
|
|||||||

|

|
||||||
|
|
||||||
# `FreqAI`
|
# FreqAI
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
`FreqAI` is a software designed to automate a variety of tasks associated with training a predictive machine learning model to generate market forecasts given a set of input features.
|
FreqAI is a software designed to automate a variety of tasks associated with training a predictive machine learning model to generate market forecasts given a set of input features.
|
||||||
|
|
||||||
Features include:
|
Features include:
|
||||||
|
|
||||||
@@ -23,7 +23,7 @@ Features include:
|
|||||||
|
|
||||||
## Quick start
|
## Quick start
|
||||||
|
|
||||||
The easiest way to quickly test `FreqAI` is to run it in dry mode with the following command:
|
The easiest way to quickly test FreqAI is to run it in dry mode with the following command:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel LightGBMRegressor --strategy-path freqtrade/templates
|
freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel LightGBMRegressor --strategy-path freqtrade/templates
|
||||||
@@ -37,7 +37,7 @@ An example strategy, prediction model, and config to use as a starting points ca
|
|||||||
|
|
||||||
## General approach
|
## General approach
|
||||||
|
|
||||||
You provide `FreqAI` with a set of custom *base indicators* (the same way as in a [typical Freqtrade strategy](strategy-customization.md)) as well as target values (*labels*). For each pair in the whitelist, `FreqAI` trains a model to predict the target values based on the input of custom indicators. The models are then consistently retrained, with a predetermined frequency, to adapt to market conditions. `FreqAI` offers the ability to both backtest strategies (emulating reality with periodic retraining on historic data) and deploy dry/live runs. In dry/live conditions, `FreqAI` can be set to constant retraining in a background thread to keep models as up to date as possible.
|
You provide FreqAI with a set of custom *base indicators* (the same way as in a [typical Freqtrade strategy](strategy-customization.md)) as well as target values (*labels*). For each pair in the whitelist, FreqAI trains a model to predict the target values based on the input of custom indicators. The models are then consistently retrained, with a predetermined frequency, to adapt to market conditions. FreqAI offers the ability to both backtest strategies (emulating reality with periodic retraining on historic data) and deploy dry/live runs. In dry/live conditions, FreqAI can be set to constant retraining in a background thread to keep models as up to date as possible.
|
||||||
|
|
||||||
An overview of the algorithm, explaining the data processing pipeline and model usage, is shown below.
|
An overview of the algorithm, explaining the data processing pipeline and model usage, is shown below.
|
||||||
|
|
||||||
@@ -45,21 +45,21 @@ An overview of the algorithm, explaining the data processing pipeline and model
|
|||||||
|
|
||||||
### Important machine learning vocabulary
|
### Important machine learning vocabulary
|
||||||
|
|
||||||
**Features** - the parameters, based on historic data, on which a model is trained. All features for a single candle is stored as a vector. In `FreqAI`, you build a feature data sets from anything you can construct in the strategy.
|
**Features** - the parameters, based on historic data, on which a model is trained. All features for a single candle are stored as a vector. In FreqAI, you build a feature data set from anything you can construct in the strategy.
|
||||||
|
|
||||||
**Labels** - the target values that a model is trained toward. Each feature vector is associated with a single label that is defined by you within the strategy. These labels intentionally look into the future, and are not available to the model during dry/live/backtesting.
|
**Labels** - the target values that the model is trained toward. Each feature vector is associated with a single label that is defined by you within the strategy. These labels intentionally look into the future and are what you are training the model to be able to predict.
|
||||||
|
|
||||||
**Training** - the process of "teaching" the model to match the feature sets to the associated labels. Different types of models "learn" in different ways. More information about the different models can be found [here](freqai-configuration.md#using-different-prediction-models).
|
**Training** - the process of "teaching" the model to match the feature sets to the associated labels. Different types of models "learn" in different ways which means that one might be better than another for a specific application. More information about the different models that are already implemented in FreqAI can be found [here](freqai-configuration.md#using-different-prediction-models).
|
||||||
|
|
||||||
**Train data** - a subset of the feature data set that is fed to the model during training. This data directly influences weight connections in the model.
|
**Train data** - a subset of the feature data set that is fed to the model during training to "teach" the model how to predict the targets. This data directly influences weight connections in the model.
|
||||||
|
|
||||||
**Test data** - a subset of the feature data set that is used to evaluate the performance of the model after training. This data does not influence nodal weights within the model.
|
**Test data** - a subset of the feature data set that is used to evaluate the performance of the model after training. This data does not influence nodal weights within the model.
|
||||||
|
|
||||||
**Inferencing** - the process of feeding a trained model new data on which it will make a prediction.
|
**Inferencing** - the process of feeding a trained model new unseen data on which it will make a prediction.
|
||||||
|
|
||||||
## Install prerequisites
|
## Install prerequisites
|
||||||
|
|
||||||
The normal Freqtrade install process will ask if you wish to install `FreqAI` dependencies. You should reply "yes" to this question if you wish to use `FreqAI`. If you did not reply yes, you can manually install these dependencies after the install with:
|
The normal Freqtrade install process will ask if you wish to install FreqAI dependencies. You should reply "yes" to this question if you wish to use FreqAI. If you did not reply yes, you can manually install these dependencies after the install with:
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
pip install -r requirements-freqai.txt
|
pip install -r requirements-freqai.txt
|
||||||
@@ -70,18 +70,18 @@ pip install -r requirements-freqai.txt
|
|||||||
|
|
||||||
### Usage with docker
|
### Usage with docker
|
||||||
|
|
||||||
If you are using docker, a dedicated tag with `FreqAI` dependencies is available as `:freqai`. As such - you can replace the image line in your docker-compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular `FreqAI` dependencies. Similar to native installs, Catboost will not be available on ARM based devices.
|
If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker-compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.
|
||||||
|
|
||||||
## Common pitfalls
|
## Common pitfalls
|
||||||
|
|
||||||
`FreqAI` cannot be combined with dynamic `VolumePairlists` (or any pairlist filter that adds and removes pairs dynamically).
|
FreqAI cannot be combined with dynamic `VolumePairlists` (or any pairlist filter that adds and removes pairs dynamically).
|
||||||
This is for performance reasons - `FreqAI` relies on making quick predictions/retrains. To do this effectively,
|
This is for performance reasons - FreqAI relies on making quick predictions/retrains. To do this effectively,
|
||||||
it needs to download all the training data at the beginning of a dry/live instance. `FreqAI` stores and appends
|
it needs to download all the training data at the beginning of a dry/live instance. FreqAI stores and appends
|
||||||
new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, `FreqAI` does work with the `ShufflePairlist` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume).
|
new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, FreqAI does work with the `ShufflePairlist` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume).
|
||||||
|
|
||||||
## Credits
|
## Credits
|
||||||
|
|
||||||
`FreqAI` is developed by a group of individuals who all contribute specific skillsets to the project.
|
FreqAI is developed by a group of individuals who all contribute specific skillsets to the project.
|
||||||
|
|
||||||
Conception and software development:
|
Conception and software development:
|
||||||
Robert Caulk @robcaulk
|
Robert Caulk @robcaulk
|
||||||
@@ -96,5 +96,4 @@ Software development:
|
|||||||
Wagner Costa @wagnercosta
|
Wagner Costa @wagnercosta
|
||||||
|
|
||||||
Beta testing and bug reporting:
|
Beta testing and bug reporting:
|
||||||
Stefan Gehring @bloodhunter4rc, @longyu, Andrew Robert Lawless @paranoidandy, Pascal Schmidt @smidelis, Ryan McMullan @smarmau,
|
Stefan Gehring @bloodhunter4rc, @longyu, Andrew Lawless @paranoidandy, Pascal Schmidt @smidelis, Ryan McMullan @smarmau, Juha Nykänen @suikula, Johan van der Vlugt @jooopiert, Richárd Józsa @richardjosza, Timothy Pogue @wizrds
|
||||||
Juha Nykänen @suikula, Johan van der Vlugt @jooopiert, Richárd Józsa @richardjosza
|
|
||||||
|
@@ -22,6 +22,7 @@ You may also use something like `.*DOWN/BTC` or `.*UP/BTC` to exclude leveraged
|
|||||||
|
|
||||||
* [`StaticPairList`](#static-pair-list) (default, if not configured differently)
|
* [`StaticPairList`](#static-pair-list) (default, if not configured differently)
|
||||||
* [`VolumePairList`](#volume-pair-list)
|
* [`VolumePairList`](#volume-pair-list)
|
||||||
|
* [`ProducerPairList`](#producerpairlist)
|
||||||
* [`AgeFilter`](#agefilter)
|
* [`AgeFilter`](#agefilter)
|
||||||
* [`OffsetFilter`](#offsetfilter)
|
* [`OffsetFilter`](#offsetfilter)
|
||||||
* [`PerformanceFilter`](#performancefilter)
|
* [`PerformanceFilter`](#performancefilter)
|
||||||
@@ -84,7 +85,7 @@ Filtering instances (not the first position in the list) will not apply any cach
|
|||||||
|
|
||||||
You can define a minimum volume with `min_value` - which will filter out pairs with a volume lower than the specified value in the specified timerange.
|
You can define a minimum volume with `min_value` - which will filter out pairs with a volume lower than the specified value in the specified timerange.
|
||||||
|
|
||||||
### VolumePairList Advanced mode
|
##### VolumePairList Advanced mode
|
||||||
|
|
||||||
`VolumePairList` can also operate in an advanced mode to build volume over a given timerange of specified candle size. It utilizes exchange historical candle data, builds a typical price (calculated by (open+high+low)/3) and multiplies the typical price with every candle's volume. The sum is the `quoteVolume` over the given range. This allows different scenarios, for a more smoothened volume, when using longer ranges with larger candle sizes, or the opposite when using a short range with small candles.
|
`VolumePairList` can also operate in an advanced mode to build volume over a given timerange of specified candle size. It utilizes exchange historical candle data, builds a typical price (calculated by (open+high+low)/3) and multiplies the typical price with every candle's volume. The sum is the `quoteVolume` over the given range. This allows different scenarios, for a more smoothened volume, when using longer ranges with larger candle sizes, or the opposite when using a short range with small candles.
|
||||||
|
|
||||||
@@ -146,6 +147,32 @@ More sophisticated approach can be used, by using `lookback_timeframe` for candl
|
|||||||
!!! Note
|
!!! Note
|
||||||
`VolumePairList` does not support backtesting mode.
|
`VolumePairList` does not support backtesting mode.
|
||||||
|
|
||||||
|
#### ProducerPairList
|
||||||
|
|
||||||
|
With `ProducerPairList`, you can reuse the pairlist from a [Producer](producer-consumer.md) without explicitly defining the pairlist on each consumer.
|
||||||
|
|
||||||
|
[Consumer mode](producer-consumer.md) is required for this pairlist to work.
|
||||||
|
|
||||||
|
The pairlist will perform a check on active pairs against the current exchange configuration to avoid attempting to trade on invalid markets.
|
||||||
|
|
||||||
|
You can limit the length of the pairlist with the optional parameter `number_assets`. Using `"number_assets"=0` or omitting this key will result in the reuse of all producer pairs valid for the current setup.
|
||||||
|
|
||||||
|
```json
|
||||||
|
"pairlists": [
|
||||||
|
{
|
||||||
|
"method": "ProducerPairList",
|
||||||
|
"number_assets": 5,
|
||||||
|
"producer_name": "default",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
!!! Tip "Combining pairlists"
|
||||||
|
This pairlist can be combined with all other pairlists and filters for further pairlist reduction, and can also act as an "additional" pairlist, on top of already defined pairs.
|
||||||
|
`ProducerPairList` can also be used multiple times in sequence, combining the pairs from multiple producers.
|
||||||
|
Obviously in complex such configurations, the Producer may not provide data for all pairs, so the strategy must be fit for this.
|
||||||
|
|
||||||
#### AgeFilter
|
#### AgeFilter
|
||||||
|
|
||||||
Removes pairs that have been listed on the exchange for less than `min_days_listed` days (defaults to `10`) or more than `max_days_listed` days (defaults `None` mean infinity).
|
Removes pairs that have been listed on the exchange for less than `min_days_listed` days (defaults to `10`) or more than `max_days_listed` days (defaults `None` mean infinity).
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
markdown==3.3.7
|
markdown==3.3.7
|
||||||
mkdocs==1.3.1
|
mkdocs==1.4.0
|
||||||
mkdocs-material==8.5.3
|
mkdocs-material==8.5.6
|
||||||
mdx_truly_sane_lists==1.3
|
mdx_truly_sane_lists==1.3
|
||||||
pymdown-extensions==9.5
|
pymdown-extensions==9.6
|
||||||
jinja2==3.1.2
|
jinja2==3.1.2
|
||||||
|
@@ -643,7 +643,7 @@ This callback is **not** called when there is an open order (either buy or sell)
|
|||||||
|
|
||||||
Additional Buys are ignored once you have reached the maximum amount of extra buys that you have set on `max_entry_position_adjustment`, but the callback is called anyway looking for partial exits.
|
Additional Buys are ignored once you have reached the maximum amount of extra buys that you have set on `max_entry_position_adjustment`, but the callback is called anyway looking for partial exits.
|
||||||
|
|
||||||
Position adjustments will always be applied in the direction of the trade, so a positive value will always increase your position (negative values will decrease your position), no matter if it's a long or short trade. Modifications to leverage are not possible.
|
Position adjustments will always be applied in the direction of the trade, so a positive value will always increase your position (negative values will decrease your position), no matter if it's a long or short trade. Modifications to leverage are not possible, and the stake-amount is assumed to be before applying leverage.
|
||||||
|
|
||||||
!!! Note "About stake size"
|
!!! Note "About stake size"
|
||||||
Using fixed stake size means it will be the amount used for the first order, just like without position adjustment.
|
Using fixed stake size means it will be the amount used for the first order, just like without position adjustment.
|
||||||
|
@@ -37,3 +37,12 @@ pip install -e .
|
|||||||
# Ensure freqUI is at the latest version
|
# Ensure freqUI is at the latest version
|
||||||
freqtrade install-ui
|
freqtrade install-ui
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Problems updating
|
||||||
|
|
||||||
|
Update-problems usually come missing dependencies (you didn't follow the above instructions) - or from updated dependencies, which fail to install (for example TA-lib).
|
||||||
|
Please refer to the corresponding installation sections (common problems linked below)
|
||||||
|
|
||||||
|
Common problems and their solutions:
|
||||||
|
|
||||||
|
* [ta-lib update on windows](windows_installation.md#2-install-ta-lib)
|
||||||
|
@@ -34,7 +34,7 @@ python -m venv .env
|
|||||||
.env\Scripts\activate.ps1
|
.env\Scripts\activate.ps1
|
||||||
# optionally install ta-lib from wheel
|
# optionally install ta-lib from wheel
|
||||||
# Eventually adjust the below filename to match the downloaded wheel
|
# Eventually adjust the below filename to match the downloaded wheel
|
||||||
pip install --find-links build_helpers\ TA-Lib
|
pip install --find-links build_helpers\ TA-Lib -U
|
||||||
pip install -r requirements.txt
|
pip install -r requirements.txt
|
||||||
pip install -e .
|
pip install -e .
|
||||||
freqtrade
|
freqtrade
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
""" Freqtrade bot """
|
""" Freqtrade bot """
|
||||||
__version__ = '2022.9.1'
|
__version__ = '2022.10.dev'
|
||||||
|
|
||||||
if 'dev' in __version__:
|
if 'dev' in __version__:
|
||||||
try:
|
try:
|
||||||
|
@@ -1,6 +1,5 @@
|
|||||||
# flake8: noqa: F401
|
# flake8: noqa: F401
|
||||||
|
|
||||||
from freqtrade.configuration.check_exchange import check_exchange
|
|
||||||
from freqtrade.configuration.config_setup import setup_utils_configuration
|
from freqtrade.configuration.config_setup import setup_utils_configuration
|
||||||
from freqtrade.configuration.config_validation import validate_config_consistency
|
from freqtrade.configuration.config_validation import validate_config_consistency
|
||||||
from freqtrade.configuration.configuration import Configuration
|
from freqtrade.configuration.configuration import Configuration
|
||||||
|
@@ -8,7 +8,6 @@ from pathlib import Path
|
|||||||
from typing import Any, Callable, Dict, List, Optional
|
from typing import Any, Callable, Dict, List, Optional
|
||||||
|
|
||||||
from freqtrade import constants
|
from freqtrade import constants
|
||||||
from freqtrade.configuration.check_exchange import check_exchange
|
|
||||||
from freqtrade.configuration.deprecated_settings import process_temporary_deprecated_settings
|
from freqtrade.configuration.deprecated_settings import process_temporary_deprecated_settings
|
||||||
from freqtrade.configuration.directory_operations import create_datadir, create_userdata_dir
|
from freqtrade.configuration.directory_operations import create_datadir, create_userdata_dir
|
||||||
from freqtrade.configuration.environment_vars import enironment_vars_to_dict
|
from freqtrade.configuration.environment_vars import enironment_vars_to_dict
|
||||||
@@ -100,6 +99,9 @@ class Configuration:
|
|||||||
|
|
||||||
self._process_freqai_options(config)
|
self._process_freqai_options(config)
|
||||||
|
|
||||||
|
# Import check_exchange here to avoid import cycle problems
|
||||||
|
from freqtrade.exchange.check_exchange import check_exchange
|
||||||
|
|
||||||
# Check if the exchange set by the user is supported
|
# Check if the exchange set by the user is supported
|
||||||
check_exchange(config, config.get('experimental', {}).get('block_bad_exchanges', True))
|
check_exchange(config, config.get('experimental', {}).get('block_bad_exchanges', True))
|
||||||
|
|
||||||
|
@@ -31,7 +31,7 @@ HYPEROPT_LOSS_BUILTIN = ['ShortTradeDurHyperOptLoss', 'OnlyProfitHyperOptLoss',
|
|||||||
'CalmarHyperOptLoss',
|
'CalmarHyperOptLoss',
|
||||||
'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss',
|
'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss',
|
||||||
'ProfitDrawDownHyperOptLoss']
|
'ProfitDrawDownHyperOptLoss']
|
||||||
AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList',
|
AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList',
|
||||||
'AgeFilter', 'OffsetFilter', 'PerformanceFilter',
|
'AgeFilter', 'OffsetFilter', 'PerformanceFilter',
|
||||||
'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter',
|
'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter',
|
||||||
'ShuffleFilter', 'SpreadFilter', 'VolatilityFilter']
|
'ShuffleFilter', 'SpreadFilter', 'VolatilityFilter']
|
||||||
@@ -567,13 +567,11 @@ CONF_SCHEMA = {
|
|||||||
"properties": {
|
"properties": {
|
||||||
"test_size": {"type": "number"},
|
"test_size": {"type": "number"},
|
||||||
"random_state": {"type": "integer"},
|
"random_state": {"type": "integer"},
|
||||||
|
"shuffle": {"type": "boolean", "default": False}
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"model_training_parameters": {
|
"model_training_parameters": {
|
||||||
"type": "object",
|
"type": "object"
|
||||||
"properties": {
|
|
||||||
"n_estimators": {"type": "integer", "default": 1000}
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"required": [
|
"required": [
|
||||||
|
@@ -47,8 +47,7 @@ def ohlcv_to_dataframe(ohlcv: list, timeframe: str, pair: str, *,
|
|||||||
|
|
||||||
|
|
||||||
def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *,
|
def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *,
|
||||||
fill_missing: bool = True,
|
fill_missing: bool, drop_incomplete: bool) -> DataFrame:
|
||||||
drop_incomplete: bool = True) -> DataFrame:
|
|
||||||
"""
|
"""
|
||||||
Cleanse a OHLCV dataframe by
|
Cleanse a OHLCV dataframe by
|
||||||
* Grouping it by date (removes duplicate tics)
|
* Grouping it by date (removes duplicate tics)
|
||||||
|
@@ -26,7 +26,7 @@ def load_pair_history(pair: str,
|
|||||||
datadir: Path, *,
|
datadir: Path, *,
|
||||||
timerange: Optional[TimeRange] = None,
|
timerange: Optional[TimeRange] = None,
|
||||||
fill_up_missing: bool = True,
|
fill_up_missing: bool = True,
|
||||||
drop_incomplete: bool = True,
|
drop_incomplete: bool = False,
|
||||||
startup_candles: int = 0,
|
startup_candles: int = 0,
|
||||||
data_format: str = None,
|
data_format: str = None,
|
||||||
data_handler: IDataHandler = None,
|
data_handler: IDataHandler = None,
|
||||||
|
@@ -275,7 +275,7 @@ class IDataHandler(ABC):
|
|||||||
candle_type: CandleType, *,
|
candle_type: CandleType, *,
|
||||||
timerange: Optional[TimeRange] = None,
|
timerange: Optional[TimeRange] = None,
|
||||||
fill_missing: bool = True,
|
fill_missing: bool = True,
|
||||||
drop_incomplete: bool = True,
|
drop_incomplete: bool = False,
|
||||||
startup_candles: int = 0,
|
startup_candles: int = 0,
|
||||||
warn_no_data: bool = True,
|
warn_no_data: bool = True,
|
||||||
) -> DataFrame:
|
) -> DataFrame:
|
||||||
|
@@ -12,8 +12,8 @@ from freqtrade.exchange.coinbasepro import Coinbasepro
|
|||||||
from freqtrade.exchange.exchange import (amount_to_contract_precision, amount_to_contracts,
|
from freqtrade.exchange.exchange import (amount_to_contract_precision, amount_to_contracts,
|
||||||
amount_to_precision, available_exchanges, ccxt_exchanges,
|
amount_to_precision, available_exchanges, ccxt_exchanges,
|
||||||
contracts_to_amount, date_minus_candles,
|
contracts_to_amount, date_minus_candles,
|
||||||
is_exchange_known_ccxt, is_exchange_officially_supported,
|
is_exchange_known_ccxt, market_is_active,
|
||||||
market_is_active, price_to_precision, timeframe_to_minutes,
|
price_to_precision, timeframe_to_minutes,
|
||||||
timeframe_to_msecs, timeframe_to_next_date,
|
timeframe_to_msecs, timeframe_to_next_date,
|
||||||
timeframe_to_prev_date, timeframe_to_seconds,
|
timeframe_to_prev_date, timeframe_to_seconds,
|
||||||
validate_exchange, validate_exchanges)
|
validate_exchange, validate_exchanges)
|
||||||
|
@@ -68,6 +68,37 @@ class Binance(Exchange):
|
|||||||
tickers = deep_merge_dicts(bidsasks, tickers, allow_null_overrides=False)
|
tickers = deep_merge_dicts(bidsasks, tickers, allow_null_overrides=False)
|
||||||
return tickers
|
return tickers
|
||||||
|
|
||||||
|
@retrier
|
||||||
|
def additional_exchange_init(self) -> None:
|
||||||
|
"""
|
||||||
|
Additional exchange initialization logic.
|
||||||
|
.api will be available at this point.
|
||||||
|
Must be overridden in child methods if required.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if self.trading_mode == TradingMode.FUTURES and not self._config['dry_run']:
|
||||||
|
position_side = self._api.fapiPrivateGetPositionsideDual()
|
||||||
|
self._log_exchange_response('position_side_setting', position_side)
|
||||||
|
assets_margin = self._api.fapiPrivateGetMultiAssetsMargin()
|
||||||
|
self._log_exchange_response('multi_asset_margin', assets_margin)
|
||||||
|
msg = ""
|
||||||
|
if position_side.get('dualSidePosition') is True:
|
||||||
|
msg += (
|
||||||
|
"\nHedge Mode is not supported by freqtrade. "
|
||||||
|
"Please change 'Position Mode' on your binance futures account.")
|
||||||
|
if assets_margin.get('multiAssetsMargin') is True:
|
||||||
|
msg += ("\nMulti-Asset Mode is not supported by freqtrade. "
|
||||||
|
"Please change 'Asset Mode' on your binance futures account.")
|
||||||
|
if msg:
|
||||||
|
raise OperationalException(msg)
|
||||||
|
except ccxt.DDoSProtection as e:
|
||||||
|
raise DDosProtection(e) from e
|
||||||
|
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
|
||||||
|
raise TemporaryError(
|
||||||
|
f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e
|
||||||
|
except ccxt.BaseError as e:
|
||||||
|
raise OperationalException(e) from e
|
||||||
|
|
||||||
@retrier
|
@retrier
|
||||||
def _set_leverage(
|
def _set_leverage(
|
||||||
self,
|
self,
|
||||||
|
@@ -3,8 +3,8 @@ import logging
|
|||||||
from freqtrade.constants import Config
|
from freqtrade.constants import Config
|
||||||
from freqtrade.enums import RunMode
|
from freqtrade.enums import RunMode
|
||||||
from freqtrade.exceptions import OperationalException
|
from freqtrade.exceptions import OperationalException
|
||||||
from freqtrade.exchange import (available_exchanges, is_exchange_known_ccxt,
|
from freqtrade.exchange import available_exchanges, is_exchange_known_ccxt, validate_exchange
|
||||||
is_exchange_officially_supported, validate_exchange)
|
from freqtrade.exchange.common import MAP_EXCHANGE_CHILDCLASS, SUPPORTED_EXCHANGES
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -52,7 +52,7 @@ def check_exchange(config: Config, check_for_bad: bool = True) -> bool:
|
|||||||
else:
|
else:
|
||||||
logger.warning(f'Exchange "{exchange}" will not work with Freqtrade. Reason: {reason}')
|
logger.warning(f'Exchange "{exchange}" will not work with Freqtrade. Reason: {reason}')
|
||||||
|
|
||||||
if is_exchange_officially_supported(exchange):
|
if MAP_EXCHANGE_CHILDCLASS.get(exchange, exchange) in SUPPORTED_EXCHANGES:
|
||||||
logger.info(f'Exchange "{exchange}" is officially supported '
|
logger.info(f'Exchange "{exchange}" is officially supported '
|
||||||
f'by the Freqtrade development team.')
|
f'by the Freqtrade development team.')
|
||||||
else:
|
else:
|
@@ -30,8 +30,7 @@ from freqtrade.exceptions import (DDosProtection, ExchangeError, InsufficientFun
|
|||||||
RetryableOrderError, TemporaryError)
|
RetryableOrderError, TemporaryError)
|
||||||
from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, BAD_EXCHANGES,
|
from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, BAD_EXCHANGES,
|
||||||
EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED,
|
EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED,
|
||||||
SUPPORTED_EXCHANGES, remove_credentials, retrier,
|
remove_credentials, retrier, retrier_async)
|
||||||
retrier_async)
|
|
||||||
from freqtrade.misc import (chunks, deep_merge_dicts, file_dump_json, file_load_json,
|
from freqtrade.misc import (chunks, deep_merge_dicts, file_dump_json, file_load_json,
|
||||||
safe_value_fallback2)
|
safe_value_fallback2)
|
||||||
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
|
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
|
||||||
@@ -1292,7 +1291,14 @@ class Exchange:
|
|||||||
order = self.fetch_order(order_id, pair)
|
order = self.fetch_order(order_id, pair)
|
||||||
except InvalidOrderException:
|
except InvalidOrderException:
|
||||||
logger.warning(f"Could not fetch cancelled order {order_id}.")
|
logger.warning(f"Could not fetch cancelled order {order_id}.")
|
||||||
order = {'fee': {}, 'status': 'canceled', 'amount': amount, 'info': {}}
|
order = {
|
||||||
|
'id': order_id,
|
||||||
|
'status': 'canceled',
|
||||||
|
'amount': amount,
|
||||||
|
'filled': 0.0,
|
||||||
|
'fee': {},
|
||||||
|
'info': {}
|
||||||
|
}
|
||||||
|
|
||||||
return order
|
return order
|
||||||
|
|
||||||
@@ -1863,6 +1869,38 @@ class Exchange:
|
|||||||
return self._async_get_candle_history(
|
return self._async_get_candle_history(
|
||||||
pair, timeframe, since_ms=since_ms, candle_type=candle_type)
|
pair, timeframe, since_ms=since_ms, candle_type=candle_type)
|
||||||
|
|
||||||
|
def _build_ohlcv_dl_jobs(
|
||||||
|
self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int],
|
||||||
|
cache: bool) -> Tuple[List[Coroutine], List[Tuple[str, str, CandleType]]]:
|
||||||
|
"""
|
||||||
|
Build Coroutines to execute as part of refresh_latest_ohlcv
|
||||||
|
"""
|
||||||
|
input_coroutines = []
|
||||||
|
cached_pairs = []
|
||||||
|
for pair, timeframe, candle_type in set(pair_list):
|
||||||
|
if (
|
||||||
|
timeframe not in self.timeframes
|
||||||
|
and candle_type in (CandleType.SPOT, CandleType.FUTURES)
|
||||||
|
):
|
||||||
|
logger.warning(
|
||||||
|
f"Cannot download ({pair}, {timeframe}) combination as this timeframe is "
|
||||||
|
f"not available on {self.name}. Available timeframes are "
|
||||||
|
f"{', '.join(self.timeframes)}.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if ((pair, timeframe, candle_type) not in self._klines or not cache
|
||||||
|
or self._now_is_time_to_refresh(pair, timeframe, candle_type)):
|
||||||
|
input_coroutines.append(self._build_coroutine(
|
||||||
|
pair, timeframe, candle_type=candle_type, since_ms=since_ms))
|
||||||
|
|
||||||
|
else:
|
||||||
|
logger.debug(
|
||||||
|
f"Using cached candle (OHLCV) data for {pair}, {timeframe}, {candle_type} ..."
|
||||||
|
)
|
||||||
|
cached_pairs.append((pair, timeframe, candle_type))
|
||||||
|
|
||||||
|
return input_coroutines, cached_pairs
|
||||||
|
|
||||||
def refresh_latest_ohlcv(self, pair_list: ListPairsWithTimeframes, *,
|
def refresh_latest_ohlcv(self, pair_list: ListPairsWithTimeframes, *,
|
||||||
since_ms: Optional[int] = None, cache: bool = True,
|
since_ms: Optional[int] = None, cache: bool = True,
|
||||||
drop_incomplete: Optional[bool] = None
|
drop_incomplete: Optional[bool] = None
|
||||||
@@ -1880,27 +1918,9 @@ class Exchange:
|
|||||||
"""
|
"""
|
||||||
logger.debug("Refreshing candle (OHLCV) data for %d pairs", len(pair_list))
|
logger.debug("Refreshing candle (OHLCV) data for %d pairs", len(pair_list))
|
||||||
drop_incomplete = self._ohlcv_partial_candle if drop_incomplete is None else drop_incomplete
|
drop_incomplete = self._ohlcv_partial_candle if drop_incomplete is None else drop_incomplete
|
||||||
input_coroutines = []
|
|
||||||
cached_pairs = []
|
|
||||||
# Gather coroutines to run
|
|
||||||
for pair, timeframe, candle_type in set(pair_list):
|
|
||||||
if (timeframe not in self.timeframes
|
|
||||||
and candle_type in (CandleType.SPOT, CandleType.FUTURES)):
|
|
||||||
logger.warning(
|
|
||||||
f"Cannot download ({pair}, {timeframe}) combination as this timeframe is "
|
|
||||||
f"not available on {self.name}. Available timeframes are "
|
|
||||||
f"{', '.join(self.timeframes)}.")
|
|
||||||
continue
|
|
||||||
if ((pair, timeframe, candle_type) not in self._klines or not cache
|
|
||||||
or self._now_is_time_to_refresh(pair, timeframe, candle_type)):
|
|
||||||
input_coroutines.append(self._build_coroutine(
|
|
||||||
pair, timeframe, candle_type=candle_type, since_ms=since_ms))
|
|
||||||
|
|
||||||
else:
|
# Gather coroutines to run
|
||||||
logger.debug(
|
input_coroutines, cached_pairs = self._build_ohlcv_dl_jobs(pair_list, since_ms, cache)
|
||||||
f"Using cached candle (OHLCV) data for {pair}, {timeframe}, {candle_type} ..."
|
|
||||||
)
|
|
||||||
cached_pairs.append((pair, timeframe, candle_type))
|
|
||||||
|
|
||||||
results_df = {}
|
results_df = {}
|
||||||
# Chunk requests into batches of 100 to avoid overwelming ccxt Throttling
|
# Chunk requests into batches of 100 to avoid overwelming ccxt Throttling
|
||||||
@@ -1941,10 +1961,8 @@ class Exchange:
|
|||||||
interval_in_sec = timeframe_to_seconds(timeframe)
|
interval_in_sec = timeframe_to_seconds(timeframe)
|
||||||
|
|
||||||
return not (
|
return not (
|
||||||
(self._pairs_last_refresh_time.get(
|
(self._pairs_last_refresh_time.get((pair, timeframe, candle_type), 0)
|
||||||
(pair, timeframe, candle_type),
|
+ interval_in_sec) >= arrow.utcnow().int_timestamp
|
||||||
0
|
|
||||||
) + interval_in_sec) >= arrow.utcnow().int_timestamp
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@retrier_async
|
@retrier_async
|
||||||
@@ -2754,10 +2772,6 @@ def is_exchange_known_ccxt(exchange_name: str, ccxt_module: CcxtModuleType = Non
|
|||||||
return exchange_name in ccxt_exchanges(ccxt_module)
|
return exchange_name in ccxt_exchanges(ccxt_module)
|
||||||
|
|
||||||
|
|
||||||
def is_exchange_officially_supported(exchange_name: str) -> bool:
|
|
||||||
return exchange_name in SUPPORTED_EXCHANGES
|
|
||||||
|
|
||||||
|
|
||||||
def ccxt_exchanges(ccxt_module: CcxtModuleType = None) -> List[str]:
|
def ccxt_exchanges(ccxt_module: CcxtModuleType = None) -> List[str]:
|
||||||
"""
|
"""
|
||||||
Return the list of all exchanges known to ccxt
|
Return the list of all exchanges known to ccxt
|
||||||
|
@@ -78,7 +78,8 @@ class Okx(Exchange):
|
|||||||
raise DDosProtection(e) from e
|
raise DDosProtection(e) from e
|
||||||
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
|
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
|
||||||
raise TemporaryError(
|
raise TemporaryError(
|
||||||
f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e
|
f'Error in additional_exchange_init due to {e.__class__.__name__}. Message: {e}'
|
||||||
|
) from e
|
||||||
except ccxt.BaseError as e:
|
except ccxt.BaseError as e:
|
||||||
raise OperationalException(e) from e
|
raise OperationalException(e) from e
|
||||||
|
|
||||||
|
134
freqtrade/freqai/RL/Base4ActionRLEnv.py
Normal file
134
freqtrade/freqai/RL/Base4ActionRLEnv.py
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
import logging
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
from gym import spaces
|
||||||
|
|
||||||
|
from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class Actions(Enum):
|
||||||
|
Neutral = 0
|
||||||
|
Exit = 1
|
||||||
|
Long_enter = 2
|
||||||
|
Short_enter = 3
|
||||||
|
|
||||||
|
|
||||||
|
class Base4ActionRLEnv(BaseEnvironment):
|
||||||
|
"""
|
||||||
|
Base class for a 4 action environment
|
||||||
|
"""
|
||||||
|
|
||||||
|
def set_action_space(self):
|
||||||
|
self.action_space = spaces.Discrete(len(Actions))
|
||||||
|
|
||||||
|
def step(self, action: int):
|
||||||
|
"""
|
||||||
|
Logic for a single step (incrementing one candle in time)
|
||||||
|
by the agent
|
||||||
|
:param: action: int = the action type that the agent plans
|
||||||
|
to take for the current step.
|
||||||
|
:returns:
|
||||||
|
observation = current state of environment
|
||||||
|
step_reward = the reward from `calculate_reward()`
|
||||||
|
_done = if the agent "died" or if the candles finished
|
||||||
|
info = dict passed back to openai gym lib
|
||||||
|
"""
|
||||||
|
self._done = False
|
||||||
|
self._current_tick += 1
|
||||||
|
|
||||||
|
if self._current_tick == self._end_tick:
|
||||||
|
self._done = True
|
||||||
|
|
||||||
|
self._update_unrealized_total_profit()
|
||||||
|
|
||||||
|
step_reward = self.calculate_reward(action)
|
||||||
|
self.total_reward += step_reward
|
||||||
|
|
||||||
|
trade_type = None
|
||||||
|
if self.is_tradesignal(action):
|
||||||
|
"""
|
||||||
|
Action: Neutral, position: Long -> Close Long
|
||||||
|
Action: Neutral, position: Short -> Close Short
|
||||||
|
|
||||||
|
Action: Long, position: Neutral -> Open Long
|
||||||
|
Action: Long, position: Short -> Close Short and Open Long
|
||||||
|
|
||||||
|
Action: Short, position: Neutral -> Open Short
|
||||||
|
Action: Short, position: Long -> Close Long and Open Short
|
||||||
|
"""
|
||||||
|
|
||||||
|
if action == Actions.Neutral.value:
|
||||||
|
self._position = Positions.Neutral
|
||||||
|
trade_type = "neutral"
|
||||||
|
self._last_trade_tick = None
|
||||||
|
elif action == Actions.Long_enter.value:
|
||||||
|
self._position = Positions.Long
|
||||||
|
trade_type = "long"
|
||||||
|
self._last_trade_tick = self._current_tick
|
||||||
|
elif action == Actions.Short_enter.value:
|
||||||
|
self._position = Positions.Short
|
||||||
|
trade_type = "short"
|
||||||
|
self._last_trade_tick = self._current_tick
|
||||||
|
elif action == Actions.Exit.value:
|
||||||
|
self._position = Positions.Neutral
|
||||||
|
trade_type = "neutral"
|
||||||
|
self._last_trade_tick = None
|
||||||
|
else:
|
||||||
|
print("case not defined")
|
||||||
|
|
||||||
|
if trade_type is not None:
|
||||||
|
self.trade_history.append(
|
||||||
|
{'price': self.current_price(), 'index': self._current_tick,
|
||||||
|
'type': trade_type})
|
||||||
|
|
||||||
|
if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8):
|
||||||
|
self._done = True
|
||||||
|
|
||||||
|
self._position_history.append(self._position)
|
||||||
|
|
||||||
|
info = dict(
|
||||||
|
tick=self._current_tick,
|
||||||
|
total_reward=self.total_reward,
|
||||||
|
total_profit=self._total_profit,
|
||||||
|
position=self._position.value
|
||||||
|
)
|
||||||
|
|
||||||
|
observation = self._get_observation()
|
||||||
|
|
||||||
|
self._update_history(info)
|
||||||
|
|
||||||
|
return observation, step_reward, self._done, info
|
||||||
|
|
||||||
|
def is_tradesignal(self, action: int):
|
||||||
|
"""
|
||||||
|
Determine if the signal is a trade signal
|
||||||
|
e.g.: agent wants a Actions.Long_exit while it is in a Positions.short
|
||||||
|
"""
|
||||||
|
return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or
|
||||||
|
(action == Actions.Neutral.value and self._position == Positions.Short) or
|
||||||
|
(action == Actions.Neutral.value and self._position == Positions.Long) or
|
||||||
|
(action == Actions.Short_enter.value and self._position == Positions.Short) or
|
||||||
|
(action == Actions.Short_enter.value and self._position == Positions.Long) or
|
||||||
|
(action == Actions.Exit.value and self._position == Positions.Neutral) or
|
||||||
|
(action == Actions.Long_enter.value and self._position == Positions.Long) or
|
||||||
|
(action == Actions.Long_enter.value and self._position == Positions.Short))
|
||||||
|
|
||||||
|
def _is_valid(self, action: int):
|
||||||
|
"""
|
||||||
|
Determine if the signal is valid.
|
||||||
|
e.g.: agent wants a Actions.Long_exit while it is in a Positions.short
|
||||||
|
"""
|
||||||
|
# Agent should only try to exit if it is in position
|
||||||
|
if action == Actions.Exit.value:
|
||||||
|
if self._position not in (Positions.Short, Positions.Long):
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Agent should only try to enter if it is not in position
|
||||||
|
if action in (Actions.Short_enter.value, Actions.Long_enter.value):
|
||||||
|
if self._position != Positions.Neutral:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
201
freqtrade/freqai/RL/Base5ActionRLEnv.py
Normal file
201
freqtrade/freqai/RL/Base5ActionRLEnv.py
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
import logging
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
from gym import spaces
|
||||||
|
from pandas import DataFrame
|
||||||
|
|
||||||
|
from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class Actions(Enum):
|
||||||
|
Neutral = 0
|
||||||
|
Long_enter = 1
|
||||||
|
Long_exit = 2
|
||||||
|
Short_enter = 3
|
||||||
|
Short_exit = 4
|
||||||
|
|
||||||
|
|
||||||
|
def mean_over_std(x):
|
||||||
|
std = np.std(x, ddof=1)
|
||||||
|
mean = np.mean(x)
|
||||||
|
return mean / std if std > 0 else 0
|
||||||
|
|
||||||
|
|
||||||
|
class Base5ActionRLEnv(BaseEnvironment):
    """
    Base class for a 5 action environment
    (Neutral, Long_enter, Long_exit, Short_enter, Short_exit).
    """

    def set_action_space(self):
        """Expose one discrete action per member of :class:`Actions`."""
        self.action_space = spaces.Discrete(len(Actions))

    def reset(self):
        """
        Reset the episode state and return the first observation.
        """
        self._done = False

        if self.starting_point is True:
            self._position_history = (self._start_tick * [None]) + [self._position]
        else:
            self._position_history = (self.window_size * [None]) + [self._position]

        self._current_tick = self._start_tick
        self._last_trade_tick = None
        self._position = Positions.Neutral

        self.total_reward = 0.
        self._total_profit = 1.  # unit
        self.history = {}
        self.trade_history = []
        self.portfolio_log_returns = np.zeros(len(self.prices))

        self._profits = [(self._start_tick, 1)]
        self.close_trade_profit = []
        self._total_unrealized_profit = 1

        return self._get_observation()

    def step(self, action: int):
        """
        Logic for a single step (incrementing one candle in time)
        by the agent
        :param: action: int = the action type that the agent plans
            to take for the current step.
        :returns:
            observation = current state of environment
            step_reward = the reward from `calculate_reward()`
            _done = if the agent "died" or if the candles finished
            info = dict passed back to openai gym lib
        """
        self._done = False
        self._current_tick += 1

        if self._current_tick == self._end_tick:
            self._done = True

        self.update_portfolio_log_returns(action)

        self._update_unrealized_total_profit()
        step_reward = self.calculate_reward(action)
        self.total_reward += step_reward

        trade_type = None
        if self.is_tradesignal(action):
            # Action: Neutral, position: Long -> Close Long
            # Action: Neutral, position: Short -> Close Short
            #
            # Action: Long, position: Neutral -> Open Long
            # Action: Long, position: Short -> Close Short and Open Long
            #
            # Action: Short, position: Neutral -> Open Short
            # Action: Short, position: Long -> Close Long and Open Short
            if action == Actions.Neutral.value:
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            elif action == Actions.Long_enter.value:
                self._position = Positions.Long
                trade_type = "long"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Short_enter.value:
                self._position = Positions.Short
                trade_type = "short"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Long_exit.value:
                self._update_total_profit()
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            elif action == Actions.Short_exit.value:
                self._update_total_profit()
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            else:
                # fixed: was print(); route diagnostics through the module logger
                logger.warning("case not defined")

            if trade_type is not None:
                self.trade_history.append(
                    {'price': self.current_price(), 'index': self._current_tick,
                     'type': trade_type})

        # End the episode when the (realized or unrealized) drawdown limit is hit.
        if (self._total_profit < self.max_drawdown or
                self._total_unrealized_profit < self.max_drawdown):
            self._done = True

        self._position_history.append(self._position)

        info = dict(
            tick=self._current_tick,
            total_reward=self.total_reward,
            total_profit=self._total_profit,
            position=self._position.value
        )

        observation = self._get_observation()

        self._update_history(info)

        return observation, step_reward, self._done, info

    def _get_observation(self):
        """
        Build the observation: the feature window plus three state columns
        (unrealized profit, position value, trade duration).
        """
        features_window = self.signal_features[(
            self._current_tick - self.window_size):self._current_tick]
        features_and_state = DataFrame(np.zeros((len(features_window), 3)),
                                       columns=['current_profit_pct',
                                                'position', 'trade_duration'],
                                       index=features_window.index)

        features_and_state['current_profit_pct'] = self.get_unrealized_profit()
        features_and_state['position'] = self._position.value
        features_and_state['trade_duration'] = self.get_trade_duration()
        features_and_state = pd.concat([features_window, features_and_state], axis=1)
        return features_and_state

    def get_trade_duration(self):
        """Return number of candles since the current trade was opened (0 when flat)."""
        if self._last_trade_tick is None:
            return 0
        else:
            return self._current_tick - self._last_trade_tick

    def is_tradesignal(self, action: int):
        """
        Determine if the signal is a trade signal
        e.g.: agent wants a Actions.Long_exit while it is in a Positions.short
        """
        return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or
                    (action == Actions.Neutral.value and self._position == Positions.Short) or
                    (action == Actions.Neutral.value and self._position == Positions.Long) or
                    (action == Actions.Short_enter.value and self._position == Positions.Short) or
                    (action == Actions.Short_enter.value and self._position == Positions.Long) or
                    (action == Actions.Short_exit.value and self._position == Positions.Long) or
                    (action == Actions.Short_exit.value and self._position == Positions.Neutral) or
                    (action == Actions.Long_enter.value and self._position == Positions.Long) or
                    (action == Actions.Long_enter.value and self._position == Positions.Short) or
                    (action == Actions.Long_exit.value and self._position == Positions.Short) or
                    (action == Actions.Long_exit.value and self._position == Positions.Neutral))

    def _is_valid(self, action: int):
        """
        Determine if the signal is valid.
        e.g.: agent wants a Actions.Long_exit while it is in a Positions.short
        """
        # Agent should only try to exit if it is in position
        # NOTE(review): this accepts e.g. Long_exit while in a Short position
        # (any exit is valid while in any position); is_tradesignal() filters
        # the mismatched exit out later — confirm this looseness is intended.
        if action in (Actions.Short_exit.value, Actions.Long_exit.value):
            if self._position not in (Positions.Short, Positions.Long):
                return False

        # Agent should only try to enter if it is not in position
        if action in (Actions.Short_enter.value, Actions.Long_enter.value):
            if self._position != Positions.Neutral:
                return False

        return True
|
267
freqtrade/freqai/RL/BaseEnvironment.py
Normal file
267
freqtrade/freqai/RL/BaseEnvironment.py
Normal file
@@ -0,0 +1,267 @@
|
|||||||
|
import logging
|
||||||
|
from abc import abstractmethod
|
||||||
|
from enum import Enum
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import gym
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
from gym import spaces
|
||||||
|
from gym.utils import seeding
|
||||||
|
from pandas import DataFrame
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class Positions(Enum):
    """Market position of the agent; Neutral sits between Short (0) and Long (1)."""
    Short = 0
    Long = 1
    Neutral = 0.5

    def opposite(self):
        """Return Long for Short and Short for Long (Neutral also maps to Long)."""
        return Positions.Short if self == Positions.Long else Positions.Long
|
||||||
|
|
||||||
|
|
||||||
|
class BaseEnvironment(gym.Env):
    """
    Base class for environments. This class is agnostic to action count.
    Inherited classes customize this to include varying action counts/types,
    See RL/Base5ActionRLEnv.py and RL/Base4ActionRLEnv.py
    """

    def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(),
                 reward_kwargs: Optional[dict] = None, window_size=10, starting_point=True,
                 id: str = 'baseenv-1', seed: int = 1, config: Optional[dict] = None):
        """
        :param df: dataframe of features
        :param prices: dataframe of prices to be used in the environment
        :param reward_kwargs: user-set parameters for the reward function
        :param window_size: size of the observation window of candles
        :param starting_point: start each episode at the first usable tick
        :param id: environment id string
        :param seed: rng seed
        :param config: full user configuration (must contain 'freqai' and
            'stake_amount' keys)
        """
        # fixed: mutable default arguments ({}) are shared across instances;
        # use None sentinels instead.
        reward_kwargs = {} if reward_kwargs is None else reward_kwargs
        config = {} if config is None else config

        self.rl_config = config['freqai']['rl_config']
        self.id = id
        self.seed(seed)
        self.reset_env(df, prices, window_size, reward_kwargs, starting_point)
        # episode terminates once profit drops below this floor
        self.max_drawdown = 1 - self.rl_config.get('max_training_drawdown_pct', 0.8)
        self.compound_trades = config['stake_amount'] == 'unlimited'

    def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int,
                  reward_kwargs: dict, starting_point=True):
        """
        Resets the environment when the agent fails (in our case, if the drawdown
        exceeds the user set max_training_drawdown_pct)
        """
        self.df = df
        self.signal_features = self.df
        self.prices = prices
        self.window_size = window_size
        self.starting_point = starting_point
        self.rr = reward_kwargs["rr"]
        self.profit_aim = reward_kwargs["profit_aim"]

        # flat trading fee applied on entry/exit price adjustments
        self.fee = 0.0015

        # spaces: +3 for the appended state columns (profit, position, duration)
        self.shape = (window_size, self.signal_features.shape[1] + 3)
        self.set_action_space()
        self.observation_space = spaces.Box(
            low=-1, high=1, shape=self.shape, dtype=np.float32)

        # episode
        self._start_tick: int = self.window_size
        self._end_tick: int = len(self.prices) - 1
        self._done: bool = False
        self._current_tick: int = self._start_tick
        self._last_trade_tick: Optional[int] = None
        self._position = Positions.Neutral
        self._position_history: list = [None]
        self.total_reward: float = 0
        self._total_profit: float = 1
        self._total_unrealized_profit: float = 1
        self.history: dict = {}
        self.trade_history: list = []

    @abstractmethod
    def set_action_space(self):
        """
        Unique to the environment action count. Must be inherited.
        """

    def seed(self, seed: int = 1):
        """Seed the environment rng; returns the seed list per gym convention."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reset(self):
        """Reset episode state and return the first observation."""
        self._done = False

        if self.starting_point is True:
            self._position_history = (self._start_tick * [None]) + [self._position]
        else:
            self._position_history = (self.window_size * [None]) + [self._position]

        self._current_tick = self._start_tick
        self._last_trade_tick = None
        self._position = Positions.Neutral

        self.total_reward = 0.
        self._total_profit = 1.  # unit
        self.history = {}
        self.trade_history = []
        self.portfolio_log_returns = np.zeros(len(self.prices))

        self._profits = [(self._start_tick, 1)]
        self.close_trade_profit = []
        self._total_unrealized_profit = 1

        return self._get_observation()

    @abstractmethod
    def step(self, action: int):
        """
        Step depeneds on action types, this must be inherited.
        """
        return

    def _get_observation(self):
        """
        This may or may not be independent of action types, user can inherit
        this in their custom "MyRLEnv"
        """
        features_window = self.signal_features[(
            self._current_tick - self.window_size):self._current_tick]
        features_and_state = DataFrame(np.zeros((len(features_window), 3)),
                                       columns=['current_profit_pct',
                                                'position', 'trade_duration'],
                                       index=features_window.index)

        features_and_state['current_profit_pct'] = self.get_unrealized_profit()
        features_and_state['position'] = self._position.value
        features_and_state['trade_duration'] = self.get_trade_duration()
        features_and_state = pd.concat([features_window, features_and_state], axis=1)
        return features_and_state

    def get_trade_duration(self):
        """
        Get the trade duration if the agent is in a trade
        """
        if self._last_trade_tick is None:
            return 0
        else:
            return self._current_tick - self._last_trade_tick

    def get_unrealized_profit(self):
        """
        Get the unrealized profit if the agent is in a trade
        """
        if self._last_trade_tick is None:
            return 0.

        if self._position == Positions.Neutral:
            return 0.
        elif self._position == Positions.Short:
            current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open)
            last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open)
            return (last_trade_price - current_price) / last_trade_price
        elif self._position == Positions.Long:
            current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open)
            last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open)
            return (current_price - last_trade_price) / last_trade_price
        else:
            return 0.

    @abstractmethod
    def is_tradesignal(self, action: int):
        """
        Determine if the signal is a trade signal. This is
        unique to the actions in the environment, and therefore must be
        inherited.
        """
        return

    def _is_valid(self, action: int):
        """
        Determine if the signal is valid.This is
        unique to the actions in the environment, and therefore must be
        inherited.
        """
        return

    def add_entry_fee(self, price):
        """Return the effective price after paying the entry fee."""
        return price * (1 + self.fee)

    def add_exit_fee(self, price):
        """Return the effective price after paying the exit fee."""
        return price / (1 + self.fee)

    def _update_history(self, info):
        """Append each value in ``info`` to the per-key episode history."""
        if not self.history:
            self.history = {key: [] for key in info.keys()}

        for key, value in info.items():
            self.history[key].append(value)

    @abstractmethod
    def calculate_reward(self, action):
        """
        An example reward function. This is the one function that users will likely
        wish to inject their own creativity into.
        :params:
        action: int = The action made by the agent for the current candle.
        :returns:
        float = the reward to give to the agent for current step (used for optimization
            of weights in NN)
        """

    def _update_unrealized_total_profit(self):
        """
        Update the unrealized total profit incase of episode end.
        """
        if self._position in (Positions.Long, Positions.Short):
            pnl = self.get_unrealized_profit()
            if self.compound_trades:
                # assumes unit stake and compounding
                unrl_profit = self._total_profit * (1 + pnl)
            else:
                # assumes unit stake and no compounding
                unrl_profit = self._total_profit + pnl
            self._total_unrealized_profit = unrl_profit

    def _update_total_profit(self):
        """Fold the realized pnl of the closing trade into the running total."""
        pnl = self.get_unrealized_profit()
        if self.compound_trades:
            # assumes unit stake and compounding
            self._total_profit = self._total_profit * (1 + pnl)
        else:
            # assumes unit stake and no compounding
            self._total_profit += pnl

    def most_recent_return(self, action: int):
        """
        Calculate the tick to tick return if in a trade.
        Return is generated from rising prices in Long
        and falling prices in Short positions.
        The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee.
        """
        # Long positions
        if self._position == Positions.Long:
            current_price = self.prices.iloc[self._current_tick].open
            previous_price = self.prices.iloc[self._current_tick - 1].open

            if (self._position_history[self._current_tick - 1] == Positions.Short
                    or self._position_history[self._current_tick - 1] == Positions.Neutral):
                previous_price = self.add_entry_fee(previous_price)

            return np.log(current_price) - np.log(previous_price)

        # Short positions
        if self._position == Positions.Short:
            current_price = self.prices.iloc[self._current_tick].open
            previous_price = self.prices.iloc[self._current_tick - 1].open
            if (self._position_history[self._current_tick - 1] == Positions.Long
                    or self._position_history[self._current_tick - 1] == Positions.Neutral):
                previous_price = self.add_exit_fee(previous_price)

            return np.log(previous_price) - np.log(current_price)

        return 0

    def update_portfolio_log_returns(self, action):
        """Record the most recent tick-to-tick log return for the current tick."""
        self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action)

    def current_price(self) -> float:
        """Open price of the current candle."""
        return self.prices.iloc[self._current_tick].open
|
376
freqtrade/freqai/RL/BaseReinforcementLearningModel.py
Normal file
376
freqtrade/freqai/RL/BaseReinforcementLearningModel.py
Normal file
@@ -0,0 +1,376 @@
|
|||||||
|
import logging
|
||||||
|
from abc import abstractmethod
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Callable, Dict, Tuple, Type, Union
|
||||||
|
|
||||||
|
import gym
|
||||||
|
import numpy as np
|
||||||
|
import numpy.typing as npt
|
||||||
|
import pandas as pd
|
||||||
|
import torch as th
|
||||||
|
import torch.multiprocessing
|
||||||
|
from pandas import DataFrame
|
||||||
|
from stable_baselines3.common.callbacks import EvalCallback
|
||||||
|
from stable_baselines3.common.monitor import Monitor
|
||||||
|
from stable_baselines3.common.utils import set_random_seed
|
||||||
|
from stable_baselines3.common.vec_env import SubprocVecEnv
|
||||||
|
|
||||||
|
from freqtrade.exceptions import OperationalException
|
||||||
|
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||||
|
from freqtrade.freqai.freqai_interface import IFreqaiModel
|
||||||
|
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
|
||||||
|
from freqtrade.freqai.RL.BaseEnvironment import Positions
|
||||||
|
from freqtrade.persistence import Trade
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Use file-system based tensor sharing for torch multiprocessing workers.
# NOTE(review): presumably chosen to avoid file-descriptor exhaustion with
# many workers — confirm against torch.multiprocessing docs.
torch.multiprocessing.set_sharing_strategy('file_system')

# Model names accepted for rl_config['model_type'], grouped by providing package.
SB3_MODELS = ['PPO', 'A2C', 'DQN']
SB3_CONTRIB_MODELS = ['TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO']
|
||||||
|
|
||||||
|
|
||||||
|
class BaseReinforcementLearningModel(IFreqaiModel):
|
||||||
|
"""
|
||||||
|
User created Reinforcement Learning Model prediction class
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
    """
    Set up the RL model: thread limits, reward parameters, and dynamic
    resolution of the model class named in rl_config['model_type'] from
    stable_baselines3 or sb3_contrib.

    :raises OperationalException: if the configured model_type is in
        neither SB3_MODELS nor SB3_CONTRIB_MODELS.
    """
    super().__init__(config=kwargs['config'])
    # Thread cap: user-configured cpu_count, bounded by half the system
    # threads (at least 1).
    self.max_threads = min(self.freqai_info['rl_config'].get(
        'cpu_count', 1), max(int(self.max_system_threads / 2), 1))
    th.set_num_threads(self.max_threads)
    self.reward_params = self.freqai_info['rl_config']['model_reward_parameters']
    # Environments and eval callback are populated later by
    # set_train_and_eval_environments().
    self.train_env: Union[SubprocVecEnv, gym.Env] = None
    self.eval_env: Union[SubprocVecEnv, gym.Env] = None
    self.eval_callback: EvalCallback = None
    self.model_type = self.freqai_info['rl_config']['model_type']
    self.rl_config = self.freqai_info['rl_config']
    self.continual_learning = self.freqai_info.get('continual_learning', False)
    # Resolve which package provides the requested model class.
    if self.model_type in SB3_MODELS:
        import_str = 'stable_baselines3'
    elif self.model_type in SB3_CONTRIB_MODELS:
        import_str = 'sb3_contrib'
    else:
        raise OperationalException(f'{self.model_type} not available in stable_baselines3 or '
                                   f'sb3_contrib. please choose one of {SB3_MODELS} or '
                                   f'{SB3_CONTRIB_MODELS}')

    # Dynamically import the package and fetch the model class by name.
    mod = __import__(import_str, fromlist=[
        self.model_type])
    self.MODELCLASS = getattr(mod, self.model_type)
    self.policy_type = self.freqai_info['rl_config']['policy_type']
    # RL training is incompatible with outlier-removal features; disable them.
    self.unset_outlier_removal()
|
||||||
|
|
||||||
|
def unset_outlier_removal(self):
    """
    If user has activated any function that may remove training points, this
    function will set them to false and warn them
    """
    if self.ft_params.get('use_SVM_to_remove_outliers', False):
        self.ft_params.update({'use_SVM_to_remove_outliers': False})
        logger.warning('User tried to use SVM with RL. Deactivating SVM.')
    if self.ft_params.get('use_DBSCAN_to_remove_outliers', False):
        # fixed: this branch was clearing the SVM flag instead of the DBSCAN flag
        self.ft_params.update({'use_DBSCAN_to_remove_outliers': False})
        logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.')
    if self.freqai_info['data_split_parameters'].get('shuffle', False):
        # fixed: dict.update() takes a mapping, not a (key, value) pair —
        # the original .update('shuffle', False) raised TypeError
        self.freqai_info['data_split_parameters'].update({'shuffle': False})
        logger.warning('User tried to shuffle training data. Setting shuffle to False')
|
||||||
|
|
||||||
|
def train(
    self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
    """
    Filter the training data and train a model to it. Train makes heavy use of the datakitchen
    for storing, saving, loading, and analyzing the data.
    :param unfiltered_df: Full dataframe for the current training period
    :param pair: COIN/STAKE pair being trained
    :returns:
    :model: Trained model which can be used to inference (self.predict)
    """

    logger.info("--------------------Starting training " f"{pair} --------------------")

    features_filtered, labels_filtered = dk.filter_features(
        unfiltered_df,
        dk.training_features_list,
        dk.label_list,
        training_filter=True,
    )

    data_dictionary: Dict[str, Any] = dk.make_train_test_datasets(
        features_filtered, labels_filtered)
    dk.fit_labels()  # FIXME useless for now, but just satiating append methods

    # Build the raw OHLC price frames BEFORE normalization so the environment
    # sees unnormalized prices; then normalize all data based on train_dataset only.
    prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk)
    data_dictionary = dk.normalize_data(data_dictionary)

    # data cleaning/analysis
    self.data_cleaning_train(dk)

    logger.info(
        f'Training model on {len(dk.data_dictionary["train_features"].columns)}'
        f' features and {len(data_dictionary["train_features"])} data points'
    )

    # Create/refresh train & eval gym environments for this pair.
    self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk)

    model = self.fit(data_dictionary, dk)

    logger.info(f"--------------------done training {pair}--------------------")

    return model
|
||||||
|
|
||||||
|
def set_train_and_eval_environments(self, data_dictionary: Dict[str, DataFrame],
                                    prices_train: DataFrame, prices_test: DataFrame,
                                    dk: FreqaiDataKitchen):
    """
    User can override this if they are using a custom MyRLEnv
    :params:
    data_dictionary: dict = common data dictionary containing train and test
        features/labels/weights.
    prices_train/test: DataFrame = dataframe comprised of the prices to be used in the
        environment during training or testing
    dk: FreqaiDataKitchen = the datakitchen for the current pair
    """
    train_df = data_dictionary["train_features"]
    test_df = data_dictionary["test_features"]

    # Train env is used directly; eval env is wrapped in a Monitor so the
    # EvalCallback can record episode statistics.
    self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH,
                                  reward_kwargs=self.reward_params, config=self.config)
    self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test,
                                         window_size=self.CONV_WIDTH,
                                         reward_kwargs=self.reward_params, config=self.config))
    # Evaluate once per full pass over the training set and persist the best
    # model into the datakitchen's data path.
    self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                      render=False, eval_freq=len(train_df),
                                      best_model_save_path=str(dk.data_path))
|
||||||
|
|
||||||
|
@abstractmethod
def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs):
    """
    Agent customizations and abstract Reinforcement Learning customizations
    go in here. Abstract method, so this function must be overridden by
    user class.

    :param data_dictionary: dict of train/test features, labels, and weights
    :param dk: datakitchen for the current pair
    :returns: the trained model (defined by the subclass)
    """
    return
|
||||||
|
|
||||||
|
def get_state_info(self, pair: str) -> Tuple[float, float, int]:
    """
    State info during dry/live/backtesting which is fed back
    into the model.
    :param:
    pair: str = COIN/STAKE to get the environment information for
    :returns:
    market_side: float = representing short (0), long (1), or neutral (0.5)
        for pair — matches the Positions enum values
    current_profit: float = unrealized profit fraction of the open trade
        for pair (0 when no trade is open)
    trade_duration: int = the number of candles that the trade has
        been open for
    """
    open_trades = Trade.get_trades_proxy(is_open=True)
    market_side = 0.5  # neutral default when no trade is open for this pair
    current_profit: float = 0
    trade_duration = 0
    for trade in open_trades:
        if trade.pair == pair:
            if self.strategy.dp._exchange is None:  # type: ignore
                logger.error('No exchange available.')
            else:
                # current exit-side rate, taken from cache (refresh=False)
                current_value = self.strategy.dp._exchange.get_rate(  # type: ignore
                    pair, refresh=False, side="exit", is_short=trade.is_short)
                openrate = trade.open_rate
                now = datetime.now(timezone.utc).timestamp()
                # duration in candles of the base timeframe
                trade_duration = int((now - trade.open_date.timestamp()) / self.base_tf_seconds)
                # NOTE(review): direction is inferred from the enter_tag text
                # containing 'long' — confirm tags are always set this way.
                if 'long' in str(trade.enter_tag):
                    market_side = 1
                    current_profit = (current_value - openrate) / openrate
                else:
                    market_side = 0
                    current_profit = (openrate - current_value) / openrate

    return market_side, current_profit, int(trade_duration)
|
||||||
|
|
||||||
|
def predict(
    self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
    """
    Filter the prediction features data and predict with it.
    :param: unfiltered_df: Full dataframe for the current backtest period.
    :return:
    :pred_df: dataframe containing the predictions
    :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
    data (NaNs) or felt uncertain about data (PCA and DI index)
    """

    dk.find_features(unfiltered_df)
    filtered_dataframe, _ = dk.filter_features(
        unfiltered_df, dk.training_features_list, training_filter=False
    )
    filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
    dk.data_dictionary["prediction_features"] = filtered_dataframe

    # optional additional data cleaning/analysis
    self.data_cleaning_predict(dk)

    pred_df = self.rl_model_predict(
        dk.data_dictionary["prediction_features"], dk, self.model)
    # rolling-window inference leaves NaNs in the first CONV_WIDTH-1 rows;
    # treat them as neutral (0)
    pred_df.fillna(0, inplace=True)

    return (pred_df, dk.do_predict)
|
||||||
|
|
||||||
|
def rl_model_predict(self, dataframe: DataFrame,
                     dk: FreqaiDataKitchen, model: Any) -> DataFrame:
    """
    A helper function to make predictions in the Reinforcement learning module.
    :params:
    dataframe: DataFrame = the dataframe of features to make the predictions on
    dk: FreqaiDatakitchen = data kitchen for the current pair
    model: Any = the trained model used to inference the features.
    """
    output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list)

    def _predict(window):
        # Append the live state columns (profit/position/duration) so the
        # observation matches the layout the environment produced in training.
        market_side, current_profit, trade_duration = self.get_state_info(dk.pair)
        observations = dataframe.iloc[window.index]
        observations['current_profit_pct'] = current_profit
        observations['position'] = market_side
        observations['trade_duration'] = trade_duration
        res, _ = model.predict(observations, deterministic=True)
        return res

    # One model call per sliding window of CONV_WIDTH candles; rows before
    # the first full window come back as NaN.
    output = output.rolling(window=self.CONV_WIDTH).apply(_predict)

    return output
|
||||||
|
|
||||||
|
def build_ohlc_price_dataframes(self, data_dictionary: dict,
                                pair: str, dk: FreqaiDataKitchen) -> Tuple[DataFrame,
                                                                           DataFrame]:
    """
    Builds the train prices and test prices for the environment.

    :param data_dictionary: dict holding "train_features" and "test_features"
    :param pair: COIN/STAKE pair; the coin prefixes the raw price columns
    :param dk: datakitchen for the current pair (unused here)
    :returns: (prices_train, prices_test) dataframes with open/low/high/close
    """

    coin = pair.split('/')[0]
    train_df = data_dictionary["train_features"]
    test_df = data_dictionary["test_features"]

    # price data for model training and evaluation
    tf = self.config['timeframe']
    ohlc_list = [f'%-{coin}raw_open_{tf}', f'%-{coin}raw_low_{tf}',
                 f'%-{coin}raw_high_{tf}', f'%-{coin}raw_close_{tf}']
    # fixed: the 'high' target column name had a stray leading space (' high')
    rename_dict = {f'%-{coin}raw_open_{tf}': 'open', f'%-{coin}raw_low_{tf}': 'low',
                   f'%-{coin}raw_high_{tf}': 'high', f'%-{coin}raw_close_{tf}': 'close'}

    prices_train = train_df.filter(ohlc_list, axis=1)
    prices_train.rename(columns=rename_dict, inplace=True)
    # fixed: reset_index() returns a new frame — without inplace=True the
    # original call was a no-op
    prices_train.reset_index(drop=True, inplace=True)

    prices_test = test_df.filter(ohlc_list, axis=1)
    prices_test.rename(columns=rename_dict, inplace=True)
    prices_test.reset_index(drop=True, inplace=True)

    return prices_train, prices_test
|
||||||
|
|
||||||
|
def load_model_from_disk(self, dk: FreqaiDataKitchen) -> Any:
    """
    Can be used by user if they are trying to limit_ram_usage *and*
    perform continual learning.
    For now, this is unused.

    :param dk: datakitchen for the current pair
    :returns: the loaded model, or None when no model file exists on disk
    """
    # fixed: model was unbound when the file did not exist, so the final
    # return raised NameError instead of signalling "no model".
    model = None
    exists = Path(dk.data_path / f"{dk.model_filename}_model").is_file()
    if exists:
        model = self.MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model")
    else:
        logger.info('No model file on disk to continue learning from.')

    return model
|
||||||
|
|
||||||
|
# Nested class which can be overridden by user to customize further
|
||||||
|
class MyRLEnv(Base5ActionRLEnv):
|
||||||
|
"""
|
||||||
|
User can override any function in BaseRLEnv and gym.Env. Here the user
|
||||||
|
sets a custom reward based on profit and trade duration.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def calculate_reward(self, action: int) -> float:
|
||||||
|
"""
|
||||||
|
An example reward function. This is the one function that users will likely
|
||||||
|
wish to inject their own creativity into.
|
||||||
|
:params:
|
||||||
|
action: int = The action made by the agent for the current candle.
|
||||||
|
:returns:
|
||||||
|
float = the reward to give to the agent for current step (used for optimization
|
||||||
|
of weights in NN)
|
||||||
|
"""
|
||||||
|
# first, penalize if the action is not valid
|
||||||
|
if not self._is_valid(action):
|
||||||
|
return -2
|
||||||
|
|
||||||
|
pnl = self.get_unrealized_profit()
|
||||||
|
rew = np.sign(pnl) * (pnl + 1)
|
||||||
|
factor = 100.
|
||||||
|
|
||||||
|
# reward agent for entering trades
|
||||||
|
if (action in (Actions.Long_enter.value, Actions.Short_enter.value)
|
||||||
|
and self._position == Positions.Neutral):
|
||||||
|
return 25
|
||||||
|
# discourage agent from not entering trades
|
||||||
|
if action == Actions.Neutral.value and self._position == Positions.Neutral:
|
||||||
|
return -1
|
||||||
|
|
||||||
|
max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
|
||||||
|
if self._last_trade_tick:
|
||||||
|
trade_duration = self._current_tick - self._last_trade_tick
|
||||||
|
else:
|
||||||
|
trade_duration = 0
|
||||||
|
|
||||||
|
if trade_duration <= max_trade_duration:
|
||||||
|
factor *= 1.5
|
||||||
|
elif trade_duration > max_trade_duration:
|
||||||
|
factor *= 0.5
|
||||||
|
|
||||||
|
# discourage sitting in position
|
||||||
|
if (self._position in (Positions.Short, Positions.Long) and
|
||||||
|
action == Actions.Neutral.value):
|
||||||
|
return -1 * trade_duration / max_trade_duration
|
||||||
|
|
||||||
|
# close long
|
||||||
|
if action == Actions.Long_exit.value and self._position == Positions.Long:
|
||||||
|
if pnl > self.profit_aim * self.rr:
|
||||||
|
factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
|
||||||
|
return float(rew * factor)
|
||||||
|
|
||||||
|
# close short
|
||||||
|
if action == Actions.Short_exit.value and self._position == Positions.Short:
|
||||||
|
if pnl > self.profit_aim * self.rr:
|
||||||
|
factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
|
||||||
|
return float(rew * factor)
|
||||||
|
|
||||||
|
return 0.
|
||||||
|
|
||||||
|
|
||||||
|
def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,
|
||||||
|
seed: int, train_df: DataFrame, price: DataFrame,
|
||||||
|
reward_params: Dict[str, int], window_size: int, monitor: bool = False,
|
||||||
|
config: Dict[str, Any] = {}) -> Callable:
|
||||||
|
"""
|
||||||
|
Utility function for multiprocessed env.
|
||||||
|
|
||||||
|
:param env_id: (str) the environment ID
|
||||||
|
:param num_env: (int) the number of environment you wish to have in subprocesses
|
||||||
|
:param seed: (int) the inital seed for RNG
|
||||||
|
:param rank: (int) index of the subprocess
|
||||||
|
:return: (Callable)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def _init() -> gym.Env:
|
||||||
|
|
||||||
|
env = MyRLEnv(df=train_df, prices=price, window_size=window_size,
|
||||||
|
reward_kwargs=reward_params, id=env_id, seed=seed + rank, config=config)
|
||||||
|
if monitor:
|
||||||
|
env = Monitor(env)
|
||||||
|
return env
|
||||||
|
set_random_seed(seed)
|
||||||
|
return _init
|
0
freqtrade/freqai/RL/__init__.py
Normal file
0
freqtrade/freqai/RL/__init__.py
Normal file
@@ -3,10 +3,10 @@ from time import time
|
|||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from pandas import DataFrame
|
from pandas import DataFrame
|
||||||
|
import numpy as np
|
||||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||||
from freqtrade.freqai.freqai_interface import IFreqaiModel
|
from freqtrade.freqai.freqai_interface import IFreqaiModel
|
||||||
|
import tensorflow as tf
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -17,6 +17,13 @@ class BaseTensorFlowModel(IFreqaiModel):
|
|||||||
User *must* inherit from this class and set fit() and predict().
|
User *must* inherit from this class and set fit() and predict().
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
super().__init__(config=kwargs['config'])
|
||||||
|
self.keras = True
|
||||||
|
if self.ft_params.get("DI_threshold", 0):
|
||||||
|
self.ft_params["DI_threshold"] = 0
|
||||||
|
logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
|
||||||
|
|
||||||
def train(
|
def train(
|
||||||
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
|
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
|
||||||
) -> Any:
|
) -> Any:
|
||||||
@@ -68,3 +75,76 @@ class BaseTensorFlowModel(IFreqaiModel):
|
|||||||
f"({end_time - start_time:.2f} secs) --------------------")
|
f"({end_time - start_time:.2f} secs) --------------------")
|
||||||
|
|
||||||
return model
|
return model
|
||||||
|
|
||||||
|
|
||||||
|
class WindowGenerator:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
input_width,
|
||||||
|
label_width,
|
||||||
|
shift,
|
||||||
|
train_df=None,
|
||||||
|
val_df=None,
|
||||||
|
test_df=None,
|
||||||
|
train_labels=None,
|
||||||
|
val_labels=None,
|
||||||
|
test_labels=None,
|
||||||
|
batch_size=None,
|
||||||
|
):
|
||||||
|
# Store the raw data.
|
||||||
|
self.train_df = train_df
|
||||||
|
self.val_df = val_df
|
||||||
|
self.test_df = test_df
|
||||||
|
self.train_labels = train_labels
|
||||||
|
self.val_labels = val_labels
|
||||||
|
self.test_labels = test_labels
|
||||||
|
self.batch_size = batch_size
|
||||||
|
self.input_width = input_width
|
||||||
|
self.label_width = label_width
|
||||||
|
self.shift = shift
|
||||||
|
self.total_window_size = input_width + shift
|
||||||
|
self.input_slice = slice(0, input_width)
|
||||||
|
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
|
||||||
|
|
||||||
|
def make_dataset(self, data, labels=None):
|
||||||
|
data = np.array(data, dtype=np.float32)
|
||||||
|
if labels is not None:
|
||||||
|
labels = np.array(labels, dtype=np.float32)
|
||||||
|
ds = tf.keras.preprocessing.timeseries_dataset_from_array(
|
||||||
|
data=data,
|
||||||
|
targets=labels,
|
||||||
|
sequence_length=self.total_window_size,
|
||||||
|
sequence_stride=1,
|
||||||
|
sampling_rate=1,
|
||||||
|
shuffle=False,
|
||||||
|
batch_size=self.batch_size,
|
||||||
|
)
|
||||||
|
|
||||||
|
return ds
|
||||||
|
|
||||||
|
@property
|
||||||
|
def train(self):
|
||||||
|
return self.make_dataset(self.train_df, self.train_labels)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def val(self):
|
||||||
|
return self.make_dataset(self.val_df, self.val_labels)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def test(self):
|
||||||
|
return self.make_dataset(self.test_df, self.test_labels)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def inference(self):
|
||||||
|
return self.make_dataset(self.test_df)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def example(self):
|
||||||
|
"""Get and cache an example batch of `inputs, labels` for plotting."""
|
||||||
|
result = getattr(self, "_example", None)
|
||||||
|
if result is None:
|
||||||
|
# No example batch was found, so get one from the `.train` dataset
|
||||||
|
result = next(iter(self.train))
|
||||||
|
# And cache it for next time
|
||||||
|
self._example = result
|
||||||
|
return result
|
||||||
|
@@ -91,6 +91,13 @@ class FreqaiDataDrawer:
|
|||||||
self.empty_pair_dict: pair_info = {
|
self.empty_pair_dict: pair_info = {
|
||||||
"model_filename": "", "trained_timestamp": 0,
|
"model_filename": "", "trained_timestamp": 0,
|
||||||
"data_path": "", "extras": {}}
|
"data_path": "", "extras": {}}
|
||||||
|
self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False)
|
||||||
|
if 'rl_config' in self.freqai_info:
|
||||||
|
self.model_type = 'stable_baselines'
|
||||||
|
logger.warning('User indicated rl_config, FreqAI will now use stable_baselines3'
|
||||||
|
' to save models.')
|
||||||
|
else:
|
||||||
|
self.model_type = self.freqai_info.get('model_save_type', 'joblib')
|
||||||
|
|
||||||
def load_drawer_from_disk(self):
|
def load_drawer_from_disk(self):
|
||||||
"""
|
"""
|
||||||
@@ -257,7 +264,7 @@ class FreqaiDataDrawer:
|
|||||||
|
|
||||||
def append_model_predictions(self, pair: str, predictions: DataFrame,
|
def append_model_predictions(self, pair: str, predictions: DataFrame,
|
||||||
do_preds: NDArray[np.int_],
|
do_preds: NDArray[np.int_],
|
||||||
dk: FreqaiDataKitchen, len_df: int) -> None:
|
dk: FreqaiDataKitchen, strat_df: DataFrame) -> None:
|
||||||
"""
|
"""
|
||||||
Append model predictions to historic predictions dataframe, then set the
|
Append model predictions to historic predictions dataframe, then set the
|
||||||
strategy return dataframe to the tail of the historic predictions. The length of
|
strategy return dataframe to the tail of the historic predictions. The length of
|
||||||
@@ -266,6 +273,7 @@ class FreqaiDataDrawer:
|
|||||||
historic predictions.
|
historic predictions.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
len_df = len(strat_df)
|
||||||
index = self.historic_predictions[pair].index[-1:]
|
index = self.historic_predictions[pair].index[-1:]
|
||||||
columns = self.historic_predictions[pair].columns
|
columns = self.historic_predictions[pair].columns
|
||||||
|
|
||||||
@@ -293,6 +301,15 @@ class FreqaiDataDrawer:
|
|||||||
for return_str in rets:
|
for return_str in rets:
|
||||||
df[return_str].iloc[-1] = rets[return_str]
|
df[return_str].iloc[-1] = rets[return_str]
|
||||||
|
|
||||||
|
# this logic carries users between version without needing to
|
||||||
|
# change their identifier
|
||||||
|
if 'close_price' not in df.columns:
|
||||||
|
df['close_price'] = np.nan
|
||||||
|
df['date_pred'] = np.nan
|
||||||
|
|
||||||
|
df['close_price'].iloc[-1] = strat_df['close'].iloc[-1]
|
||||||
|
df['date_pred'].iloc[-1] = strat_df['date'].iloc[-1]
|
||||||
|
|
||||||
self.model_return_values[pair] = df.tail(len_df).reset_index(drop=True)
|
self.model_return_values[pair] = df.tail(len_df).reset_index(drop=True)
|
||||||
|
|
||||||
def attach_return_values_to_return_dataframe(
|
def attach_return_values_to_return_dataframe(
|
||||||
@@ -413,10 +430,12 @@ class FreqaiDataDrawer:
|
|||||||
save_path = Path(dk.data_path)
|
save_path = Path(dk.data_path)
|
||||||
|
|
||||||
# Save the trained model
|
# Save the trained model
|
||||||
if not dk.keras:
|
if self.model_type == 'joblib':
|
||||||
dump(model, save_path / f"{dk.model_filename}_model.joblib")
|
dump(model, save_path / f"{dk.model_filename}_model.joblib")
|
||||||
else:
|
elif self.model_type == 'keras':
|
||||||
model.save(save_path / f"{dk.model_filename}_model.h5")
|
model.save(save_path / f"{dk.model_filename}_model.h5")
|
||||||
|
elif 'stable_baselines' in self.model_type:
|
||||||
|
model.save(save_path / f"{dk.model_filename}_model.zip")
|
||||||
|
|
||||||
if dk.svm_model is not None:
|
if dk.svm_model is not None:
|
||||||
dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib")
|
dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib")
|
||||||
@@ -443,8 +462,8 @@ class FreqaiDataDrawer:
|
|||||||
dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb")
|
dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb")
|
||||||
)
|
)
|
||||||
|
|
||||||
# if self.live:
|
if not self.limit_ram_use:
|
||||||
self.model_dictionary[coin] = model
|
self.model_dictionary[coin] = model
|
||||||
self.pair_dict[coin]["model_filename"] = dk.model_filename
|
self.pair_dict[coin]["model_filename"] = dk.model_filename
|
||||||
self.pair_dict[coin]["data_path"] = str(dk.data_path)
|
self.pair_dict[coin]["data_path"] = str(dk.data_path)
|
||||||
self.save_drawer_to_disk()
|
self.save_drawer_to_disk()
|
||||||
@@ -493,14 +512,18 @@ class FreqaiDataDrawer:
|
|||||||
)
|
)
|
||||||
|
|
||||||
# try to access model in memory instead of loading object from disk to save time
|
# try to access model in memory instead of loading object from disk to save time
|
||||||
if dk.live and coin in self.model_dictionary:
|
if dk.live and coin in self.model_dictionary and not self.limit_ram_use:
|
||||||
model = self.model_dictionary[coin]
|
model = self.model_dictionary[coin]
|
||||||
elif not dk.keras:
|
elif self.model_type == 'joblib':
|
||||||
model = load(dk.data_path / f"{dk.model_filename}_model.joblib")
|
model = load(dk.data_path / f"{dk.model_filename}_model.joblib")
|
||||||
else:
|
elif self.model_type == 'keras':
|
||||||
from tensorflow import keras
|
from tensorflow import keras
|
||||||
|
|
||||||
model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
|
model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
|
||||||
|
elif self.model_type == 'stable_baselines':
|
||||||
|
mod = __import__('stable_baselines3', fromlist=[
|
||||||
|
self.freqai_info['rl_config']['model_type']])
|
||||||
|
MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type'])
|
||||||
|
model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model")
|
||||||
|
|
||||||
if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
|
if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
|
||||||
dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
|
dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
|
||||||
@@ -510,6 +533,10 @@ class FreqaiDataDrawer:
|
|||||||
f"Unable to load model, ensure model exists at " f"{dk.data_path} "
|
f"Unable to load model, ensure model exists at " f"{dk.data_path} "
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# load it into ram if it was loaded from disk
|
||||||
|
if coin not in self.model_dictionary and not self.limit_ram_use:
|
||||||
|
self.model_dictionary[coin] = model
|
||||||
|
|
||||||
if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
|
if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
|
||||||
dk.pca = cloudpickle.load(
|
dk.pca = cloudpickle.load(
|
||||||
open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "rb")
|
open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "rb")
|
||||||
@@ -620,22 +647,3 @@ class FreqaiDataDrawer:
|
|||||||
)
|
)
|
||||||
|
|
||||||
return corr_dataframes, base_dataframes
|
return corr_dataframes, base_dataframes
|
||||||
|
|
||||||
# to be used if we want to send predictions directly to the follower instead of forcing
|
|
||||||
# follower to load models and inference
|
|
||||||
# def save_model_return_values_to_disk(self) -> None:
|
|
||||||
# with open(self.full_path / str('model_return_values.json'), "w") as fp:
|
|
||||||
# json.dump(self.model_return_values, fp, default=self.np_encoder)
|
|
||||||
|
|
||||||
# def load_model_return_values_from_disk(self, dk: FreqaiDataKitchen) -> FreqaiDataKitchen:
|
|
||||||
# exists = Path(self.full_path / str('model_return_values.json')).resolve().exists()
|
|
||||||
# if exists:
|
|
||||||
# with open(self.full_path / str('model_return_values.json'), "r") as fp:
|
|
||||||
# self.model_return_values = json.load(fp)
|
|
||||||
# elif not self.follow_mode:
|
|
||||||
# logger.info("Could not find existing datadrawer, starting from scratch")
|
|
||||||
# else:
|
|
||||||
# logger.warning(f'Follower could not find pair_dictionary at {self.full_path} '
|
|
||||||
# 'sending null values back to strategy')
|
|
||||||
|
|
||||||
# return exists, dk
|
|
||||||
|
@@ -9,6 +9,7 @@ from typing import Any, Dict, List, Tuple
|
|||||||
import numpy as np
|
import numpy as np
|
||||||
import numpy.typing as npt
|
import numpy.typing as npt
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
|
import psutil
|
||||||
from pandas import DataFrame
|
from pandas import DataFrame
|
||||||
from scipy import stats
|
from scipy import stats
|
||||||
from sklearn import linear_model
|
from sklearn import linear_model
|
||||||
@@ -76,9 +77,10 @@ class FreqaiDataKitchen:
|
|||||||
self.backtest_predictions_folder: str = "backtesting_predictions"
|
self.backtest_predictions_folder: str = "backtesting_predictions"
|
||||||
self.live = live
|
self.live = live
|
||||||
self.pair = pair
|
self.pair = pair
|
||||||
|
self.model_save_type = self.freqai_config.get('model_save_type', 'joblib')
|
||||||
|
|
||||||
self.svm_model: linear_model.SGDOneClassSVM = None
|
self.svm_model: linear_model.SGDOneClassSVM = None
|
||||||
self.keras: bool = self.freqai_config.get("keras", False)
|
# self.model_save_type: bool = self.freqai_config.get("keras", False)
|
||||||
self.set_all_pairs()
|
self.set_all_pairs()
|
||||||
if not self.live:
|
if not self.live:
|
||||||
if not self.config["timerange"]:
|
if not self.config["timerange"]:
|
||||||
@@ -95,7 +97,10 @@ class FreqaiDataKitchen:
|
|||||||
)
|
)
|
||||||
|
|
||||||
self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {})
|
self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {})
|
||||||
self.thread_count = self.freqai_config.get("data_kitchen_thread_count", -1)
|
if not self.freqai_config.get("data_kitchen_thread_count", 0):
|
||||||
|
self.thread_count = max(int(psutil.cpu_count() * 2 - 2), 1)
|
||||||
|
else:
|
||||||
|
self.thread_count = self.freqai_config["data_kitchen_thread_count"]
|
||||||
self.train_dates: DataFrame = pd.DataFrame()
|
self.train_dates: DataFrame = pd.DataFrame()
|
||||||
self.unique_classes: Dict[str, list] = {}
|
self.unique_classes: Dict[str, list] = {}
|
||||||
self.unique_class_list: list = []
|
self.unique_class_list: list = []
|
||||||
@@ -134,20 +139,15 @@ class FreqaiDataKitchen:
|
|||||||
"""
|
"""
|
||||||
feat_dict = self.freqai_config["feature_parameters"]
|
feat_dict = self.freqai_config["feature_parameters"]
|
||||||
|
|
||||||
|
if 'shuffle' not in self.freqai_config['data_split_parameters']:
|
||||||
|
self.freqai_config["data_split_parameters"].update({'shuffle': False})
|
||||||
|
|
||||||
weights: npt.ArrayLike
|
weights: npt.ArrayLike
|
||||||
if feat_dict.get("weight_factor", 0) > 0:
|
if feat_dict.get("weight_factor", 0) > 0:
|
||||||
weights = self.set_weights_higher_recent(len(filtered_dataframe))
|
weights = self.set_weights_higher_recent(len(filtered_dataframe))
|
||||||
else:
|
else:
|
||||||
weights = np.ones(len(filtered_dataframe))
|
weights = np.ones(len(filtered_dataframe))
|
||||||
|
|
||||||
if feat_dict.get("stratify_training_data", 0) > 0:
|
|
||||||
stratification = np.zeros(len(filtered_dataframe))
|
|
||||||
for i in range(1, len(stratification)):
|
|
||||||
if i % feat_dict.get("stratify_training_data", 0) == 0:
|
|
||||||
stratification[i] = 1
|
|
||||||
else:
|
|
||||||
stratification = None
|
|
||||||
|
|
||||||
if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
|
if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
|
||||||
(
|
(
|
||||||
train_features,
|
train_features,
|
||||||
@@ -160,7 +160,6 @@ class FreqaiDataKitchen:
|
|||||||
filtered_dataframe[: filtered_dataframe.shape[0]],
|
filtered_dataframe[: filtered_dataframe.shape[0]],
|
||||||
labels,
|
labels,
|
||||||
weights,
|
weights,
|
||||||
stratify=stratification,
|
|
||||||
**self.config["freqai"]["data_split_parameters"],
|
**self.config["freqai"]["data_split_parameters"],
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
@@ -571,7 +570,7 @@ class FreqaiDataKitchen:
|
|||||||
predict: bool = If true, inference an existing SVM model, else construct one
|
predict: bool = If true, inference an existing SVM model, else construct one
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if self.keras:
|
if self.model_save_type == 'keras':
|
||||||
logger.warning(
|
logger.warning(
|
||||||
"SVM outlier removal not currently supported for Keras based models. "
|
"SVM outlier removal not currently supported for Keras based models. "
|
||||||
"Skipping user requested function."
|
"Skipping user requested function."
|
||||||
|
@@ -7,10 +7,11 @@ from collections import deque
|
|||||||
from datetime import datetime, timezone
|
from datetime import datetime, timezone
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from threading import Lock
|
from threading import Lock
|
||||||
from typing import Any, Dict, List, Tuple
|
from typing import Any, Dict, List, Optional, Tuple
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
|
import psutil
|
||||||
from numpy.typing import NDArray
|
from numpy.typing import NDArray
|
||||||
from pandas import DataFrame
|
from pandas import DataFrame
|
||||||
|
|
||||||
@@ -72,10 +73,10 @@ class IFreqaiModel(ABC):
|
|||||||
self.identifier: str = self.freqai_info.get("identifier", "no_id_provided")
|
self.identifier: str = self.freqai_info.get("identifier", "no_id_provided")
|
||||||
self.scanning = False
|
self.scanning = False
|
||||||
self.ft_params = self.freqai_info["feature_parameters"]
|
self.ft_params = self.freqai_info["feature_parameters"]
|
||||||
self.keras: bool = self.freqai_info.get("keras", False)
|
# self.keras: bool = self.freqai_info.get("keras", False)
|
||||||
if self.keras and self.ft_params.get("DI_threshold", 0):
|
# if self.keras and self.ft_params.get("DI_threshold", 0):
|
||||||
self.ft_params["DI_threshold"] = 0
|
# self.ft_params["DI_threshold"] = 0
|
||||||
logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
|
# logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
|
||||||
self.CONV_WIDTH = self.freqai_info.get("conv_width", 2)
|
self.CONV_WIDTH = self.freqai_info.get("conv_width", 2)
|
||||||
if self.ft_params.get("inlier_metric_window", 0):
|
if self.ft_params.get("inlier_metric_window", 0):
|
||||||
self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2
|
self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2
|
||||||
@@ -96,12 +97,15 @@ class IFreqaiModel(ABC):
|
|||||||
|
|
||||||
self._threads: List[threading.Thread] = []
|
self._threads: List[threading.Thread] = []
|
||||||
self._stop_event = threading.Event()
|
self._stop_event = threading.Event()
|
||||||
|
self.strategy: Optional[IStrategy] = None
|
||||||
|
self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1)
|
||||||
|
|
||||||
def __getstate__(self):
|
def __getstate__(self):
|
||||||
"""
|
"""
|
||||||
Return an empty state to be pickled in hyperopt
|
Return an empty state to be pickled in hyperopt
|
||||||
"""
|
"""
|
||||||
return ({})
|
return ({})
|
||||||
|
self.strategy: Optional[IStrategy] = None
|
||||||
|
|
||||||
def assert_config(self, config: Config) -> None:
|
def assert_config(self, config: Config) -> None:
|
||||||
|
|
||||||
@@ -122,6 +126,7 @@ class IFreqaiModel(ABC):
|
|||||||
|
|
||||||
self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE)
|
self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE)
|
||||||
self.dd.set_pair_dict_info(metadata)
|
self.dd.set_pair_dict_info(metadata)
|
||||||
|
self.strategy = strategy
|
||||||
|
|
||||||
if self.live:
|
if self.live:
|
||||||
self.inference_timer('start')
|
self.inference_timer('start')
|
||||||
@@ -156,6 +161,13 @@ class IFreqaiModel(ABC):
|
|||||||
self.model = None
|
self.model = None
|
||||||
self.dk = None
|
self.dk = None
|
||||||
|
|
||||||
|
def _on_stop(self):
|
||||||
|
"""
|
||||||
|
Callback for Subclasses to override to include logic for shutting down resources
|
||||||
|
when SIGINT is sent.
|
||||||
|
"""
|
||||||
|
return
|
||||||
|
|
||||||
def shutdown(self):
|
def shutdown(self):
|
||||||
"""
|
"""
|
||||||
Cleans up threads on Shutdown, set stop event. Join threads to wait
|
Cleans up threads on Shutdown, set stop event. Join threads to wait
|
||||||
@@ -164,6 +176,8 @@ class IFreqaiModel(ABC):
|
|||||||
logger.info("Stopping FreqAI")
|
logger.info("Stopping FreqAI")
|
||||||
self._stop_event.set()
|
self._stop_event.set()
|
||||||
|
|
||||||
|
self._on_stop()
|
||||||
|
|
||||||
logger.info("Waiting on Training iteration")
|
logger.info("Waiting on Training iteration")
|
||||||
for _thread in self._threads:
|
for _thread in self._threads:
|
||||||
_thread.join()
|
_thread.join()
|
||||||
@@ -393,7 +407,7 @@ class IFreqaiModel(ABC):
|
|||||||
# allows FreqUI to show full return values.
|
# allows FreqUI to show full return values.
|
||||||
pred_df, do_preds = self.predict(dataframe, dk)
|
pred_df, do_preds = self.predict(dataframe, dk)
|
||||||
if pair not in self.dd.historic_predictions:
|
if pair not in self.dd.historic_predictions:
|
||||||
self.set_initial_historic_predictions(pred_df, dk, pair)
|
self.set_initial_historic_predictions(pred_df, dk, pair, dataframe)
|
||||||
self.dd.set_initial_return_values(pair, pred_df)
|
self.dd.set_initial_return_values(pair, pred_df)
|
||||||
|
|
||||||
dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)
|
dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)
|
||||||
@@ -414,7 +428,7 @@ class IFreqaiModel(ABC):
|
|||||||
|
|
||||||
if self.freqai_info.get('fit_live_predictions_candles', 0) and self.live:
|
if self.freqai_info.get('fit_live_predictions_candles', 0) and self.live:
|
||||||
self.fit_live_predictions(dk, pair)
|
self.fit_live_predictions(dk, pair)
|
||||||
self.dd.append_model_predictions(pair, pred_df, do_preds, dk, len(dataframe))
|
self.dd.append_model_predictions(pair, pred_df, do_preds, dk, dataframe)
|
||||||
dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)
|
dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)
|
||||||
|
|
||||||
return
|
return
|
||||||
@@ -583,7 +597,7 @@ class IFreqaiModel(ABC):
|
|||||||
self.dd.purge_old_models()
|
self.dd.purge_old_models()
|
||||||
|
|
||||||
def set_initial_historic_predictions(
|
def set_initial_historic_predictions(
|
||||||
self, pred_df: DataFrame, dk: FreqaiDataKitchen, pair: str
|
self, pred_df: DataFrame, dk: FreqaiDataKitchen, pair: str, strat_df: DataFrame
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
This function is called only if the datadrawer failed to load an
|
This function is called only if the datadrawer failed to load an
|
||||||
@@ -626,9 +640,13 @@ class IFreqaiModel(ABC):
|
|||||||
for return_str in dk.data['extra_returns_per_train']:
|
for return_str in dk.data['extra_returns_per_train']:
|
||||||
hist_preds_df[return_str] = 0
|
hist_preds_df[return_str] = 0
|
||||||
|
|
||||||
|
hist_preds_df['close_price'] = strat_df['close']
|
||||||
|
hist_preds_df['date_pred'] = strat_df['date']
|
||||||
|
|
||||||
# # for keras type models, the conv_window needs to be prepended so
|
# # for keras type models, the conv_window needs to be prepended so
|
||||||
# # viewing is correct in frequi
|
# # viewing is correct in frequi
|
||||||
if self.freqai_info.get('keras', False) or self.ft_params.get('inlier_metric_window', 0):
|
if (not self.freqai_info.get('model_save_type', 'joblib') or
|
||||||
|
self.ft_params.get('inlier_metric_window', 0)):
|
||||||
n_lost_points = self.freqai_info.get('conv_width', 2)
|
n_lost_points = self.freqai_info.get('conv_width', 2)
|
||||||
zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))),
|
zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))),
|
||||||
columns=hist_preds_df.columns)
|
columns=hist_preds_df.columns)
|
||||||
|
144
freqtrade/freqai/prediction_models/CNNPredictionModel.py
Normal file
144
freqtrade/freqai/prediction_models/CNNPredictionModel.py
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
import logging
|
||||||
|
from typing import Any, Dict, Tuple
|
||||||
|
|
||||||
|
from pandas import DataFrame
|
||||||
|
from freqtrade.exceptions import OperationalException
|
||||||
|
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||||
|
import tensorflow as tf
|
||||||
|
from freqtrade.freqai.base_models.BaseTensorFlowModel import BaseTensorFlowModel, WindowGenerator
|
||||||
|
from tensorflow.keras.layers import Input, Conv1D, Dense
|
||||||
|
from tensorflow.keras.models import Model
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# tf.config.run_functions_eagerly(True)
|
||||||
|
# tf.data.experimental.enable_debug_mode()
|
||||||
|
|
||||||
|
MAX_EPOCHS = 10
|
||||||
|
|
||||||
|
|
||||||
|
class CNNPredictionModel(BaseTensorFlowModel):
|
||||||
|
"""
|
||||||
|
User created prediction model. The class needs to override three necessary
|
||||||
|
functions, predict(), fit().
|
||||||
|
"""
|
||||||
|
|
||||||
|
def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen) -> Any:
|
||||||
|
"""
|
||||||
|
User sets up the training and test data to fit their desired model here
|
||||||
|
:params:
|
||||||
|
:data_dictionary: the dictionary constructed by DataHandler to hold
|
||||||
|
all the training and test data/labels.
|
||||||
|
"""
|
||||||
|
train_df = data_dictionary["train_features"]
|
||||||
|
train_labels = data_dictionary["train_labels"]
|
||||||
|
test_df = data_dictionary["test_features"]
|
||||||
|
test_labels = data_dictionary["test_labels"]
|
||||||
|
n_labels = len(train_labels.columns)
|
||||||
|
if n_labels > 1:
|
||||||
|
raise OperationalException(
|
||||||
|
"Neural Net not yet configured for multi-targets. Please "
|
||||||
|
" reduce number of targets to 1 in strategy."
|
||||||
|
)
|
||||||
|
|
||||||
|
n_features = len(data_dictionary["train_features"].columns)
|
||||||
|
BATCH_SIZE = self.freqai_info.get("batch_size", 64)
|
||||||
|
input_dims = [BATCH_SIZE, self.CONV_WIDTH, n_features]
|
||||||
|
|
||||||
|
w1 = WindowGenerator(
|
||||||
|
input_width=self.CONV_WIDTH,
|
||||||
|
label_width=1,
|
||||||
|
shift=1,
|
||||||
|
train_df=train_df,
|
||||||
|
val_df=test_df,
|
||||||
|
train_labels=train_labels,
|
||||||
|
val_labels=test_labels,
|
||||||
|
batch_size=BATCH_SIZE,
|
||||||
|
)
|
||||||
|
|
||||||
|
model = self.create_model(input_dims, n_labels)
|
||||||
|
|
||||||
|
steps_per_epoch = np.ceil(len(test_df) / BATCH_SIZE)
|
||||||
|
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
|
||||||
|
0.001, decay_steps=steps_per_epoch * 1000, decay_rate=1, staircase=False
|
||||||
|
)
|
||||||
|
|
||||||
|
early_stopping = tf.keras.callbacks.EarlyStopping(
|
||||||
|
monitor="loss", patience=3, mode="min", min_delta=0.0001
|
||||||
|
)
|
||||||
|
|
||||||
|
model.compile(
|
||||||
|
loss=tf.losses.MeanSquaredError(),
|
||||||
|
optimizer=tf.optimizers.Adam(lr_schedule),
|
||||||
|
metrics=[tf.metrics.MeanAbsoluteError()],
|
||||||
|
)
|
||||||
|
|
||||||
|
model.fit(
|
||||||
|
w1.train,
|
||||||
|
epochs=MAX_EPOCHS,
|
||||||
|
shuffle=False,
|
||||||
|
validation_data=w1.val,
|
||||||
|
callbacks=[early_stopping],
|
||||||
|
verbose=1,
|
||||||
|
)
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
def predict(
|
||||||
|
self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first=True
|
||||||
|
) -> Tuple[DataFrame, DataFrame]:
|
||||||
|
"""
|
||||||
|
Filter the prediction features data and predict with it.
|
||||||
|
:param: unfiltered_dataframe: Full dataframe for the current backtest period.
|
||||||
|
:return:
|
||||||
|
:predictions: np.array of predictions
|
||||||
|
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
|
||||||
|
data (NaNs) or felt uncertain about data (PCA and DI index)
|
||||||
|
"""
|
||||||
|
|
||||||
|
dk.find_features(unfiltered_dataframe)
|
||||||
|
filtered_dataframe, _ = dk.filter_features(
|
||||||
|
unfiltered_dataframe, dk.training_features_list, training_filter=False
|
||||||
|
)
|
||||||
|
filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
|
||||||
|
dk.data_dictionary["prediction_features"] = filtered_dataframe
|
||||||
|
|
||||||
|
# optional additional data cleaning/analysis
|
||||||
|
self.data_cleaning_predict(dk, filtered_dataframe)
|
||||||
|
|
||||||
|
if first:
|
||||||
|
full_df = dk.data_dictionary["prediction_features"]
|
||||||
|
|
||||||
|
w1 = WindowGenerator(
|
||||||
|
input_width=self.CONV_WIDTH,
|
||||||
|
label_width=1,
|
||||||
|
shift=1,
|
||||||
|
test_df=full_df,
|
||||||
|
batch_size=len(full_df),
|
||||||
|
)
|
||||||
|
|
||||||
|
predictions = self.model.predict(w1.inference)
|
||||||
|
len_diff = len(dk.do_predict) - len(predictions)
|
||||||
|
if len_diff > 0:
|
||||||
|
dk.do_predict = dk.do_predict[len_diff:]
|
||||||
|
|
||||||
|
else:
|
||||||
|
data = dk.data_dictionary["prediction_features"]
|
||||||
|
data = tf.expand_dims(data, axis=0)
|
||||||
|
predictions = self.model(data, training=False)
|
||||||
|
|
||||||
|
predictions = predictions[:, 0, 0]
|
||||||
|
pred_df = DataFrame(predictions, columns=dk.label_list)
|
||||||
|
|
||||||
|
pred_df = dk.denormalize_labels_from_metadata(pred_df)
|
||||||
|
|
||||||
|
return (pred_df, np.ones(len(pred_df)))
|
||||||
|
|
||||||
|
def create_model(self, input_dims, n_labels) -> Any:
|
||||||
|
|
||||||
|
input_layer = Input(shape=(input_dims[1], input_dims[2]))
|
||||||
|
Layer_1 = Conv1D(filters=32, kernel_size=(self.CONV_WIDTH,), activation="relu")(input_layer)
|
||||||
|
Layer_3 = Dense(units=32, activation="relu")(Layer_1)
|
||||||
|
output_layer = Dense(units=n_labels)(Layer_3)
|
||||||
|
return Model(inputs=input_layer, outputs=output_layer)
|
118
freqtrade/freqai/prediction_models/ReinforcementLearner.py
Normal file
118
freqtrade/freqai/prediction_models/ReinforcementLearner.py
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
import logging
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict
|
||||||
|
|
||||||
|
import torch as th
|
||||||
|
|
||||||
|
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||||
|
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions
|
||||||
|
from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ReinforcementLearner(BaseReinforcementLearningModel):
|
||||||
|
"""
|
||||||
|
User created Reinforcement Learning Model prediction model.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs):
|
||||||
|
"""
|
||||||
|
User customizable fit method
|
||||||
|
:params:
|
||||||
|
data_dictionary: dict = common data dictionary containing all train/test
|
||||||
|
features/labels/weights.
|
||||||
|
dk: FreqaiDatakitchen = data kitchen for current pair.
|
||||||
|
:returns:
|
||||||
|
model: Any = trained model to be used for inference in dry/live/backtesting
|
||||||
|
"""
|
||||||
|
train_df = data_dictionary["train_features"]
|
||||||
|
total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df)
|
||||||
|
|
||||||
|
policy_kwargs = dict(activation_fn=th.nn.ReLU,
|
||||||
|
net_arch=[128, 128])
|
||||||
|
|
||||||
|
if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
|
||||||
|
model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
|
||||||
|
tensorboard_log=Path(
|
||||||
|
dk.full_path / "tensorboard" / dk.pair.split('/')[0]),
|
||||||
|
**self.freqai_info['model_training_parameters']
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.info('Continual training activated - starting training from previously '
|
||||||
|
'trained agent.')
|
||||||
|
model = self.dd.model_dictionary[dk.pair]
|
||||||
|
model.set_env(self.train_env)
|
||||||
|
|
||||||
|
model.learn(
|
||||||
|
total_timesteps=int(total_timesteps),
|
||||||
|
callback=self.eval_callback
|
||||||
|
)
|
||||||
|
|
||||||
|
if Path(dk.data_path / "best_model.zip").is_file():
|
||||||
|
logger.info('Callback found a best model.')
|
||||||
|
best_model = self.MODELCLASS.load(dk.data_path / "best_model")
|
||||||
|
return best_model
|
||||||
|
|
||||||
|
logger.info('Couldnt find best model, using final model instead.')
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
class MyRLEnv(Base5ActionRLEnv):
|
||||||
|
"""
|
||||||
|
User can override any function in BaseRLEnv and gym.Env. Here the user
|
||||||
|
sets a custom reward based on profit and trade duration.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def calculate_reward(self, action):
|
||||||
|
"""
|
||||||
|
An example reward function. This is the one function that users will likely
|
||||||
|
wish to inject their own creativity into.
|
||||||
|
:params:
|
||||||
|
action: int = The action made by the agent for the current candle.
|
||||||
|
:returns:
|
||||||
|
float = the reward to give to the agent for current step (used for optimization
|
||||||
|
of weights in NN)
|
||||||
|
"""
|
||||||
|
# first, penalize if the action is not valid
|
||||||
|
if not self._is_valid(action):
|
||||||
|
return -2
|
||||||
|
|
||||||
|
pnl = self.get_unrealized_profit()
|
||||||
|
factor = 100
|
||||||
|
|
||||||
|
# reward agent for entering trades
|
||||||
|
if (action in (Actions.Long_enter.value, Actions.Short_enter.value)
|
||||||
|
and self._position == Positions.Neutral):
|
||||||
|
return 25
|
||||||
|
# discourage agent from not entering trades
|
||||||
|
if action == Actions.Neutral.value and self._position == Positions.Neutral:
|
||||||
|
return -1
|
||||||
|
|
||||||
|
max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
|
||||||
|
trade_duration = self._current_tick - self._last_trade_tick
|
||||||
|
|
||||||
|
if trade_duration <= max_trade_duration:
|
||||||
|
factor *= 1.5
|
||||||
|
elif trade_duration > max_trade_duration:
|
||||||
|
factor *= 0.5
|
||||||
|
|
||||||
|
# discourage sitting in position
|
||||||
|
if (self._position in (Positions.Short, Positions.Long) and
|
||||||
|
action == Actions.Neutral.value):
|
||||||
|
return -1 * trade_duration / max_trade_duration
|
||||||
|
|
||||||
|
# close long
|
||||||
|
if action == Actions.Long_exit.value and self._position == Positions.Long:
|
||||||
|
if pnl > self.profit_aim * self.rr:
|
||||||
|
factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
|
||||||
|
return float(pnl * factor)
|
||||||
|
|
||||||
|
# close short
|
||||||
|
if action == Actions.Short_exit.value and self._position == Positions.Short:
|
||||||
|
if pnl > self.profit_aim * self.rr:
|
||||||
|
factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
|
||||||
|
return float(pnl * factor)
|
||||||
|
|
||||||
|
return 0.
|
@@ -0,0 +1,100 @@
|
|||||||
|
import logging
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict # , Tuple
|
||||||
|
|
||||||
|
# import numpy.typing as npt
|
||||||
|
import torch as th
|
||||||
|
from pandas import DataFrame
|
||||||
|
from stable_baselines3.common.callbacks import EvalCallback
|
||||||
|
from stable_baselines3.common.vec_env import SubprocVecEnv
|
||||||
|
|
||||||
|
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||||
|
from freqtrade.freqai.RL.BaseReinforcementLearningModel import (BaseReinforcementLearningModel,
|
||||||
|
make_env)
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ReinforcementLearner_multiproc(BaseReinforcementLearningModel):
|
||||||
|
"""
|
||||||
|
User created Reinforcement Learning Model prediction model.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs):
|
||||||
|
|
||||||
|
train_df = data_dictionary["train_features"]
|
||||||
|
total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df)
|
||||||
|
|
||||||
|
# model arch
|
||||||
|
policy_kwargs = dict(activation_fn=th.nn.ReLU,
|
||||||
|
net_arch=[128, 128])
|
||||||
|
|
||||||
|
if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
|
||||||
|
model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
|
||||||
|
tensorboard_log=Path(
|
||||||
|
dk.full_path / "tensorboard" / dk.pair.split('/')[0]),
|
||||||
|
**self.freqai_info['model_training_parameters']
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.info('Continual learning activated - starting training from previously '
|
||||||
|
'trained agent.')
|
||||||
|
model = self.dd.model_dictionary[dk.pair]
|
||||||
|
model.set_env(self.train_env)
|
||||||
|
|
||||||
|
model.learn(
|
||||||
|
total_timesteps=int(total_timesteps),
|
||||||
|
callback=self.eval_callback
|
||||||
|
)
|
||||||
|
|
||||||
|
if Path(dk.data_path / "best_model.zip").is_file():
|
||||||
|
logger.info('Callback found a best model.')
|
||||||
|
best_model = self.MODELCLASS.load(dk.data_path / "best_model")
|
||||||
|
return best_model
|
||||||
|
|
||||||
|
logger.info('Couldnt find best model, using final model instead.')
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
def set_train_and_eval_environments(self, data_dictionary: Dict[str, Any],
|
||||||
|
prices_train: DataFrame, prices_test: DataFrame,
|
||||||
|
dk: FreqaiDataKitchen):
|
||||||
|
"""
|
||||||
|
User can override this if they are using a custom MyRLEnv
|
||||||
|
:params:
|
||||||
|
data_dictionary: dict = common data dictionary containing train and test
|
||||||
|
features/labels/weights.
|
||||||
|
prices_train/test: DataFrame = dataframe comprised of the prices to be used in
|
||||||
|
the environment during training
|
||||||
|
or testing
|
||||||
|
dk: FreqaiDataKitchen = the datakitchen for the current pair
|
||||||
|
"""
|
||||||
|
train_df = data_dictionary["train_features"]
|
||||||
|
test_df = data_dictionary["test_features"]
|
||||||
|
|
||||||
|
env_id = "train_env"
|
||||||
|
self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train,
|
||||||
|
self.reward_params, self.CONV_WIDTH, monitor=True,
|
||||||
|
config=self.config) for i
|
||||||
|
in range(self.max_threads)])
|
||||||
|
|
||||||
|
eval_env_id = 'eval_env'
|
||||||
|
self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
|
||||||
|
test_df, prices_test,
|
||||||
|
self.reward_params, self.CONV_WIDTH, monitor=True,
|
||||||
|
config=self.config) for i
|
||||||
|
in range(self.max_threads)])
|
||||||
|
self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
|
||||||
|
render=False, eval_freq=len(train_df),
|
||||||
|
best_model_save_path=str(dk.data_path))
|
||||||
|
|
||||||
|
def _on_stop(self):
|
||||||
|
"""
|
||||||
|
Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown.
|
||||||
|
"""
|
||||||
|
|
||||||
|
if self.train_env:
|
||||||
|
self.train_env.close()
|
||||||
|
|
||||||
|
if self.eval_env:
|
||||||
|
self.eval_env.close()
|
@@ -82,7 +82,10 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
# Keep this at the end of this initialization method.
|
# Keep this at the end of this initialization method.
|
||||||
self.rpc: RPCManager = RPCManager(self)
|
self.rpc: RPCManager = RPCManager(self)
|
||||||
|
|
||||||
self.dataprovider = DataProvider(self.config, self.exchange, self.pairlists, self.rpc)
|
self.dataprovider = DataProvider(self.config, self.exchange, rpc=self.rpc)
|
||||||
|
self.pairlists = PairListManager(self.exchange, self.config, self.dataprovider)
|
||||||
|
|
||||||
|
self.dataprovider.add_pairlisthandler(self.pairlists)
|
||||||
|
|
||||||
# Attach Dataprovider to strategy instance
|
# Attach Dataprovider to strategy instance
|
||||||
self.strategy.dp = self.dataprovider
|
self.strategy.dp = self.dataprovider
|
||||||
@@ -597,7 +600,7 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
# We should decrease our position
|
# We should decrease our position
|
||||||
amount = self.exchange.amount_to_contract_precision(
|
amount = self.exchange.amount_to_contract_precision(
|
||||||
trade.pair,
|
trade.pair,
|
||||||
abs(float(FtPrecise(stake_amount) / FtPrecise(current_exit_rate))))
|
abs(float(FtPrecise(stake_amount * trade.leverage) / FtPrecise(current_exit_rate))))
|
||||||
if amount > trade.amount:
|
if amount > trade.amount:
|
||||||
# This is currently ineffective as remaining would become < min tradable
|
# This is currently ineffective as remaining would become < min tradable
|
||||||
# Fixing this would require checking for 0.0 there -
|
# Fixing this would require checking for 0.0 there -
|
||||||
@@ -1308,7 +1311,7 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
# place new order only if new price is supplied
|
# place new order only if new price is supplied
|
||||||
self.execute_entry(
|
self.execute_entry(
|
||||||
pair=trade.pair,
|
pair=trade.pair,
|
||||||
stake_amount=(order_obj.remaining * order_obj.price),
|
stake_amount=(order_obj.remaining * order_obj.price / trade.leverage),
|
||||||
price=adjusted_entry_price,
|
price=adjusted_entry_price,
|
||||||
trade=trade,
|
trade=trade,
|
||||||
is_short=trade.is_short,
|
is_short=trade.is_short,
|
||||||
@@ -1340,11 +1343,12 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
replacing: Optional[bool] = False
|
replacing: Optional[bool] = False
|
||||||
) -> bool:
|
) -> bool:
|
||||||
"""
|
"""
|
||||||
Buy cancel - cancel order
|
entry cancel - cancel order
|
||||||
:param replacing: Replacing order - prevent trade deletion.
|
:param replacing: Replacing order - prevent trade deletion.
|
||||||
:return: True if order was fully cancelled
|
:return: True if trade was fully cancelled
|
||||||
"""
|
"""
|
||||||
was_trade_fully_canceled = False
|
was_trade_fully_canceled = False
|
||||||
|
side = trade.entry_side.capitalize()
|
||||||
|
|
||||||
# Cancelled orders may have the status of 'canceled' or 'closed'
|
# Cancelled orders may have the status of 'canceled' or 'closed'
|
||||||
if order['status'] not in constants.NON_OPEN_EXCHANGE_STATES:
|
if order['status'] not in constants.NON_OPEN_EXCHANGE_STATES:
|
||||||
@@ -1371,7 +1375,6 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
corder = order
|
corder = order
|
||||||
reason = constants.CANCEL_REASON['CANCELLED_ON_EXCHANGE']
|
reason = constants.CANCEL_REASON['CANCELLED_ON_EXCHANGE']
|
||||||
|
|
||||||
side = trade.entry_side.capitalize()
|
|
||||||
logger.info('%s order %s for %s.', side, reason, trade)
|
logger.info('%s order %s for %s.', side, reason, trade)
|
||||||
|
|
||||||
# Using filled to determine the filled amount
|
# Using filled to determine the filled amount
|
||||||
@@ -1385,24 +1388,15 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
was_trade_fully_canceled = True
|
was_trade_fully_canceled = True
|
||||||
reason += f", {constants.CANCEL_REASON['FULLY_CANCELLED']}"
|
reason += f", {constants.CANCEL_REASON['FULLY_CANCELLED']}"
|
||||||
else:
|
else:
|
||||||
# FIXME TODO: This could possibly reworked to not duplicate the code 15 lines below.
|
|
||||||
self.update_trade_state(trade, trade.open_order_id, corder)
|
self.update_trade_state(trade, trade.open_order_id, corder)
|
||||||
trade.open_order_id = None
|
trade.open_order_id = None
|
||||||
logger.info(f'{side} Order timeout for {trade}.')
|
logger.info(f'{side} Order timeout for {trade}.')
|
||||||
else:
|
else:
|
||||||
# if trade is partially complete, edit the stake details for the trade
|
# update_trade_state (and subsequently recalc_trade_from_orders) will handle updates
|
||||||
# and close the order
|
# to the trade object
|
||||||
# cancel_order may not contain the full order dict, so we need to fallback
|
|
||||||
# to the order dict acquired before cancelling.
|
|
||||||
# we need to fall back to the values from order if corder does not contain these keys.
|
|
||||||
trade.amount = filled_amount
|
|
||||||
# * Check edge cases, we don't want to make leverage > 1.0 if we don't have to
|
|
||||||
# * (for leverage modes which aren't isolated futures)
|
|
||||||
|
|
||||||
trade.stake_amount = trade.amount * trade.open_rate / trade.leverage
|
|
||||||
self.update_trade_state(trade, trade.open_order_id, corder)
|
self.update_trade_state(trade, trade.open_order_id, corder)
|
||||||
|
|
||||||
trade.open_order_id = None
|
trade.open_order_id = None
|
||||||
|
|
||||||
logger.info(f'Partial {trade.entry_side} order timeout for {trade}.')
|
logger.info(f'Partial {trade.entry_side} order timeout for {trade}.')
|
||||||
reason += f", {constants.CANCEL_REASON['PARTIALLY_FILLED']}"
|
reason += f", {constants.CANCEL_REASON['PARTIALLY_FILLED']}"
|
||||||
|
|
||||||
@@ -1417,49 +1411,63 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
:return: True if exit order was cancelled, false otherwise
|
:return: True if exit order was cancelled, false otherwise
|
||||||
"""
|
"""
|
||||||
cancelled = False
|
cancelled = False
|
||||||
# if trade is not partially completed, just cancel the order
|
# Cancelled orders may have the status of 'canceled' or 'closed'
|
||||||
if order['remaining'] == order['amount'] or order.get('filled') == 0.0:
|
if order['status'] not in constants.NON_OPEN_EXCHANGE_STATES:
|
||||||
if not self.exchange.check_order_canceled_empty(order):
|
filled_val: float = order.get('filled', 0.0) or 0.0
|
||||||
try:
|
filled_rem_stake = trade.stake_amount - filled_val * trade.open_rate
|
||||||
# if trade is not partially completed, just delete the order
|
minstake = self.exchange.get_min_pair_stake_amount(
|
||||||
co = self.exchange.cancel_order_with_result(trade.open_order_id, trade.pair,
|
trade.pair, trade.open_rate, self.strategy.stoploss)
|
||||||
trade.amount)
|
# Double-check remaining amount
|
||||||
trade.update_order(co)
|
if filled_val > 0:
|
||||||
except InvalidOrderException:
|
reason = constants.CANCEL_REASON['PARTIALLY_FILLED']
|
||||||
logger.exception(
|
if minstake and filled_rem_stake < minstake:
|
||||||
f"Could not cancel {trade.exit_side} order {trade.open_order_id}")
|
logger.warning(
|
||||||
return False
|
f"Order {trade.open_order_id} for {trade.pair} not cancelled, as "
|
||||||
logger.info('%s order %s for %s.', trade.exit_side.capitalize(), reason, trade)
|
f"the filled amount of {filled_val} would result in an unexitable trade.")
|
||||||
else:
|
reason = constants.CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']
|
||||||
reason = constants.CANCEL_REASON['CANCELLED_ON_EXCHANGE']
|
|
||||||
logger.info('%s order %s for %s.', trade.exit_side.capitalize(), reason, trade)
|
|
||||||
trade.update_order(order)
|
|
||||||
|
|
||||||
|
self._notify_exit_cancel(
|
||||||
|
trade,
|
||||||
|
order_type=self.strategy.order_types['exit'],
|
||||||
|
reason=reason, order_id=order['id'],
|
||||||
|
sub_trade=trade.amount != order['amount']
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
try:
|
||||||
|
co = self.exchange.cancel_order_with_result(trade.open_order_id, trade.pair,
|
||||||
|
trade.amount)
|
||||||
|
except InvalidOrderException:
|
||||||
|
logger.exception(
|
||||||
|
f"Could not cancel {trade.exit_side} order {trade.open_order_id}")
|
||||||
|
return False
|
||||||
trade.close_rate = None
|
trade.close_rate = None
|
||||||
trade.close_rate_requested = None
|
trade.close_rate_requested = None
|
||||||
trade.close_profit = None
|
trade.close_profit = None
|
||||||
trade.close_profit_abs = None
|
trade.close_profit_abs = None
|
||||||
trade.close_date = None
|
# Set exit_reason for fill message
|
||||||
trade.is_open = True
|
exit_reason_prev = trade.exit_reason
|
||||||
trade.open_order_id = None
|
trade.exit_reason = trade.exit_reason + f", {reason}" if trade.exit_reason else reason
|
||||||
trade.exit_reason = None
|
self.update_trade_state(trade, trade.open_order_id, co)
|
||||||
|
# Order might be filled above in odd timing issues.
|
||||||
|
if co.get('status') in ('canceled', 'cancelled'):
|
||||||
|
trade.exit_reason = None
|
||||||
|
trade.open_order_id = None
|
||||||
|
else:
|
||||||
|
trade.exit_reason = exit_reason_prev
|
||||||
|
|
||||||
|
logger.info(f'{trade.exit_side.capitalize()} order {reason} for {trade}.')
|
||||||
cancelled = True
|
cancelled = True
|
||||||
self.wallets.update()
|
|
||||||
else:
|
else:
|
||||||
# TODO: figure out how to handle partially complete sell orders
|
reason = constants.CANCEL_REASON['CANCELLED_ON_EXCHANGE']
|
||||||
reason = constants.CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']
|
logger.info(f'{trade.exit_side.capitalize()} order {reason} for {trade}.')
|
||||||
cancelled = False
|
self.update_trade_state(trade, trade.open_order_id, order)
|
||||||
|
trade.open_order_id = None
|
||||||
|
|
||||||
order_obj = trade.select_order_by_order_id(order['id'])
|
|
||||||
if not order_obj:
|
|
||||||
raise DependencyException(
|
|
||||||
f"Order_obj not found for {order['id']}. This should not have happened.")
|
|
||||||
|
|
||||||
sub_trade = order_obj.amount != trade.amount
|
|
||||||
self._notify_exit_cancel(
|
self._notify_exit_cancel(
|
||||||
trade,
|
trade,
|
||||||
order_type=self.strategy.order_types['exit'],
|
order_type=self.strategy.order_types['exit'],
|
||||||
reason=reason, order=order_obj, sub_trade=sub_trade
|
reason=reason, order_id=order['id'], sub_trade=trade.amount != order['amount']
|
||||||
)
|
)
|
||||||
return cancelled
|
return cancelled
|
||||||
|
|
||||||
@@ -1656,7 +1664,7 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
self.rpc.send_msg(msg)
|
self.rpc.send_msg(msg)
|
||||||
|
|
||||||
def _notify_exit_cancel(self, trade: Trade, order_type: str, reason: str,
|
def _notify_exit_cancel(self, trade: Trade, order_type: str, reason: str,
|
||||||
order: Order, sub_trade: bool = False) -> None:
|
order_id: str, sub_trade: bool = False) -> None:
|
||||||
"""
|
"""
|
||||||
Sends rpc notification when a sell cancel occurred.
|
Sends rpc notification when a sell cancel occurred.
|
||||||
"""
|
"""
|
||||||
@@ -1665,6 +1673,11 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
else:
|
else:
|
||||||
trade.exit_order_status = reason
|
trade.exit_order_status = reason
|
||||||
|
|
||||||
|
order = trade.select_order_by_order_id(order_id)
|
||||||
|
if not order:
|
||||||
|
raise DependencyException(
|
||||||
|
f"Order_obj not found for {order_id}. This should not have happened.")
|
||||||
|
|
||||||
profit_rate = trade.close_rate if trade.close_rate else trade.close_rate_requested
|
profit_rate = trade.close_rate if trade.close_rate else trade.close_rate_requested
|
||||||
profit_trade = trade.calc_profit(rate=profit_rate)
|
profit_trade = trade.calc_profit(rate=profit_rate)
|
||||||
current_rate = self.exchange.get_rate(
|
current_rate = self.exchange.get_rate(
|
||||||
@@ -1700,11 +1713,6 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
'stake_amount': trade.stake_amount,
|
'stake_amount': trade.stake_amount,
|
||||||
}
|
}
|
||||||
|
|
||||||
if 'fiat_display_currency' in self.config:
|
|
||||||
msg.update({
|
|
||||||
'fiat_currency': self.config['fiat_display_currency'],
|
|
||||||
})
|
|
||||||
|
|
||||||
# Send the message
|
# Send the message
|
||||||
self.rpc.send_msg(msg)
|
self.rpc.send_msg(msg)
|
||||||
|
|
||||||
|
@@ -110,7 +110,7 @@ class Backtesting:
|
|||||||
self.timeframe = str(self.config.get('timeframe'))
|
self.timeframe = str(self.config.get('timeframe'))
|
||||||
self.timeframe_min = timeframe_to_minutes(self.timeframe)
|
self.timeframe_min = timeframe_to_minutes(self.timeframe)
|
||||||
self.init_backtest_detail()
|
self.init_backtest_detail()
|
||||||
self.pairlists = PairListManager(self.exchange, self.config)
|
self.pairlists = PairListManager(self.exchange, self.config, self.dataprovider)
|
||||||
if 'VolumePairList' in self.pairlists.name_list:
|
if 'VolumePairList' in self.pairlists.name_list:
|
||||||
raise OperationalException("VolumePairList not allowed for backtesting. "
|
raise OperationalException("VolumePairList not allowed for backtesting. "
|
||||||
"Please use StaticPairList instead.")
|
"Please use StaticPairList instead.")
|
||||||
@@ -540,7 +540,7 @@ class Backtesting:
|
|||||||
|
|
||||||
if stake_amount is not None and stake_amount < 0.0:
|
if stake_amount is not None and stake_amount < 0.0:
|
||||||
amount = amount_to_contract_precision(
|
amount = amount_to_contract_precision(
|
||||||
abs(stake_amount) / current_rate, trade.amount_precision,
|
abs(stake_amount * trade.leverage) / current_rate, trade.amount_precision,
|
||||||
self.precision_mode, trade.contract_size)
|
self.precision_mode, trade.contract_size)
|
||||||
if amount == 0.0:
|
if amount == 0.0:
|
||||||
return trade
|
return trade
|
||||||
@@ -1045,7 +1045,7 @@ class Backtesting:
|
|||||||
if requested_rate:
|
if requested_rate:
|
||||||
self._enter_trade(pair=trade.pair, row=row, trade=trade,
|
self._enter_trade(pair=trade.pair, row=row, trade=trade,
|
||||||
requested_rate=requested_rate,
|
requested_rate=requested_rate,
|
||||||
requested_stake=(order.remaining * order.price),
|
requested_stake=(order.remaining * order.price / trade.leverage),
|
||||||
direction='short' if trade.is_short else 'long')
|
direction='short' if trade.is_short else 'long')
|
||||||
self.replaced_entry_orders += 1
|
self.replaced_entry_orders += 1
|
||||||
else:
|
else:
|
||||||
|
@@ -24,6 +24,7 @@ from pandas import DataFrame
|
|||||||
from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
|
from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
|
||||||
from freqtrade.data.converter import trim_dataframes
|
from freqtrade.data.converter import trim_dataframes
|
||||||
from freqtrade.data.history import get_timerange
|
from freqtrade.data.history import get_timerange
|
||||||
|
from freqtrade.data.metrics import calculate_market_change
|
||||||
from freqtrade.enums import HyperoptState
|
from freqtrade.enums import HyperoptState
|
||||||
from freqtrade.exceptions import OperationalException
|
from freqtrade.exceptions import OperationalException
|
||||||
from freqtrade.misc import deep_merge_dicts, file_dump_json, plural
|
from freqtrade.misc import deep_merge_dicts, file_dump_json, plural
|
||||||
@@ -111,6 +112,7 @@ class Hyperopt:
|
|||||||
|
|
||||||
self.clean_hyperopt()
|
self.clean_hyperopt()
|
||||||
|
|
||||||
|
self.market_change = 0.0
|
||||||
self.num_epochs_saved = 0
|
self.num_epochs_saved = 0
|
||||||
self.current_best_epoch: Optional[Dict[str, Any]] = None
|
self.current_best_epoch: Optional[Dict[str, Any]] = None
|
||||||
|
|
||||||
@@ -357,7 +359,7 @@ class Hyperopt:
|
|||||||
|
|
||||||
strat_stats = generate_strategy_stats(
|
strat_stats = generate_strategy_stats(
|
||||||
self.pairlist, self.backtesting.strategy.get_strategy_name(),
|
self.pairlist, self.backtesting.strategy.get_strategy_name(),
|
||||||
backtesting_results, min_date, max_date, market_change=0
|
backtesting_results, min_date, max_date, market_change=self.market_change
|
||||||
)
|
)
|
||||||
results_explanation = HyperoptTools.format_results_explanation_string(
|
results_explanation = HyperoptTools.format_results_explanation_string(
|
||||||
strat_stats, self.config['stake_currency'])
|
strat_stats, self.config['stake_currency'])
|
||||||
@@ -425,6 +427,9 @@ class Hyperopt:
|
|||||||
# Trim startup period from analyzed dataframe to get correct dates for output.
|
# Trim startup period from analyzed dataframe to get correct dates for output.
|
||||||
trimmed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
|
trimmed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
|
||||||
self.min_date, self.max_date = get_timerange(trimmed)
|
self.min_date, self.max_date = get_timerange(trimmed)
|
||||||
|
if not self.market_change:
|
||||||
|
self.market_change = calculate_market_change(trimmed, 'close')
|
||||||
|
|
||||||
# Real trimming will happen as part of backtesting.
|
# Real trimming will happen as part of backtesting.
|
||||||
return preprocessed
|
return preprocessed
|
||||||
|
|
||||||
|
90
freqtrade/plugins/pairlist/ProducerPairList.py
Normal file
90
freqtrade/plugins/pairlist/ProducerPairList.py
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
"""
|
||||||
|
External Pair List provider
|
||||||
|
|
||||||
|
Provides pair list from Leader data
|
||||||
|
"""
|
||||||
|
import logging
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
from freqtrade.exceptions import OperationalException
|
||||||
|
from freqtrade.plugins.pairlist.IPairList import IPairList
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ProducerPairList(IPairList):
|
||||||
|
"""
|
||||||
|
PairList plugin for use with external_message_consumer.
|
||||||
|
Will use pairs given from leader data.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
"pairlists": [
|
||||||
|
{
|
||||||
|
"method": "ProducerPairList",
|
||||||
|
"number_assets": 5,
|
||||||
|
"producer_name": "default",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, exchange, pairlistmanager,
|
||||||
|
config: Dict[str, Any], pairlistconfig: Dict[str, Any],
|
||||||
|
pairlist_pos: int) -> None:
|
||||||
|
super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)
|
||||||
|
|
||||||
|
self._num_assets: int = self._pairlistconfig.get('number_assets', 0)
|
||||||
|
self._producer_name = self._pairlistconfig.get('producer_name', 'default')
|
||||||
|
if not config.get('external_message_consumer', {}).get('enabled'):
|
||||||
|
raise OperationalException(
|
||||||
|
"ProducerPairList requires external_message_consumer to be enabled.")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def needstickers(self) -> bool:
|
||||||
|
"""
|
||||||
|
Boolean property defining if tickers are necessary.
|
||||||
|
If no Pairlist requires tickers, an empty Dict is passed
|
||||||
|
as tickers argument to filter_pairlist
|
||||||
|
"""
|
||||||
|
return False
|
||||||
|
|
||||||
|
def short_desc(self) -> str:
|
||||||
|
"""
|
||||||
|
Short whitelist method description - used for startup-messages
|
||||||
|
-> Please overwrite in subclasses
|
||||||
|
"""
|
||||||
|
return f"{self.name} - {self._producer_name}"
|
||||||
|
|
||||||
|
def _filter_pairlist(self, pairlist: Optional[List[str]]):
|
||||||
|
upstream_pairlist = self._pairlistmanager._dataprovider.get_producer_pairs(
|
||||||
|
self._producer_name)
|
||||||
|
|
||||||
|
if pairlist is None:
|
||||||
|
pairlist = self._pairlistmanager._dataprovider.get_producer_pairs(self._producer_name)
|
||||||
|
|
||||||
|
pairs = list(dict.fromkeys(pairlist + upstream_pairlist))
|
||||||
|
if self._num_assets:
|
||||||
|
pairs = pairs[:self._num_assets]
|
||||||
|
|
||||||
|
return pairs
|
||||||
|
|
||||||
|
def gen_pairlist(self, tickers: Dict) -> List[str]:
|
||||||
|
"""
|
||||||
|
Generate the pairlist
|
||||||
|
:param tickers: Tickers (from exchange.get_tickers()). May be cached.
|
||||||
|
:return: List of pairs
|
||||||
|
"""
|
||||||
|
pairs = self._filter_pairlist(None)
|
||||||
|
self.log_once(f"Received pairs: {pairs}", logger.debug)
|
||||||
|
pairs = self._whitelist_for_active_markets(self.verify_whitelist(pairs, logger.info))
|
||||||
|
return pairs
|
||||||
|
|
||||||
|
def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
|
||||||
|
"""
|
||||||
|
Filters and sorts pairlist and returns the whitelist again.
|
||||||
|
Called on each bot iteration - please use internal caching if necessary
|
||||||
|
:param pairlist: pairlist to filter or sort
|
||||||
|
:param tickers: Tickers (from exchange.get_tickers()). May be cached.
|
||||||
|
:return: new whitelist
|
||||||
|
"""
|
||||||
|
return self._filter_pairlist(pairlist)
|
@@ -232,6 +232,4 @@ class VolumePairList(IPairList):
|
|||||||
# Limit pairlist to the requested number of pairs
|
# Limit pairlist to the requested number of pairs
|
||||||
pairs = pairs[:self._number_pairs]
|
pairs = pairs[:self._number_pairs]
|
||||||
|
|
||||||
self.log_once(f"Searching {self._number_pairs} pairs: {pairs}", logger.info)
|
|
||||||
|
|
||||||
return pairs
|
return pairs
|
||||||
|
@@ -3,11 +3,12 @@ PairList manager class
|
|||||||
"""
|
"""
|
||||||
import logging
|
import logging
|
||||||
from functools import partial
|
from functools import partial
|
||||||
from typing import Dict, List
|
from typing import Dict, List, Optional
|
||||||
|
|
||||||
from cachetools import TTLCache, cached
|
from cachetools import TTLCache, cached
|
||||||
|
|
||||||
from freqtrade.constants import Config, ListPairsWithTimeframes
|
from freqtrade.constants import Config, ListPairsWithTimeframes
|
||||||
|
from freqtrade.data.dataprovider import DataProvider
|
||||||
from freqtrade.enums import CandleType
|
from freqtrade.enums import CandleType
|
||||||
from freqtrade.exceptions import OperationalException
|
from freqtrade.exceptions import OperationalException
|
||||||
from freqtrade.mixins import LoggingMixin
|
from freqtrade.mixins import LoggingMixin
|
||||||
@@ -21,13 +22,14 @@ logger = logging.getLogger(__name__)
|
|||||||
|
|
||||||
class PairListManager(LoggingMixin):
|
class PairListManager(LoggingMixin):
|
||||||
|
|
||||||
def __init__(self, exchange, config: Config) -> None:
|
def __init__(self, exchange, config: Config, dataprovider: DataProvider = None) -> None:
|
||||||
self._exchange = exchange
|
self._exchange = exchange
|
||||||
self._config = config
|
self._config = config
|
||||||
self._whitelist = self._config['exchange'].get('pair_whitelist')
|
self._whitelist = self._config['exchange'].get('pair_whitelist')
|
||||||
self._blacklist = self._config['exchange'].get('pair_blacklist', [])
|
self._blacklist = self._config['exchange'].get('pair_blacklist', [])
|
||||||
self._pairlist_handlers: List[IPairList] = []
|
self._pairlist_handlers: List[IPairList] = []
|
||||||
self._tickers_needed = False
|
self._tickers_needed = False
|
||||||
|
self._dataprovider: Optional[DataProvider] = dataprovider
|
||||||
for pairlist_handler_config in self._config.get('pairlists', []):
|
for pairlist_handler_config in self._config.get('pairlists', []):
|
||||||
pairlist_handler = PairListResolver.load_pairlist(
|
pairlist_handler = PairListResolver.load_pairlist(
|
||||||
pairlist_handler_config['method'],
|
pairlist_handler_config['method'],
|
||||||
@@ -96,6 +98,8 @@ class PairListManager(LoggingMixin):
|
|||||||
# to ensure blacklist is respected.
|
# to ensure blacklist is respected.
|
||||||
pairlist = self.verify_blacklist(pairlist, logger.warning)
|
pairlist = self.verify_blacklist(pairlist, logger.warning)
|
||||||
|
|
||||||
|
self.log_once(f"Whitelist with {len(pairlist)} pairs: {pairlist}", logger.info)
|
||||||
|
|
||||||
self._whitelist = pairlist
|
self._whitelist = pairlist
|
||||||
|
|
||||||
def verify_blacklist(self, pairlist: List[str], logmethod) -> List[str]:
|
def verify_blacklist(self, pairlist: List[str], logmethod) -> List[str]:
|
||||||
|
@@ -29,7 +29,9 @@ nav:
|
|||||||
- Parameter table: freqai-parameter-table.md
|
- Parameter table: freqai-parameter-table.md
|
||||||
- Feature engineering: freqai-feature-engineering.md
|
- Feature engineering: freqai-feature-engineering.md
|
||||||
- Running FreqAI: freqai-running.md
|
- Running FreqAI: freqai-running.md
|
||||||
|
- Reinforcement Learning: freqai-reinforcement-learning.md
|
||||||
- Developer guide: freqai-developers.md
|
- Developer guide: freqai-developers.md
|
||||||
|
- JOSS paper: paper.md
|
||||||
- Short / Leverage: leverage.md
|
- Short / Leverage: leverage.md
|
||||||
- Utility Sub-commands: utils.md
|
- Utility Sub-commands: utils.md
|
||||||
- Plotting: plotting.md
|
- Plotting: plotting.md
|
||||||
|
@@ -8,16 +8,16 @@
|
|||||||
coveralls==3.3.1
|
coveralls==3.3.1
|
||||||
flake8==5.0.4
|
flake8==5.0.4
|
||||||
flake8-tidy-imports==4.8.0
|
flake8-tidy-imports==4.8.0
|
||||||
mypy==0.971
|
mypy==0.981
|
||||||
pre-commit==2.20.0
|
pre-commit==2.20.0
|
||||||
pytest==7.1.3
|
pytest==7.1.3
|
||||||
pytest-asyncio==0.19.0
|
pytest-asyncio==0.19.0
|
||||||
pytest-cov==3.0.0
|
pytest-cov==4.0.0
|
||||||
pytest-mock==3.8.2
|
pytest-mock==3.9.0
|
||||||
pytest-random-order==1.0.4
|
pytest-random-order==1.0.4
|
||||||
isort==5.10.1
|
isort==5.10.1
|
||||||
# For datetime mocking
|
# For datetime mocking
|
||||||
time-machine==2.8.1
|
time-machine==2.8.2
|
||||||
|
|
||||||
# Convert jupyter notebooks to markdown documents
|
# Convert jupyter notebooks to markdown documents
|
||||||
nbconvert==7.0.0
|
nbconvert==7.0.0
|
||||||
|
8
requirements-freqai-rl.txt
Normal file
8
requirements-freqai-rl.txt
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
# Include all requirements to run the bot.
|
||||||
|
-r requirements-freqai.txt
|
||||||
|
|
||||||
|
# Required for freqai-rl
|
||||||
|
torch==1.12.1
|
||||||
|
stable-baselines3==1.6.1
|
||||||
|
gym==0.26.2
|
||||||
|
sb3-contrib==1.6.1
|
@@ -4,6 +4,10 @@
|
|||||||
# Required for freqai
|
# Required for freqai
|
||||||
scikit-learn==1.1.2
|
scikit-learn==1.1.2
|
||||||
joblib==1.2.0
|
joblib==1.2.0
|
||||||
catboost==1.0.6; platform_machine != 'aarch64'
|
catboost==1.1; platform_machine != 'aarch64'
|
||||||
lightgbm==3.3.2
|
lightgbm==3.3.2
|
||||||
xgboost==1.6.2
|
xgboost==1.6.2
|
||||||
|
torch==1.12.1
|
||||||
|
stable-baselines3==1.6.1
|
||||||
|
gym==0.26.2
|
||||||
|
sb3-contrib==1.6.1
|
||||||
|
@@ -4,7 +4,7 @@ pandas==1.5.0; platform_machine != 'armv7l'
|
|||||||
pandas==1.4.3; platform_machine == 'armv7l'
|
pandas==1.4.3; platform_machine == 'armv7l'
|
||||||
pandas-ta==0.3.14b
|
pandas-ta==0.3.14b
|
||||||
|
|
||||||
ccxt==1.93.98
|
ccxt==1.95.2
|
||||||
# Pin cryptography for now due to rust build errors with piwheels
|
# Pin cryptography for now due to rust build errors with piwheels
|
||||||
cryptography==38.0.1
|
cryptography==38.0.1
|
||||||
aiohttp==3.8.3
|
aiohttp==3.8.3
|
||||||
@@ -38,6 +38,7 @@ sdnotify==0.3.2
|
|||||||
|
|
||||||
# API Server
|
# API Server
|
||||||
fastapi==0.85.0
|
fastapi==0.85.0
|
||||||
|
pydantic>=1.8.0
|
||||||
uvicorn==0.18.3
|
uvicorn==0.18.3
|
||||||
pyjwt==2.5.0
|
pyjwt==2.5.0
|
||||||
aiofiles==22.1.0
|
aiofiles==22.1.0
|
||||||
|
1
setup.py
1
setup.py
@@ -75,6 +75,7 @@ setup(
|
|||||||
'joblib>=1.2.0',
|
'joblib>=1.2.0',
|
||||||
'pyarrow; platform_machine != "armv7l"',
|
'pyarrow; platform_machine != "armv7l"',
|
||||||
'fastapi',
|
'fastapi',
|
||||||
|
'pydantic>=1.8.0',
|
||||||
'uvicorn',
|
'uvicorn',
|
||||||
'psutil',
|
'psutil',
|
||||||
'pyjwt',
|
'pyjwt',
|
||||||
|
9
setup.sh
9
setup.sh
@@ -78,14 +78,21 @@ function updateenv() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
REQUIREMENTS_FREQAI=""
|
REQUIREMENTS_FREQAI=""
|
||||||
|
REQUIREMENTS_FREQAI_RL=""
|
||||||
read -p "Do you want to install dependencies for freqai [y/N]? "
|
read -p "Do you want to install dependencies for freqai [y/N]? "
|
||||||
dev=$REPLY
|
dev=$REPLY
|
||||||
if [[ $REPLY =~ ^[Yy]$ ]]
|
if [[ $REPLY =~ ^[Yy]$ ]]
|
||||||
then
|
then
|
||||||
REQUIREMENTS_FREQAI="-r requirements-freqai.txt"
|
REQUIREMENTS_FREQAI="-r requirements-freqai.txt"
|
||||||
|
read -p "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]? "
|
||||||
|
dev=$REPLY
|
||||||
|
if [[ $REPLY =~ ^[Yy]$ ]]
|
||||||
|
then
|
||||||
|
REQUIREMENTS_FREQAI="-r requirements-freqai-rl.txt"
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
${PYTHON} -m pip install --upgrade -r ${REQUIREMENTS} ${REQUIREMENTS_HYPEROPT} ${REQUIREMENTS_PLOT} ${REQUIREMENTS_FREQAI}
|
${PYTHON} -m pip install --upgrade -r ${REQUIREMENTS} ${REQUIREMENTS_HYPEROPT} ${REQUIREMENTS_PLOT} ${REQUIREMENTS_FREQAI} ${REQUIREMENTS_FREQAI_RL}
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
echo "Failed installing dependencies"
|
echo "Failed installing dependencies"
|
||||||
exit 1
|
exit 1
|
||||||
|
@@ -200,6 +200,8 @@ def patch_freqtradebot(mocker, config) -> None:
|
|||||||
mocker.patch('freqtrade.freqtradebot.RPCManager._init', MagicMock())
|
mocker.patch('freqtrade.freqtradebot.RPCManager._init', MagicMock())
|
||||||
mocker.patch('freqtrade.freqtradebot.RPCManager.send_msg', MagicMock())
|
mocker.patch('freqtrade.freqtradebot.RPCManager.send_msg', MagicMock())
|
||||||
patch_whitelist(mocker, config)
|
patch_whitelist(mocker, config)
|
||||||
|
mocker.patch('freqtrade.freqtradebot.ExternalMessageConsumer')
|
||||||
|
mocker.patch('freqtrade.configuration.config_validation._validate_consumers')
|
||||||
|
|
||||||
|
|
||||||
def get_patched_freqtradebot(mocker, config) -> FreqtradeBot:
|
def get_patched_freqtradebot(mocker, config) -> FreqtradeBot:
|
||||||
|
@@ -235,7 +235,7 @@ def test_calculate_market_change(testdatadir):
|
|||||||
data = load_data(datadir=testdatadir, pairs=pairs, timeframe='5m')
|
data = load_data(datadir=testdatadir, pairs=pairs, timeframe='5m')
|
||||||
result = calculate_market_change(data)
|
result = calculate_market_change(data)
|
||||||
assert isinstance(result, float)
|
assert isinstance(result, float)
|
||||||
assert pytest.approx(result) == 0.00955514
|
assert pytest.approx(result) == 0.01100002
|
||||||
|
|
||||||
|
|
||||||
def test_combine_dataframes_with_mean(testdatadir):
|
def test_combine_dataframes_with_mean(testdatadir):
|
||||||
|
@@ -139,10 +139,10 @@ def test_jsondatahandler_ohlcv_purge(mocker, testdatadir):
|
|||||||
def test_jsondatahandler_ohlcv_load(testdatadir, caplog):
|
def test_jsondatahandler_ohlcv_load(testdatadir, caplog):
|
||||||
dh = JsonDataHandler(testdatadir)
|
dh = JsonDataHandler(testdatadir)
|
||||||
df = dh.ohlcv_load('XRP/ETH', '5m', 'spot')
|
df = dh.ohlcv_load('XRP/ETH', '5m', 'spot')
|
||||||
assert len(df) == 711
|
assert len(df) == 712
|
||||||
|
|
||||||
df_mark = dh.ohlcv_load('UNITTEST/USDT', '1h', candle_type="mark")
|
df_mark = dh.ohlcv_load('UNITTEST/USDT', '1h', candle_type="mark")
|
||||||
assert len(df_mark) == 99
|
assert len(df_mark) == 100
|
||||||
|
|
||||||
df_no_mark = dh.ohlcv_load('UNITTEST/USDT', '1h', 'spot')
|
df_no_mark = dh.ohlcv_load('UNITTEST/USDT', '1h', 'spot')
|
||||||
assert len(df_no_mark) == 0
|
assert len(df_no_mark) == 0
|
||||||
|
@@ -124,8 +124,8 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp
|
|||||||
assert '0' in captured.out
|
assert '0' in captured.out
|
||||||
assert '0.01616' in captured.out
|
assert '0.01616' in captured.out
|
||||||
assert '34.049' in captured.out
|
assert '34.049' in captured.out
|
||||||
assert '0.104104' in captured.out
|
assert '0.104411' in captured.out
|
||||||
assert '47.0996' in captured.out
|
assert '52.8292' in captured.out
|
||||||
|
|
||||||
# test group 1
|
# test group 1
|
||||||
args = get_args(base_args + ['--analysis-groups', "1"])
|
args = get_args(base_args + ['--analysis-groups', "1"])
|
||||||
|
@@ -377,8 +377,8 @@ def test_load_partial_missing(testdatadir, caplog) -> None:
|
|||||||
td = ((end - start).total_seconds() // 60 // 5) + 1
|
td = ((end - start).total_seconds() // 60 // 5) + 1
|
||||||
assert td != len(data['UNITTEST/BTC'])
|
assert td != len(data['UNITTEST/BTC'])
|
||||||
|
|
||||||
# Shift endtime with +5 - as last candle is dropped (partial candle)
|
# Shift endtime with +5
|
||||||
end_real = arrow.get(data['UNITTEST/BTC'].iloc[-1, 0]).shift(minutes=5)
|
end_real = arrow.get(data['UNITTEST/BTC'].iloc[-1, 0])
|
||||||
assert log_has(f'UNITTEST/BTC, spot, 5m, '
|
assert log_has(f'UNITTEST/BTC, spot, 5m, '
|
||||||
f'data ends at {end_real.strftime(DATETIME_PRINT_FORMAT)}',
|
f'data ends at {end_real.strftime(DATETIME_PRINT_FORMAT)}',
|
||||||
caplog)
|
caplog)
|
||||||
@@ -447,7 +447,7 @@ def test_get_timerange(default_conf, mocker, testdatadir) -> None:
|
|||||||
)
|
)
|
||||||
min_date, max_date = get_timerange(data)
|
min_date, max_date = get_timerange(data)
|
||||||
assert min_date.isoformat() == '2017-11-04T23:02:00+00:00'
|
assert min_date.isoformat() == '2017-11-04T23:02:00+00:00'
|
||||||
assert max_date.isoformat() == '2017-11-14T22:58:00+00:00'
|
assert max_date.isoformat() == '2017-11-14T22:59:00+00:00'
|
||||||
|
|
||||||
|
|
||||||
def test_validate_backtest_data_warn(default_conf, mocker, caplog, testdatadir) -> None:
|
def test_validate_backtest_data_warn(default_conf, mocker, caplog, testdatadir) -> None:
|
||||||
@@ -470,7 +470,7 @@ def test_validate_backtest_data_warn(default_conf, mocker, caplog, testdatadir)
|
|||||||
min_date, max_date, timeframe_to_minutes('1m'))
|
min_date, max_date, timeframe_to_minutes('1m'))
|
||||||
assert len(caplog.record_tuples) == 1
|
assert len(caplog.record_tuples) == 1
|
||||||
assert log_has(
|
assert log_has(
|
||||||
"UNITTEST/BTC has missing frames: expected 14396, got 13680, that's 716 missing values",
|
"UNITTEST/BTC has missing frames: expected 14397, got 13681, that's 716 missing values",
|
||||||
caplog)
|
caplog)
|
||||||
|
|
||||||
|
|
||||||
@@ -480,7 +480,7 @@ def test_validate_backtest_data(default_conf, mocker, caplog, testdatadir) -> No
|
|||||||
default_conf.update({'strategy': CURRENT_TEST_STRATEGY})
|
default_conf.update({'strategy': CURRENT_TEST_STRATEGY})
|
||||||
strategy = StrategyResolver.load_strategy(default_conf)
|
strategy = StrategyResolver.load_strategy(default_conf)
|
||||||
|
|
||||||
timerange = TimeRange('index', 'index', 200, 250)
|
timerange = TimeRange()
|
||||||
data = strategy.advise_all_indicators(
|
data = strategy.advise_all_indicators(
|
||||||
load_data(
|
load_data(
|
||||||
datadir=testdatadir,
|
datadir=testdatadir,
|
||||||
|
@@ -501,6 +501,24 @@ def test_fill_leverage_tiers_binance_dryrun(default_conf, mocker, leverage_tiers
|
|||||||
assert len(v) == len(value)
|
assert len(v) == len(value)
|
||||||
|
|
||||||
|
|
||||||
|
def test_additional_exchange_init_binance(default_conf, mocker):
|
||||||
|
api_mock = MagicMock()
|
||||||
|
api_mock.fapiPrivateGetPositionsideDual = MagicMock(return_value={"dualSidePosition": True})
|
||||||
|
api_mock.fapiPrivateGetMultiAssetsMargin = MagicMock(return_value={"multiAssetsMargin": True})
|
||||||
|
default_conf['dry_run'] = False
|
||||||
|
default_conf['trading_mode'] = TradingMode.FUTURES
|
||||||
|
default_conf['margin_mode'] = MarginMode.ISOLATED
|
||||||
|
with pytest.raises(OperationalException,
|
||||||
|
match=r"Hedge Mode is not supported.*\nMulti-Asset Mode is not supported.*"):
|
||||||
|
get_patched_exchange(mocker, default_conf, id="binance", api_mock=api_mock)
|
||||||
|
api_mock.fapiPrivateGetPositionsideDual = MagicMock(return_value={"dualSidePosition": False})
|
||||||
|
api_mock.fapiPrivateGetMultiAssetsMargin = MagicMock(return_value={"multiAssetsMargin": False})
|
||||||
|
exchange = get_patched_exchange(mocker, default_conf, id="binance", api_mock=api_mock)
|
||||||
|
assert exchange
|
||||||
|
ccxt_exceptionhandlers(mocker, default_conf, api_mock, 'binance',
|
||||||
|
"additional_exchange_init", "fapiPrivateGetPositionsideDual")
|
||||||
|
|
||||||
|
|
||||||
def test__set_leverage_binance(mocker, default_conf):
|
def test__set_leverage_binance(mocker, default_conf):
|
||||||
|
|
||||||
api_mock = MagicMock()
|
api_mock = MagicMock()
|
||||||
|
@@ -137,6 +137,7 @@ def exchange_futures(request, exchange_conf, class_mocker):
|
|||||||
'freqtrade.exchange.binance.Binance.fill_leverage_tiers')
|
'freqtrade.exchange.binance.Binance.fill_leverage_tiers')
|
||||||
class_mocker.patch('freqtrade.exchange.exchange.Exchange.fetch_trading_fees')
|
class_mocker.patch('freqtrade.exchange.exchange.Exchange.fetch_trading_fees')
|
||||||
class_mocker.patch('freqtrade.exchange.okx.Okx.additional_exchange_init')
|
class_mocker.patch('freqtrade.exchange.okx.Okx.additional_exchange_init')
|
||||||
|
class_mocker.patch('freqtrade.exchange.binance.Binance.additional_exchange_init')
|
||||||
class_mocker.patch('freqtrade.exchange.exchange.Exchange.load_cached_leverage_tiers',
|
class_mocker.patch('freqtrade.exchange.exchange.Exchange.load_cached_leverage_tiers',
|
||||||
return_value=None)
|
return_value=None)
|
||||||
class_mocker.patch('freqtrade.exchange.exchange.Exchange.cache_leverage_tiers')
|
class_mocker.patch('freqtrade.exchange.exchange.Exchange.cache_leverage_tiers')
|
||||||
|
85
tests/exchange/test_exchange_utils.py
Normal file
85
tests/exchange/test_exchange_utils.py
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
# pragma pylint: disable=missing-docstring, protected-access, invalid-name
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from freqtrade.enums import RunMode
|
||||||
|
from freqtrade.exceptions import OperationalException
|
||||||
|
from freqtrade.exchange.check_exchange import check_exchange
|
||||||
|
from tests.conftest import log_has_re
|
||||||
|
|
||||||
|
|
||||||
|
def test_check_exchange(default_conf, caplog) -> None:
|
||||||
|
# Test an officially supported by Freqtrade team exchange
|
||||||
|
default_conf['runmode'] = RunMode.DRY_RUN
|
||||||
|
default_conf.get('exchange').update({'name': 'BITTREX'})
|
||||||
|
assert check_exchange(default_conf)
|
||||||
|
assert log_has_re(r"Exchange .* is officially supported by the Freqtrade development team\.",
|
||||||
|
caplog)
|
||||||
|
caplog.clear()
|
||||||
|
|
||||||
|
# Test an officially supported by Freqtrade team exchange
|
||||||
|
default_conf.get('exchange').update({'name': 'binance'})
|
||||||
|
assert check_exchange(default_conf)
|
||||||
|
assert log_has_re(
|
||||||
|
r"Exchange \"binance\" is officially supported by the Freqtrade development team\.",
|
||||||
|
caplog)
|
||||||
|
caplog.clear()
|
||||||
|
|
||||||
|
# Test an officially supported by Freqtrade team exchange
|
||||||
|
default_conf.get('exchange').update({'name': 'binanceus'})
|
||||||
|
assert check_exchange(default_conf)
|
||||||
|
assert log_has_re(
|
||||||
|
r"Exchange \"binanceus\" is officially supported by the Freqtrade development team\.",
|
||||||
|
caplog)
|
||||||
|
caplog.clear()
|
||||||
|
|
||||||
|
# Test an officially supported by Freqtrade team exchange - with remapping
|
||||||
|
default_conf.get('exchange').update({'name': 'okex'})
|
||||||
|
assert check_exchange(default_conf)
|
||||||
|
assert log_has_re(
|
||||||
|
r"Exchange \"okex\" is officially supported by the Freqtrade development team\.",
|
||||||
|
caplog)
|
||||||
|
caplog.clear()
|
||||||
|
# Test an available exchange, supported by ccxt
|
||||||
|
default_conf.get('exchange').update({'name': 'huobipro'})
|
||||||
|
assert check_exchange(default_conf)
|
||||||
|
assert log_has_re(r"Exchange .* is known to the the ccxt library, available for the bot, "
|
||||||
|
r"but not officially supported "
|
||||||
|
r"by the Freqtrade development team\. .*", caplog)
|
||||||
|
caplog.clear()
|
||||||
|
|
||||||
|
# Test a 'bad' exchange, which known to have serious problems
|
||||||
|
default_conf.get('exchange').update({'name': 'bitmex'})
|
||||||
|
with pytest.raises(OperationalException,
|
||||||
|
match=r"Exchange .* will not work with Freqtrade\..*"):
|
||||||
|
check_exchange(default_conf)
|
||||||
|
caplog.clear()
|
||||||
|
|
||||||
|
# Test a 'bad' exchange with check_for_bad=False
|
||||||
|
default_conf.get('exchange').update({'name': 'bitmex'})
|
||||||
|
assert check_exchange(default_conf, False)
|
||||||
|
assert log_has_re(r"Exchange .* is known to the the ccxt library, available for the bot, "
|
||||||
|
r"but not officially supported "
|
||||||
|
r"by the Freqtrade development team\. .*", caplog)
|
||||||
|
caplog.clear()
|
||||||
|
|
||||||
|
# Test an invalid exchange
|
||||||
|
default_conf.get('exchange').update({'name': 'unknown_exchange'})
|
||||||
|
with pytest.raises(
|
||||||
|
OperationalException,
|
||||||
|
match=r'Exchange "unknown_exchange" is not known to the ccxt library '
|
||||||
|
r'and therefore not available for the bot.*'
|
||||||
|
):
|
||||||
|
check_exchange(default_conf)
|
||||||
|
|
||||||
|
# Test no exchange...
|
||||||
|
default_conf.get('exchange').update({'name': ''})
|
||||||
|
default_conf['runmode'] = RunMode.PLOT
|
||||||
|
assert check_exchange(default_conf)
|
||||||
|
|
||||||
|
# Test no exchange...
|
||||||
|
default_conf.get('exchange').update({'name': ''})
|
||||||
|
default_conf['runmode'] = RunMode.UTIL_EXCHANGE
|
||||||
|
with pytest.raises(OperationalException,
|
||||||
|
match=r'This command requires a configured exchange.*'):
|
||||||
|
check_exchange(default_conf)
|
@@ -29,15 +29,16 @@ def freqai_conf(default_conf, tmpdir):
|
|||||||
"enabled": True,
|
"enabled": True,
|
||||||
"startup_candles": 10000,
|
"startup_candles": 10000,
|
||||||
"purge_old_models": True,
|
"purge_old_models": True,
|
||||||
"train_period_days": 5,
|
"train_period_days": 2,
|
||||||
"backtest_period_days": 2,
|
"backtest_period_days": 2,
|
||||||
"live_retrain_hours": 0,
|
"live_retrain_hours": 0,
|
||||||
"expiration_hours": 1,
|
"expiration_hours": 1,
|
||||||
"identifier": "uniqe-id100",
|
"identifier": "uniqe-id100",
|
||||||
"live_trained_timestamp": 0,
|
"live_trained_timestamp": 0,
|
||||||
|
"data_kitchen_thread_count": 2,
|
||||||
"feature_parameters": {
|
"feature_parameters": {
|
||||||
"include_timeframes": ["5m"],
|
"include_timeframes": ["5m"],
|
||||||
"include_corr_pairlist": ["ADA/BTC", "DASH/BTC"],
|
"include_corr_pairlist": ["ADA/BTC"],
|
||||||
"label_period_candles": 20,
|
"label_period_candles": 20,
|
||||||
"include_shifted_candles": 1,
|
"include_shifted_candles": 1,
|
||||||
"DI_threshold": 0.9,
|
"DI_threshold": 0.9,
|
||||||
@@ -47,7 +48,7 @@ def freqai_conf(default_conf, tmpdir):
|
|||||||
"stratify_training_data": 0,
|
"stratify_training_data": 0,
|
||||||
"indicator_periods_candles": [10],
|
"indicator_periods_candles": [10],
|
||||||
},
|
},
|
||||||
"data_split_parameters": {"test_size": 0.33, "random_state": 1},
|
"data_split_parameters": {"test_size": 0.33, "shuffle": False},
|
||||||
"model_training_parameters": {"n_estimators": 100},
|
"model_training_parameters": {"n_estimators": 100},
|
||||||
},
|
},
|
||||||
"config_files": [Path('config_examples', 'config_freqai.example.json')]
|
"config_files": [Path('config_examples', 'config_freqai.example.json')]
|
||||||
|
@@ -90,5 +90,5 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf):
|
|||||||
|
|
||||||
df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, 'LTC/BTC')
|
df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, 'LTC/BTC')
|
||||||
|
|
||||||
assert len(df.columns) == 45
|
assert len(df.columns) == 33
|
||||||
shutil.rmtree(Path(freqai.dk.full_path))
|
shutil.rmtree(Path(freqai.dk.full_path))
|
||||||
|
@@ -71,14 +71,14 @@ def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog):
|
|||||||
freqai = make_data_dictionary(mocker, freqai_conf)
|
freqai = make_data_dictionary(mocker, freqai_conf)
|
||||||
# freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1})
|
# freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1})
|
||||||
freqai.dk.use_DBSCAN_to_remove_outliers(predict=False)
|
freqai.dk.use_DBSCAN_to_remove_outliers(predict=False)
|
||||||
assert log_has_re(r"DBSCAN found eps of 2\.3\d\.", caplog)
|
assert log_has_re(r"DBSCAN found eps of 1\.7\d\.", caplog)
|
||||||
|
|
||||||
|
|
||||||
def test_compute_distances(mocker, freqai_conf):
|
def test_compute_distances(mocker, freqai_conf):
|
||||||
freqai = make_data_dictionary(mocker, freqai_conf)
|
freqai = make_data_dictionary(mocker, freqai_conf)
|
||||||
freqai_conf['freqai']['feature_parameters'].update({"DI_threshold": 1})
|
freqai_conf['freqai']['feature_parameters'].update({"DI_threshold": 1})
|
||||||
avg_mean_dist = freqai.dk.compute_distances()
|
avg_mean_dist = freqai.dk.compute_distances()
|
||||||
assert round(avg_mean_dist, 2) == 2.54
|
assert round(avg_mean_dist, 2) == 1.99
|
||||||
|
|
||||||
|
|
||||||
def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, caplog):
|
def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, caplog):
|
||||||
@@ -86,7 +86,7 @@ def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf,
|
|||||||
freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 0.1})
|
freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 0.1})
|
||||||
freqai.dk.use_SVM_to_remove_outliers(predict=False)
|
freqai.dk.use_SVM_to_remove_outliers(predict=False)
|
||||||
assert log_has_re(
|
assert log_has_re(
|
||||||
"SVM detected 8.09%",
|
"SVM detected 7.36%",
|
||||||
caplog,
|
caplog,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -125,7 +125,7 @@ def test_normalize_data(mocker, freqai_conf):
|
|||||||
freqai = make_data_dictionary(mocker, freqai_conf)
|
freqai = make_data_dictionary(mocker, freqai_conf)
|
||||||
data_dict = freqai.dk.data_dictionary
|
data_dict = freqai.dk.data_dictionary
|
||||||
freqai.dk.normalize_data(data_dict)
|
freqai.dk.normalize_data(data_dict)
|
||||||
assert len(freqai.dk.data) == 56
|
assert len(freqai.dk.data) == 32
|
||||||
|
|
||||||
|
|
||||||
def test_filter_features(mocker, freqai_conf):
|
def test_filter_features(mocker, freqai_conf):
|
||||||
@@ -139,7 +139,7 @@ def test_filter_features(mocker, freqai_conf):
|
|||||||
training_filter=True,
|
training_filter=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
assert len(filtered_df.columns) == 26
|
assert len(filtered_df.columns) == 14
|
||||||
|
|
||||||
|
|
||||||
def test_make_train_test_datasets(mocker, freqai_conf):
|
def test_make_train_test_datasets(mocker, freqai_conf):
|
||||||
|
@@ -7,7 +7,11 @@ import pytest
|
|||||||
|
|
||||||
from freqtrade.configuration import TimeRange
|
from freqtrade.configuration import TimeRange
|
||||||
from freqtrade.data.dataprovider import DataProvider
|
from freqtrade.data.dataprovider import DataProvider
|
||||||
|
from freqtrade.enums import RunMode
|
||||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||||
|
from freqtrade.freqai.utils import download_all_data_for_training, get_required_data_timerange
|
||||||
|
from freqtrade.optimize.backtesting import Backtesting
|
||||||
|
from freqtrade.persistence import Trade
|
||||||
from freqtrade.plugins.pairlistmanager import PairListManager
|
from freqtrade.plugins.pairlistmanager import PairListManager
|
||||||
from tests.conftest import get_patched_exchange, log_has_re
|
from tests.conftest import get_patched_exchange, log_has_re
|
||||||
from tests.freqai.conftest import get_patched_freqai_strategy
|
from tests.freqai.conftest import get_patched_freqai_strategy
|
||||||
@@ -18,19 +22,56 @@ def is_arm() -> bool:
|
|||||||
return "arm" in machine or "aarch64" in machine
|
return "arm" in machine or "aarch64" in machine
|
||||||
|
|
||||||
|
|
||||||
|
def is_mac() -> bool:
|
||||||
|
machine = platform.system()
|
||||||
|
return "Darwin" in machine
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize('model', [
|
@pytest.mark.parametrize('model', [
|
||||||
'LightGBMRegressor',
|
'LightGBMRegressor',
|
||||||
'XGBoostRegressor',
|
'XGBoostRegressor',
|
||||||
'CatboostRegressor',
|
'CatboostRegressor',
|
||||||
|
'ReinforcementLearner',
|
||||||
|
'ReinforcementLearner_multiproc',
|
||||||
|
'ReinforcementLearner_test_4ac'
|
||||||
])
|
])
|
||||||
def test_extract_data_and_train_model_Regressors(mocker, freqai_conf, model):
|
def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model):
|
||||||
if is_arm() and model == 'CatboostRegressor':
|
if is_arm() and model == 'CatboostRegressor':
|
||||||
pytest.skip("CatBoost is not supported on ARM")
|
pytest.skip("CatBoost is not supported on ARM")
|
||||||
|
|
||||||
|
if is_mac():
|
||||||
|
pytest.skip("Reinforcement learning module not available on intel based Mac OS")
|
||||||
|
|
||||||
|
model_save_ext = 'joblib'
|
||||||
freqai_conf.update({"freqaimodel": model})
|
freqai_conf.update({"freqaimodel": model})
|
||||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||||
freqai_conf.update({"strategy": "freqai_test_strat"})
|
freqai_conf.update({"strategy": "freqai_test_strat"})
|
||||||
|
|
||||||
|
if 'ReinforcementLearner' in model:
|
||||||
|
model_save_ext = 'zip'
|
||||||
|
freqai_conf.update({"strategy": "freqai_rl_test_strat"})
|
||||||
|
freqai_conf["freqai"].update({"model_training_parameters": {
|
||||||
|
"learning_rate": 0.00025,
|
||||||
|
"gamma": 0.9,
|
||||||
|
"verbose": 1
|
||||||
|
}})
|
||||||
|
freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'})
|
||||||
|
freqai_conf["freqai"]["rl_config"] = {
|
||||||
|
"train_cycles": 1,
|
||||||
|
"thread_count": 2,
|
||||||
|
"max_trade_duration_candles": 300,
|
||||||
|
"model_type": "PPO",
|
||||||
|
"policy_type": "MlpPolicy",
|
||||||
|
"max_training_drawdown_pct": 0.5,
|
||||||
|
"model_reward_parameters": {
|
||||||
|
"rr": 1,
|
||||||
|
"profit_aim": 0.02,
|
||||||
|
"win_reward_factor": 2
|
||||||
|
}}
|
||||||
|
|
||||||
|
if 'test_4ac' in model:
|
||||||
|
freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models")
|
||||||
|
|
||||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||||
strategy.dp = DataProvider(freqai_conf, exchange)
|
strategy.dp = DataProvider(freqai_conf, exchange)
|
||||||
@@ -43,16 +84,16 @@ def test_extract_data_and_train_model_Regressors(mocker, freqai_conf, model):
|
|||||||
|
|
||||||
freqai.dd.pair_dict = MagicMock()
|
freqai.dd.pair_dict = MagicMock()
|
||||||
|
|
||||||
data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
|
data_load_timerange = TimeRange.parse_timerange("20180125-20180130")
|
||||||
new_timerange = TimeRange.parse_timerange("20180120-20180130")
|
new_timerange = TimeRange.parse_timerange("20180127-20180130")
|
||||||
|
|
||||||
freqai.extract_data_and_train_model(
|
freqai.extract_data_and_train_model(
|
||||||
new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
|
new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
|
||||||
|
|
||||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file()
|
assert Path(freqai.dk.data_path /
|
||||||
|
f"{freqai.dk.model_filename}_model.{model_save_ext}").is_file()
|
||||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file()
|
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file()
|
||||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file()
|
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file()
|
||||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").is_file()
|
|
||||||
|
|
||||||
shutil.rmtree(Path(freqai.dk.full_path))
|
shutil.rmtree(Path(freqai.dk.full_path))
|
||||||
|
|
||||||
@@ -92,7 +133,7 @@ def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model):
|
|||||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file()
|
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file()
|
||||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file()
|
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file()
|
||||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").is_file()
|
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").is_file()
|
||||||
assert len(freqai.dk.data['training_features_list']) == 26
|
assert len(freqai.dk.data['training_features_list']) == 14
|
||||||
|
|
||||||
shutil.rmtree(Path(freqai.dk.full_path))
|
shutil.rmtree(Path(freqai.dk.full_path))
|
||||||
|
|
||||||
@@ -136,9 +177,56 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
|
|||||||
shutil.rmtree(Path(freqai.dk.full_path))
|
shutil.rmtree(Path(freqai.dk.full_path))
|
||||||
|
|
||||||
|
|
||||||
def test_start_backtesting(mocker, freqai_conf):
|
@pytest.mark.parametrize(
|
||||||
freqai_conf.update({"timerange": "20180120-20180130"})
|
"model, num_files, strat",
|
||||||
|
[
|
||||||
|
("LightGBMRegressor", 6, "freqai_test_strat"),
|
||||||
|
("XGBoostRegressor", 6, "freqai_test_strat"),
|
||||||
|
("CatboostRegressor", 6, "freqai_test_strat"),
|
||||||
|
("ReinforcementLearner", 7, "freqai_rl_test_strat"),
|
||||||
|
("XGBoostClassifier", 6, "freqai_test_classifier"),
|
||||||
|
("LightGBMClassifier", 6, "freqai_test_classifier"),
|
||||||
|
("CatboostClassifier", 6, "freqai_test_classifier")
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_start_backtesting(mocker, freqai_conf, model, num_files, strat):
|
||||||
freqai_conf.get("freqai", {}).update({"save_backtest_models": True})
|
freqai_conf.get("freqai", {}).update({"save_backtest_models": True})
|
||||||
|
freqai_conf['runmode'] = RunMode.BACKTEST
|
||||||
|
Trade.use_db = False
|
||||||
|
if is_arm() and "Catboost" in model:
|
||||||
|
pytest.skip("CatBoost is not supported on ARM")
|
||||||
|
|
||||||
|
if is_mac():
|
||||||
|
pytest.skip("Reinforcement learning module not available on intel based Mac OS")
|
||||||
|
|
||||||
|
freqai_conf.update({"freqaimodel": model})
|
||||||
|
freqai_conf.update({"timerange": "20180120-20180130"})
|
||||||
|
freqai_conf.update({"strategy": strat})
|
||||||
|
|
||||||
|
if 'ReinforcementLearner' in model:
|
||||||
|
|
||||||
|
freqai_conf["freqai"].update({"model_training_parameters": {
|
||||||
|
"learning_rate": 0.00025,
|
||||||
|
"gamma": 0.9,
|
||||||
|
"verbose": 1
|
||||||
|
}})
|
||||||
|
freqai_conf["freqai"].update({"model_save_type": 'stable_baselines'})
|
||||||
|
freqai_conf["freqai"]["rl_config"] = {
|
||||||
|
"train_cycles": 1,
|
||||||
|
"thread_count": 2,
|
||||||
|
"max_trade_duration_candles": 300,
|
||||||
|
"model_type": "PPO",
|
||||||
|
"policy_type": "MlpPolicy",
|
||||||
|
"max_training_drawdown_pct": 0.5,
|
||||||
|
"model_reward_parameters": {
|
||||||
|
"rr": 1,
|
||||||
|
"profit_aim": 0.02,
|
||||||
|
"win_reward_factor": 2
|
||||||
|
}}
|
||||||
|
|
||||||
|
if 'test_4ac' in model:
|
||||||
|
freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models")
|
||||||
|
|
||||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||||
strategy.dp = DataProvider(freqai_conf, exchange)
|
strategy.dp = DataProvider(freqai_conf, exchange)
|
||||||
@@ -157,8 +245,8 @@ def test_start_backtesting(mocker, freqai_conf):
|
|||||||
freqai.start_backtesting(df, metadata, freqai.dk)
|
freqai.start_backtesting(df, metadata, freqai.dk)
|
||||||
model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()]
|
model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()]
|
||||||
|
|
||||||
assert len(model_folders) == 6
|
assert len(model_folders) == num_files
|
||||||
|
Backtesting.cleanup()
|
||||||
shutil.rmtree(Path(freqai.dk.full_path))
|
shutil.rmtree(Path(freqai.dk.full_path))
|
||||||
|
|
||||||
|
|
||||||
@@ -211,7 +299,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):
|
|||||||
|
|
||||||
assert len(model_folders) == 6
|
assert len(model_folders) == 6
|
||||||
|
|
||||||
# without deleting the exiting folder structure, re-run
|
# without deleting the existing folder structure, re-run
|
||||||
|
|
||||||
freqai_conf.update({"timerange": "20180120-20180130"})
|
freqai_conf.update({"timerange": "20180120-20180130"})
|
||||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||||
@@ -375,3 +463,40 @@ def test_freqai_informative_pairs(mocker, freqai_conf, timeframes, corr_pairs):
|
|||||||
pairs_b = strategy.gather_informative_pairs()
|
pairs_b = strategy.gather_informative_pairs()
|
||||||
# we expect unique pairs * timeframes
|
# we expect unique pairs * timeframes
|
||||||
assert len(pairs_b) == len(set(pairlist + corr_pairs)) * len(timeframes)
|
assert len(pairs_b) == len(set(pairlist + corr_pairs)) * len(timeframes)
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_set_train_queue(mocker, freqai_conf, caplog):
|
||||||
|
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||||
|
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||||
|
pairlist = PairListManager(exchange, freqai_conf)
|
||||||
|
strategy.dp = DataProvider(freqai_conf, exchange, pairlist)
|
||||||
|
strategy.freqai_info = freqai_conf.get("freqai", {})
|
||||||
|
freqai = strategy.freqai
|
||||||
|
freqai.live = False
|
||||||
|
|
||||||
|
freqai.train_queue = freqai._set_train_queue()
|
||||||
|
|
||||||
|
assert log_has_re(
|
||||||
|
"Set fresh train queue from whitelist.",
|
||||||
|
caplog,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_required_data_timerange(mocker, freqai_conf):
|
||||||
|
time_range = get_required_data_timerange(freqai_conf)
|
||||||
|
assert (time_range.stopts - time_range.startts) == 177300
|
||||||
|
|
||||||
|
|
||||||
|
def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmpdir):
|
||||||
|
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||||
|
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||||
|
pairlist = PairListManager(exchange, freqai_conf)
|
||||||
|
strategy.dp = DataProvider(freqai_conf, exchange, pairlist)
|
||||||
|
freqai_conf['pairs'] = freqai_conf['exchange']['pair_whitelist']
|
||||||
|
freqai_conf['datadir'] = Path(tmpdir)
|
||||||
|
download_all_data_for_training(strategy.dp, freqai_conf)
|
||||||
|
|
||||||
|
assert log_has_re(
|
||||||
|
"Downloading",
|
||||||
|
caplog,
|
||||||
|
)
|
||||||
|
104
tests/freqai/test_models/ReinforcementLearner_test_4ac.py
Normal file
104
tests/freqai/test_models/ReinforcementLearner_test_4ac.py
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
import logging
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import torch as th
|
||||||
|
|
||||||
|
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||||
|
from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions
|
||||||
|
from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ReinforcementLearner_test_4ac(BaseReinforcementLearningModel):
|
||||||
|
"""
|
||||||
|
User created Reinforcement Learning Model prediction model.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs):
|
||||||
|
|
||||||
|
train_df = data_dictionary["train_features"]
|
||||||
|
total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df)
|
||||||
|
|
||||||
|
policy_kwargs = dict(activation_fn=th.nn.ReLU,
|
||||||
|
net_arch=[128, 128])
|
||||||
|
|
||||||
|
if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
|
||||||
|
model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
|
||||||
|
tensorboard_log=Path(
|
||||||
|
dk.full_path / "tensorboard" / dk.pair.split('/')[0]),
|
||||||
|
**self.freqai_info['model_training_parameters']
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.info('Continual training activated - starting training from previously '
|
||||||
|
'trained agent.')
|
||||||
|
model = self.dd.model_dictionary[dk.pair]
|
||||||
|
model.set_env(self.train_env)
|
||||||
|
|
||||||
|
model.learn(
|
||||||
|
total_timesteps=int(total_timesteps),
|
||||||
|
callback=self.eval_callback
|
||||||
|
)
|
||||||
|
|
||||||
|
if Path(dk.data_path / "best_model.zip").is_file():
|
||||||
|
logger.info('Callback found a best model.')
|
||||||
|
best_model = self.MODELCLASS.load(dk.data_path / "best_model")
|
||||||
|
return best_model
|
||||||
|
|
||||||
|
logger.info('Couldnt find best model, using final model instead.')
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
class MyRLEnv(Base4ActionRLEnv):
|
||||||
|
"""
|
||||||
|
User can override any function in BaseRLEnv and gym.Env. Here the user
|
||||||
|
sets a custom reward based on profit and trade duration.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def calculate_reward(self, action):
|
||||||
|
|
||||||
|
# first, penalize if the action is not valid
|
||||||
|
if not self._is_valid(action):
|
||||||
|
return -2
|
||||||
|
|
||||||
|
pnl = self.get_unrealized_profit()
|
||||||
|
rew = np.sign(pnl) * (pnl + 1)
|
||||||
|
factor = 100
|
||||||
|
|
||||||
|
# reward agent for entering trades
|
||||||
|
if (action in (Actions.Long_enter.value, Actions.Short_enter.value)
|
||||||
|
and self._position == Positions.Neutral):
|
||||||
|
return 25
|
||||||
|
# discourage agent from not entering trades
|
||||||
|
if action == Actions.Neutral.value and self._position == Positions.Neutral:
|
||||||
|
return -1
|
||||||
|
|
||||||
|
max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
|
||||||
|
trade_duration = self._current_tick - self._last_trade_tick
|
||||||
|
|
||||||
|
if trade_duration <= max_trade_duration:
|
||||||
|
factor *= 1.5
|
||||||
|
elif trade_duration > max_trade_duration:
|
||||||
|
factor *= 0.5
|
||||||
|
|
||||||
|
# discourage sitting in position
|
||||||
|
if (self._position in (Positions.Short, Positions.Long) and
|
||||||
|
action == Actions.Neutral.value):
|
||||||
|
return -1 * trade_duration / max_trade_duration
|
||||||
|
|
||||||
|
# close long
|
||||||
|
if action == Actions.Exit.value and self._position == Positions.Long:
|
||||||
|
if pnl > self.profit_aim * self.rr:
|
||||||
|
factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
|
||||||
|
return float(rew * factor)
|
||||||
|
|
||||||
|
# close short
|
||||||
|
if action == Actions.Exit.value and self._position == Positions.Short:
|
||||||
|
if pnl > self.profit_aim * self.rr:
|
||||||
|
factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
|
||||||
|
return float(rew * factor)
|
||||||
|
|
||||||
|
return 0.
|
@@ -80,7 +80,7 @@ def load_data_test(what, testdatadir):
|
|||||||
data.loc[:, 'close'] = np.sin(data.index * hz) / 1000 + base
|
data.loc[:, 'close'] = np.sin(data.index * hz) / 1000 + base
|
||||||
|
|
||||||
return {'UNITTEST/BTC': clean_ohlcv_dataframe(data, timeframe='1m', pair='UNITTEST/BTC',
|
return {'UNITTEST/BTC': clean_ohlcv_dataframe(data, timeframe='1m', pair='UNITTEST/BTC',
|
||||||
fill_missing=True)}
|
fill_missing=True, drop_incomplete=True)}
|
||||||
|
|
||||||
|
|
||||||
# FIX: fixturize this?
|
# FIX: fixturize this?
|
||||||
@@ -323,7 +323,7 @@ def test_data_to_dataframe_bt(default_conf, mocker, testdatadir) -> None:
|
|||||||
backtesting = Backtesting(default_conf)
|
backtesting = Backtesting(default_conf)
|
||||||
backtesting._set_strategy(backtesting.strategylist[0])
|
backtesting._set_strategy(backtesting.strategylist[0])
|
||||||
processed = backtesting.strategy.advise_all_indicators(data)
|
processed = backtesting.strategy.advise_all_indicators(data)
|
||||||
assert len(processed['UNITTEST/BTC']) == 102
|
assert len(processed['UNITTEST/BTC']) == 103
|
||||||
|
|
||||||
# Load strategy to compare the result between Backtesting function and strategy are the same
|
# Load strategy to compare the result between Backtesting function and strategy are the same
|
||||||
strategy = StrategyResolver.load_strategy(default_conf)
|
strategy = StrategyResolver.load_strategy(default_conf)
|
||||||
@@ -1165,9 +1165,9 @@ def test_backtest_start_timerange(default_conf, mocker, caplog, testdatadir):
|
|||||||
'Parameter --timerange detected: 1510694220-1510700340 ...',
|
'Parameter --timerange detected: 1510694220-1510700340 ...',
|
||||||
f'Using data directory: {testdatadir} ...',
|
f'Using data directory: {testdatadir} ...',
|
||||||
'Loading data from 2017-11-14 20:57:00 '
|
'Loading data from 2017-11-14 20:57:00 '
|
||||||
'up to 2017-11-14 22:58:00 (0 days).',
|
'up to 2017-11-14 22:59:00 (0 days).',
|
||||||
'Backtesting with data from 2017-11-14 21:17:00 '
|
'Backtesting with data from 2017-11-14 21:17:00 '
|
||||||
'up to 2017-11-14 22:58:00 (0 days).',
|
'up to 2017-11-14 22:59:00 (0 days).',
|
||||||
'Parameter --enable-position-stacking detected ...'
|
'Parameter --enable-position-stacking detected ...'
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -1244,9 +1244,9 @@ def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir):
|
|||||||
'Parameter --timerange detected: 1510694220-1510700340 ...',
|
'Parameter --timerange detected: 1510694220-1510700340 ...',
|
||||||
f'Using data directory: {testdatadir} ...',
|
f'Using data directory: {testdatadir} ...',
|
||||||
'Loading data from 2017-11-14 20:57:00 '
|
'Loading data from 2017-11-14 20:57:00 '
|
||||||
'up to 2017-11-14 22:58:00 (0 days).',
|
'up to 2017-11-14 22:59:00 (0 days).',
|
||||||
'Backtesting with data from 2017-11-14 21:17:00 '
|
'Backtesting with data from 2017-11-14 21:17:00 '
|
||||||
'up to 2017-11-14 22:58:00 (0 days).',
|
'up to 2017-11-14 22:59:00 (0 days).',
|
||||||
'Parameter --enable-position-stacking detected ...',
|
'Parameter --enable-position-stacking detected ...',
|
||||||
f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
|
f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
|
||||||
'Running backtesting for Strategy StrategyTestV2',
|
'Running backtesting for Strategy StrategyTestV2',
|
||||||
@@ -1355,9 +1355,9 @@ def test_backtest_start_multi_strat_nomock(default_conf, mocker, caplog, testdat
|
|||||||
'Parameter --timerange detected: 1510694220-1510700340 ...',
|
'Parameter --timerange detected: 1510694220-1510700340 ...',
|
||||||
f'Using data directory: {testdatadir} ...',
|
f'Using data directory: {testdatadir} ...',
|
||||||
'Loading data from 2017-11-14 20:57:00 '
|
'Loading data from 2017-11-14 20:57:00 '
|
||||||
'up to 2017-11-14 22:58:00 (0 days).',
|
'up to 2017-11-14 22:59:00 (0 days).',
|
||||||
'Backtesting with data from 2017-11-14 21:17:00 '
|
'Backtesting with data from 2017-11-14 21:17:00 '
|
||||||
'up to 2017-11-14 22:58:00 (0 days).',
|
'up to 2017-11-14 22:59:00 (0 days).',
|
||||||
'Parameter --enable-position-stacking detected ...',
|
'Parameter --enable-position-stacking detected ...',
|
||||||
f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
|
f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
|
||||||
'Running backtesting for Strategy StrategyTestV2',
|
'Running backtesting for Strategy StrategyTestV2',
|
||||||
@@ -1371,7 +1371,7 @@ def test_backtest_start_multi_strat_nomock(default_conf, mocker, caplog, testdat
|
|||||||
assert 'EXIT REASON STATS' in captured.out
|
assert 'EXIT REASON STATS' in captured.out
|
||||||
assert 'DAY BREAKDOWN' in captured.out
|
assert 'DAY BREAKDOWN' in captured.out
|
||||||
assert 'LEFT OPEN TRADES REPORT' in captured.out
|
assert 'LEFT OPEN TRADES REPORT' in captured.out
|
||||||
assert '2017-11-14 21:17:00 -> 2017-11-14 22:58:00 | Max open trades : 1' in captured.out
|
assert '2017-11-14 21:17:00 -> 2017-11-14 22:59:00 | Max open trades : 1' in captured.out
|
||||||
assert 'STRATEGY SUMMARY' in captured.out
|
assert 'STRATEGY SUMMARY' in captured.out
|
||||||
|
|
||||||
|
|
||||||
@@ -1503,9 +1503,9 @@ def test_backtest_start_nomock_futures(default_conf_usdt, mocker,
|
|||||||
'Parameter -i/--timeframe detected ... Using timeframe: 1h ...',
|
'Parameter -i/--timeframe detected ... Using timeframe: 1h ...',
|
||||||
f'Using data directory: {testdatadir} ...',
|
f'Using data directory: {testdatadir} ...',
|
||||||
'Loading data from 2021-11-17 01:00:00 '
|
'Loading data from 2021-11-17 01:00:00 '
|
||||||
'up to 2021-11-21 03:00:00 (4 days).',
|
'up to 2021-11-21 04:00:00 (4 days).',
|
||||||
'Backtesting with data from 2021-11-17 21:00:00 '
|
'Backtesting with data from 2021-11-17 21:00:00 '
|
||||||
'up to 2021-11-21 03:00:00 (3 days).',
|
'up to 2021-11-21 04:00:00 (3 days).',
|
||||||
'XRP/USDT, funding_rate, 8h, data starts at 2021-11-18 00:00:00',
|
'XRP/USDT, funding_rate, 8h, data starts at 2021-11-18 00:00:00',
|
||||||
'XRP/USDT, mark, 8h, data starts at 2021-11-18 00:00:00',
|
'XRP/USDT, mark, 8h, data starts at 2021-11-18 00:00:00',
|
||||||
f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
|
f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
|
||||||
@@ -1616,9 +1616,9 @@ def test_backtest_start_multi_strat_nomock_detail(default_conf, mocker,
|
|||||||
'Parameter --timeframe-detail detected, using 1m for intra-candle backtesting ...',
|
'Parameter --timeframe-detail detected, using 1m for intra-candle backtesting ...',
|
||||||
f'Using data directory: {testdatadir} ...',
|
f'Using data directory: {testdatadir} ...',
|
||||||
'Loading data from 2019-10-11 00:00:00 '
|
'Loading data from 2019-10-11 00:00:00 '
|
||||||
'up to 2019-10-13 11:10:00 (2 days).',
|
'up to 2019-10-13 11:15:00 (2 days).',
|
||||||
'Backtesting with data from 2019-10-11 01:40:00 '
|
'Backtesting with data from 2019-10-11 01:40:00 '
|
||||||
'up to 2019-10-13 11:10:00 (2 days).',
|
'up to 2019-10-13 11:15:00 (2 days).',
|
||||||
f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
|
f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -1719,7 +1719,7 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
|
|||||||
'Parameter --timerange detected: 1510694220-1510700340 ...',
|
'Parameter --timerange detected: 1510694220-1510700340 ...',
|
||||||
f'Using data directory: {testdatadir} ...',
|
f'Using data directory: {testdatadir} ...',
|
||||||
'Loading data from 2017-11-14 20:57:00 '
|
'Loading data from 2017-11-14 20:57:00 '
|
||||||
'up to 2017-11-14 22:58:00 (0 days).',
|
'up to 2017-11-14 22:59:00 (0 days).',
|
||||||
'Parameter --enable-position-stacking detected ...',
|
'Parameter --enable-position-stacking detected ...',
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -1732,7 +1732,7 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
|
|||||||
'Running backtesting for Strategy StrategyTestV2',
|
'Running backtesting for Strategy StrategyTestV2',
|
||||||
'Running backtesting for Strategy StrategyTestV3',
|
'Running backtesting for Strategy StrategyTestV3',
|
||||||
'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
|
'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
|
||||||
'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:58:00 (0 days).',
|
'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).',
|
||||||
]
|
]
|
||||||
elif run_id == '2' and min_backtest_date < start_time:
|
elif run_id == '2' and min_backtest_date < start_time:
|
||||||
assert backtestmock.call_count == 0
|
assert backtestmock.call_count == 0
|
||||||
@@ -1745,7 +1745,7 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
|
|||||||
'Reusing result of previous backtest for StrategyTestV2',
|
'Reusing result of previous backtest for StrategyTestV2',
|
||||||
'Running backtesting for Strategy StrategyTestV3',
|
'Running backtesting for Strategy StrategyTestV3',
|
||||||
'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
|
'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
|
||||||
'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:58:00 (0 days).',
|
'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).',
|
||||||
]
|
]
|
||||||
assert backtestmock.call_count == 1
|
assert backtestmock.call_count == 1
|
||||||
|
|
||||||
|
@@ -93,11 +93,16 @@ def test_backtest_position_adjustment(default_conf, fee, mocker, testdatadir) ->
|
|||||||
t["close_rate"], 6) < round(ln.iloc[0]["high"], 6))
|
t["close_rate"], 6) < round(ln.iloc[0]["high"], 6))
|
||||||
|
|
||||||
|
|
||||||
def test_backtest_position_adjustment_detailed(default_conf, fee, mocker) -> None:
|
@pytest.mark.parametrize('leverage', [
|
||||||
|
1, 2
|
||||||
|
])
|
||||||
|
def test_backtest_position_adjustment_detailed(default_conf, fee, mocker, leverage) -> None:
|
||||||
default_conf['use_exit_signal'] = False
|
default_conf['use_exit_signal'] = False
|
||||||
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
|
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
|
||||||
mocker.patch("freqtrade.exchange.Exchange.get_min_pair_stake_amount", return_value=10)
|
mocker.patch("freqtrade.exchange.Exchange.get_min_pair_stake_amount", return_value=10)
|
||||||
mocker.patch("freqtrade.exchange.Exchange.get_max_pair_stake_amount", return_value=float('inf'))
|
mocker.patch("freqtrade.exchange.Exchange.get_max_pair_stake_amount", return_value=float('inf'))
|
||||||
|
mocker.patch("freqtrade.exchange.Exchange.get_max_leverage", return_value=10)
|
||||||
|
|
||||||
patch_exchange(mocker)
|
patch_exchange(mocker)
|
||||||
default_conf.update({
|
default_conf.update({
|
||||||
"stake_amount": 100.0,
|
"stake_amount": 100.0,
|
||||||
@@ -105,6 +110,7 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker) -> Non
|
|||||||
"strategy": "StrategyTestV3"
|
"strategy": "StrategyTestV3"
|
||||||
})
|
})
|
||||||
backtesting = Backtesting(default_conf)
|
backtesting = Backtesting(default_conf)
|
||||||
|
backtesting._can_short = True
|
||||||
backtesting._set_strategy(backtesting.strategylist[0])
|
backtesting._set_strategy(backtesting.strategylist[0])
|
||||||
pair = 'XRP/USDT'
|
pair = 'XRP/USDT'
|
||||||
row = [
|
row = [
|
||||||
@@ -120,18 +126,19 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker) -> Non
|
|||||||
'', # enter_tag
|
'', # enter_tag
|
||||||
'', # exit_tag
|
'', # exit_tag
|
||||||
]
|
]
|
||||||
|
backtesting.strategy.leverage = MagicMock(return_value=leverage)
|
||||||
trade = backtesting._enter_trade(pair, row=row, direction='long')
|
trade = backtesting._enter_trade(pair, row=row, direction='long')
|
||||||
trade.orders[0].close_bt_order(row[0], trade)
|
trade.orders[0].close_bt_order(row[0], trade)
|
||||||
assert trade
|
assert trade
|
||||||
assert pytest.approx(trade.stake_amount) == 100.0
|
assert pytest.approx(trade.stake_amount) == 100.0
|
||||||
assert pytest.approx(trade.amount) == 47.61904762
|
assert pytest.approx(trade.amount) == 47.61904762 * leverage
|
||||||
assert len(trade.orders) == 1
|
assert len(trade.orders) == 1
|
||||||
backtesting.strategy.adjust_trade_position = MagicMock(return_value=None)
|
backtesting.strategy.adjust_trade_position = MagicMock(return_value=None)
|
||||||
|
|
||||||
trade = backtesting._get_adjust_trade_entry_for_candle(trade, row)
|
trade = backtesting._get_adjust_trade_entry_for_candle(trade, row)
|
||||||
assert trade
|
assert trade
|
||||||
assert pytest.approx(trade.stake_amount) == 100.0
|
assert pytest.approx(trade.stake_amount) == 100.0
|
||||||
assert pytest.approx(trade.amount) == 47.61904762
|
assert pytest.approx(trade.amount) == 47.61904762 * leverage
|
||||||
assert len(trade.orders) == 1
|
assert len(trade.orders) == 1
|
||||||
# Increase position by 100
|
# Increase position by 100
|
||||||
backtesting.strategy.adjust_trade_position = MagicMock(return_value=100)
|
backtesting.strategy.adjust_trade_position = MagicMock(return_value=100)
|
||||||
@@ -140,7 +147,7 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker) -> Non
|
|||||||
|
|
||||||
assert trade
|
assert trade
|
||||||
assert pytest.approx(trade.stake_amount) == 200.0
|
assert pytest.approx(trade.stake_amount) == 200.0
|
||||||
assert pytest.approx(trade.amount) == 95.23809524
|
assert pytest.approx(trade.amount) == 95.23809524 * leverage
|
||||||
assert len(trade.orders) == 2
|
assert len(trade.orders) == 2
|
||||||
|
|
||||||
# Reduce by more than amount - no change to trade.
|
# Reduce by more than amount - no change to trade.
|
||||||
@@ -150,7 +157,7 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker) -> Non
|
|||||||
|
|
||||||
assert trade
|
assert trade
|
||||||
assert pytest.approx(trade.stake_amount) == 200.0
|
assert pytest.approx(trade.stake_amount) == 200.0
|
||||||
assert pytest.approx(trade.amount) == 95.23809524
|
assert pytest.approx(trade.amount) == 95.23809524 * leverage
|
||||||
assert len(trade.orders) == 2
|
assert len(trade.orders) == 2
|
||||||
assert trade.nr_of_successful_entries == 2
|
assert trade.nr_of_successful_entries == 2
|
||||||
|
|
||||||
@@ -160,7 +167,7 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker) -> Non
|
|||||||
|
|
||||||
assert trade
|
assert trade
|
||||||
assert pytest.approx(trade.stake_amount) == 100.0
|
assert pytest.approx(trade.stake_amount) == 100.0
|
||||||
assert pytest.approx(trade.amount) == 47.61904762
|
assert pytest.approx(trade.amount) == 47.61904762 * leverage
|
||||||
assert len(trade.orders) == 3
|
assert len(trade.orders) == 3
|
||||||
assert trade.nr_of_successful_entries == 2
|
assert trade.nr_of_successful_entries == 2
|
||||||
assert trade.nr_of_successful_exits == 1
|
assert trade.nr_of_successful_exits == 1
|
||||||
@@ -171,7 +178,7 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker) -> Non
|
|||||||
|
|
||||||
assert trade
|
assert trade
|
||||||
assert pytest.approx(trade.stake_amount) == 100.0
|
assert pytest.approx(trade.stake_amount) == 100.0
|
||||||
assert pytest.approx(trade.amount) == 47.61904762
|
assert pytest.approx(trade.amount) == 47.61904762 * leverage
|
||||||
assert len(trade.orders) == 3
|
assert len(trade.orders) == 3
|
||||||
assert trade.nr_of_successful_entries == 2
|
assert trade.nr_of_successful_entries == 2
|
||||||
assert trade.nr_of_successful_exits == 1
|
assert trade.nr_of_successful_exits == 1
|
||||||
|
@@ -297,6 +297,7 @@ def test_params_no_optimize_details(hyperopt) -> None:
|
|||||||
def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None:
|
def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None:
|
||||||
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
||||||
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
||||||
|
mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5)
|
||||||
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
||||||
|
|
||||||
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
||||||
@@ -530,6 +531,7 @@ def test_print_json_spaces_all(mocker, hyperopt_conf, capsys) -> None:
|
|||||||
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
||||||
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
||||||
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
||||||
|
mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5)
|
||||||
|
|
||||||
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
||||||
MagicMock(return_value=(MagicMock(), None)))
|
MagicMock(return_value=(MagicMock(), None)))
|
||||||
@@ -581,6 +583,7 @@ def test_print_json_spaces_default(mocker, hyperopt_conf, capsys) -> None:
|
|||||||
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
||||||
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
||||||
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
||||||
|
mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5)
|
||||||
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
||||||
MagicMock(return_value=(MagicMock(), None)))
|
MagicMock(return_value=(MagicMock(), None)))
|
||||||
mocker.patch(
|
mocker.patch(
|
||||||
@@ -622,6 +625,7 @@ def test_print_json_spaces_default(mocker, hyperopt_conf, capsys) -> None:
|
|||||||
def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
|
def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
|
||||||
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
||||||
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
||||||
|
mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5)
|
||||||
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
||||||
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
||||||
MagicMock(return_value=(MagicMock(), None)))
|
MagicMock(return_value=(MagicMock(), None)))
|
||||||
@@ -663,6 +667,7 @@ def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
|
|||||||
def test_simplified_interface_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
|
def test_simplified_interface_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
|
||||||
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
||||||
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
||||||
|
mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5)
|
||||||
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
||||||
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
||||||
MagicMock(return_value=(MagicMock(), None)))
|
MagicMock(return_value=(MagicMock(), None)))
|
||||||
@@ -736,6 +741,7 @@ def test_simplified_interface_all_failed(mocker, hyperopt_conf, caplog) -> None:
|
|||||||
def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None:
|
def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None:
|
||||||
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
||||||
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
||||||
|
mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5)
|
||||||
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
||||||
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
||||||
MagicMock(return_value=(MagicMock(), None)))
|
MagicMock(return_value=(MagicMock(), None)))
|
||||||
@@ -778,6 +784,7 @@ def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None:
|
|||||||
def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None:
|
def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None:
|
||||||
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
dumper = mocker.patch('freqtrade.optimize.hyperopt.dump')
|
||||||
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result')
|
||||||
|
mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5)
|
||||||
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
mocker.patch('freqtrade.optimize.hyperopt.file_dump_json')
|
||||||
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
|
||||||
MagicMock(return_value=(MagicMock(), None)))
|
MagicMock(return_value=(MagicMock(), None)))
|
||||||
|
@@ -9,6 +9,7 @@ import pytest
|
|||||||
import time_machine
|
import time_machine
|
||||||
|
|
||||||
from freqtrade.constants import AVAILABLE_PAIRLISTS
|
from freqtrade.constants import AVAILABLE_PAIRLISTS
|
||||||
|
from freqtrade.data.dataprovider import DataProvider
|
||||||
from freqtrade.enums import CandleType, RunMode
|
from freqtrade.enums import CandleType, RunMode
|
||||||
from freqtrade.exceptions import OperationalException
|
from freqtrade.exceptions import OperationalException
|
||||||
from freqtrade.persistence import Trade
|
from freqtrade.persistence import Trade
|
||||||
@@ -40,6 +41,12 @@ def whitelist_conf(default_conf):
|
|||||||
"sort_key": "quoteVolume",
|
"sort_key": "quoteVolume",
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
|
default_conf.update({
|
||||||
|
"external_message_consumer": {
|
||||||
|
"enabled": True,
|
||||||
|
"producers": [],
|
||||||
|
}
|
||||||
|
})
|
||||||
return default_conf
|
return default_conf
|
||||||
|
|
||||||
|
|
||||||
@@ -126,7 +133,7 @@ def test_log_cached(mocker, static_pl_conf, markets, tickers):
|
|||||||
def test_load_pairlist_noexist(mocker, markets, default_conf):
|
def test_load_pairlist_noexist(mocker, markets, default_conf):
|
||||||
freqtrade = get_patched_freqtradebot(mocker, default_conf)
|
freqtrade = get_patched_freqtradebot(mocker, default_conf)
|
||||||
mocker.patch('freqtrade.exchange.Exchange.markets', PropertyMock(return_value=markets))
|
mocker.patch('freqtrade.exchange.Exchange.markets', PropertyMock(return_value=markets))
|
||||||
plm = PairListManager(freqtrade.exchange, default_conf)
|
plm = PairListManager(freqtrade.exchange, default_conf, MagicMock())
|
||||||
with pytest.raises(OperationalException,
|
with pytest.raises(OperationalException,
|
||||||
match=r"Impossible to load Pairlist 'NonexistingPairList'. "
|
match=r"Impossible to load Pairlist 'NonexistingPairList'. "
|
||||||
r"This class does not exist or contains Python code errors."):
|
r"This class does not exist or contains Python code errors."):
|
||||||
@@ -137,7 +144,7 @@ def test_load_pairlist_noexist(mocker, markets, default_conf):
|
|||||||
def test_load_pairlist_verify_multi(mocker, markets_static, default_conf):
|
def test_load_pairlist_verify_multi(mocker, markets_static, default_conf):
|
||||||
freqtrade = get_patched_freqtradebot(mocker, default_conf)
|
freqtrade = get_patched_freqtradebot(mocker, default_conf)
|
||||||
mocker.patch('freqtrade.exchange.Exchange.markets', PropertyMock(return_value=markets_static))
|
mocker.patch('freqtrade.exchange.Exchange.markets', PropertyMock(return_value=markets_static))
|
||||||
plm = PairListManager(freqtrade.exchange, default_conf)
|
plm = PairListManager(freqtrade.exchange, default_conf, MagicMock())
|
||||||
# Call different versions one after the other, should always consider what was passed in
|
# Call different versions one after the other, should always consider what was passed in
|
||||||
# and have no side-effects (therefore the same check multiple times)
|
# and have no side-effects (therefore the same check multiple times)
|
||||||
assert plm.verify_whitelist(['ETH/BTC', 'XRP/BTC', ], print) == ['ETH/BTC', 'XRP/BTC']
|
assert plm.verify_whitelist(['ETH/BTC', 'XRP/BTC', ], print) == ['ETH/BTC', 'XRP/BTC']
|
||||||
@@ -269,7 +276,7 @@ def test_refresh_pairlist_dynamic(mocker, shitcoinmarkets, tickers, whitelist_co
|
|||||||
with pytest.raises(OperationalException,
|
with pytest.raises(OperationalException,
|
||||||
match=r'`number_assets` not specified. Please check your configuration '
|
match=r'`number_assets` not specified. Please check your configuration '
|
||||||
r'for "pairlist.config.number_assets"'):
|
r'for "pairlist.config.number_assets"'):
|
||||||
PairListManager(freqtrade.exchange, whitelist_conf)
|
PairListManager(freqtrade.exchange, whitelist_conf, MagicMock())
|
||||||
|
|
||||||
|
|
||||||
def test_refresh_pairlist_dynamic_2(mocker, shitcoinmarkets, tickers, whitelist_conf_2):
|
def test_refresh_pairlist_dynamic_2(mocker, shitcoinmarkets, tickers, whitelist_conf_2):
|
||||||
@@ -694,7 +701,7 @@ def test_PrecisionFilter_error(mocker, whitelist_conf) -> None:
|
|||||||
|
|
||||||
with pytest.raises(OperationalException,
|
with pytest.raises(OperationalException,
|
||||||
match=r"PrecisionFilter can only work with stoploss defined\..*"):
|
match=r"PrecisionFilter can only work with stoploss defined\..*"):
|
||||||
PairListManager(MagicMock, whitelist_conf)
|
PairListManager(MagicMock, whitelist_conf, MagicMock())
|
||||||
|
|
||||||
|
|
||||||
def test_PerformanceFilter_error(mocker, whitelist_conf, caplog) -> None:
|
def test_PerformanceFilter_error(mocker, whitelist_conf, caplog) -> None:
|
||||||
@@ -703,7 +710,7 @@ def test_PerformanceFilter_error(mocker, whitelist_conf, caplog) -> None:
|
|||||||
del Trade.query
|
del Trade.query
|
||||||
mocker.patch('freqtrade.exchange.Exchange.exchange_has', MagicMock(return_value=True))
|
mocker.patch('freqtrade.exchange.Exchange.exchange_has', MagicMock(return_value=True))
|
||||||
exchange = get_patched_exchange(mocker, whitelist_conf)
|
exchange = get_patched_exchange(mocker, whitelist_conf)
|
||||||
pm = PairListManager(exchange, whitelist_conf)
|
pm = PairListManager(exchange, whitelist_conf, MagicMock())
|
||||||
pm.refresh_pairlist()
|
pm.refresh_pairlist()
|
||||||
|
|
||||||
assert log_has("PerformanceFilter is not available in this mode.", caplog)
|
assert log_has("PerformanceFilter is not available in this mode.", caplog)
|
||||||
@@ -1167,6 +1174,10 @@ def test_spreadfilter_invalid_data(mocker, default_conf, markets, tickers, caplo
|
|||||||
"[{'OffsetFilter': 'OffsetFilter - Taking 10 Pairs, starting from 5.'}]",
|
"[{'OffsetFilter': 'OffsetFilter - Taking 10 Pairs, starting from 5.'}]",
|
||||||
None
|
None
|
||||||
),
|
),
|
||||||
|
({"method": "ProducerPairList"},
|
||||||
|
"[{'ProducerPairList': 'ProducerPairList - default'}]",
|
||||||
|
None
|
||||||
|
),
|
||||||
])
|
])
|
||||||
def test_pricefilter_desc(mocker, whitelist_conf, markets, pairlistconfig,
|
def test_pricefilter_desc(mocker, whitelist_conf, markets, pairlistconfig,
|
||||||
desc_expected, exception_expected):
|
desc_expected, exception_expected):
|
||||||
@@ -1341,3 +1352,77 @@ def test_expand_pairlist_keep_invalid(wildcardlist, pairs, expected):
|
|||||||
expand_pairlist(wildcardlist, pairs, keep_invalid=True)
|
expand_pairlist(wildcardlist, pairs, keep_invalid=True)
|
||||||
else:
|
else:
|
||||||
assert sorted(expand_pairlist(wildcardlist, pairs, keep_invalid=True)) == sorted(expected)
|
assert sorted(expand_pairlist(wildcardlist, pairs, keep_invalid=True)) == sorted(expected)
|
||||||
|
|
||||||
|
|
||||||
|
def test_ProducerPairlist_no_emc(mocker, whitelist_conf):
|
||||||
|
mocker.patch('freqtrade.exchange.Exchange.exchange_has', MagicMock(return_value=True))
|
||||||
|
|
||||||
|
whitelist_conf['pairlists'] = [
|
||||||
|
{
|
||||||
|
"method": "ProducerPairList",
|
||||||
|
"number_assets": 10,
|
||||||
|
"producer_name": "hello_world",
|
||||||
|
}
|
||||||
|
]
|
||||||
|
del whitelist_conf['external_message_consumer']
|
||||||
|
|
||||||
|
with pytest.raises(OperationalException,
|
||||||
|
match=r"ProducerPairList requires external_message_consumer to be enabled."):
|
||||||
|
get_patched_freqtradebot(mocker, whitelist_conf)
|
||||||
|
|
||||||
|
|
||||||
|
def test_ProducerPairlist(mocker, whitelist_conf, markets):
|
||||||
|
mocker.patch('freqtrade.exchange.Exchange.exchange_has', MagicMock(return_value=True))
|
||||||
|
mocker.patch.multiple('freqtrade.exchange.Exchange',
|
||||||
|
markets=PropertyMock(return_value=markets),
|
||||||
|
exchange_has=MagicMock(return_value=True),
|
||||||
|
)
|
||||||
|
whitelist_conf['pairlists'] = [
|
||||||
|
{
|
||||||
|
"method": "ProducerPairList",
|
||||||
|
"number_assets": 2,
|
||||||
|
"producer_name": "hello_world",
|
||||||
|
}
|
||||||
|
]
|
||||||
|
whitelist_conf.update({
|
||||||
|
"external_message_consumer": {
|
||||||
|
"enabled": True,
|
||||||
|
"producers": [
|
||||||
|
{
|
||||||
|
"name": "hello_world",
|
||||||
|
"host": "null",
|
||||||
|
"port": 9891,
|
||||||
|
"ws_token": "dummy",
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
exchange = get_patched_exchange(mocker, whitelist_conf)
|
||||||
|
dp = DataProvider(whitelist_conf, exchange, None)
|
||||||
|
pairs = ['ETH/BTC', 'LTC/BTC', 'XRP/BTC']
|
||||||
|
# different producer
|
||||||
|
dp._set_producer_pairs(pairs + ['MEEP/USDT'], 'default')
|
||||||
|
pm = PairListManager(exchange, whitelist_conf, dp)
|
||||||
|
pm.refresh_pairlist()
|
||||||
|
assert pm.whitelist == []
|
||||||
|
# proper producer
|
||||||
|
dp._set_producer_pairs(pairs, 'hello_world')
|
||||||
|
pm.refresh_pairlist()
|
||||||
|
|
||||||
|
# Pairlist reduced to 2
|
||||||
|
assert pm.whitelist == pairs[:2]
|
||||||
|
assert len(pm.whitelist) == 2
|
||||||
|
whitelist_conf['exchange']['pair_whitelist'] = ['TKN/BTC']
|
||||||
|
|
||||||
|
whitelist_conf['pairlists'] = [
|
||||||
|
{"method": "StaticPairList"},
|
||||||
|
{
|
||||||
|
"method": "ProducerPairList",
|
||||||
|
"producer_name": "hello_world",
|
||||||
|
}
|
||||||
|
]
|
||||||
|
pm = PairListManager(exchange, whitelist_conf, dp)
|
||||||
|
pm.refresh_pairlist()
|
||||||
|
assert len(pm.whitelist) == 4
|
||||||
|
assert pm.whitelist == ['TKN/BTC'] + pairs
|
||||||
|
@@ -1457,6 +1457,7 @@ def test_api_strategies(botclient):
|
|||||||
'StrategyTestV2',
|
'StrategyTestV2',
|
||||||
'StrategyTestV3',
|
'StrategyTestV3',
|
||||||
'StrategyTestV3Futures',
|
'StrategyTestV3Futures',
|
||||||
|
'freqai_rl_test_strat',
|
||||||
'freqai_test_classifier',
|
'freqai_test_classifier',
|
||||||
'freqai_test_multimodel_strat',
|
'freqai_test_multimodel_strat',
|
||||||
'freqai_test_strat'
|
'freqai_test_strat'
|
||||||
|
@@ -188,15 +188,19 @@ async def test_emc_create_connection_success(default_conf, caplog, mocker):
|
|||||||
emc.shutdown()
|
emc.shutdown()
|
||||||
|
|
||||||
|
|
||||||
async def test_emc_create_connection_invalid_port(default_conf, caplog, mocker):
|
@pytest.mark.parametrize('host,port', [
|
||||||
|
(_TEST_WS_HOST, -1),
|
||||||
|
("10000.1241..2121/", _TEST_WS_PORT),
|
||||||
|
])
|
||||||
|
async def test_emc_create_connection_invalid_url(default_conf, caplog, mocker, host, port):
|
||||||
default_conf.update({
|
default_conf.update({
|
||||||
"external_message_consumer": {
|
"external_message_consumer": {
|
||||||
"enabled": True,
|
"enabled": True,
|
||||||
"producers": [
|
"producers": [
|
||||||
{
|
{
|
||||||
"name": "default",
|
"name": "default",
|
||||||
"host": _TEST_WS_HOST,
|
"host": host,
|
||||||
"port": -1,
|
"port": port,
|
||||||
"ws_token": _TEST_WS_TOKEN
|
"ws_token": _TEST_WS_TOKEN
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
@@ -207,38 +211,13 @@ async def test_emc_create_connection_invalid_port(default_conf, caplog, mocker):
|
|||||||
})
|
})
|
||||||
|
|
||||||
dp = DataProvider(default_conf, None, None, None)
|
dp = DataProvider(default_conf, None, None, None)
|
||||||
|
# Handle start explicitly to avoid messing with threading in tests
|
||||||
|
mocker.patch("freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start",)
|
||||||
emc = ExternalMessageConsumer(default_conf, dp)
|
emc = ExternalMessageConsumer(default_conf, dp)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
await asyncio.sleep(0.01)
|
emc._running = True
|
||||||
assert log_has_re(r".+ is an invalid WebSocket URL .+", caplog)
|
await emc._create_connection(emc.producers[0], asyncio.Lock())
|
||||||
finally:
|
|
||||||
emc.shutdown()
|
|
||||||
|
|
||||||
|
|
||||||
async def test_emc_create_connection_invalid_host(default_conf, caplog, mocker):
|
|
||||||
default_conf.update({
|
|
||||||
"external_message_consumer": {
|
|
||||||
"enabled": True,
|
|
||||||
"producers": [
|
|
||||||
{
|
|
||||||
"name": "default",
|
|
||||||
"host": "10000.1241..2121/",
|
|
||||||
"port": _TEST_WS_PORT,
|
|
||||||
"ws_token": _TEST_WS_TOKEN
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"wait_timeout": 60,
|
|
||||||
"ping_timeout": 60,
|
|
||||||
"sleep_timeout": 60
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
dp = DataProvider(default_conf, None, None, None)
|
|
||||||
emc = ExternalMessageConsumer(default_conf, dp)
|
|
||||||
|
|
||||||
try:
|
|
||||||
await asyncio.sleep(0.01)
|
|
||||||
assert log_has_re(r".+ is an invalid WebSocket URL .+", caplog)
|
assert log_has_re(r".+ is an invalid WebSocket URL .+", caplog)
|
||||||
finally:
|
finally:
|
||||||
emc.shutdown()
|
emc.shutdown()
|
||||||
|
139
tests/strategy/strats/freqai_rl_test_strat.py
Normal file
139
tests/strategy/strats/freqai_rl_test_strat.py
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
import logging
|
||||||
|
from functools import reduce
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
import talib.abstract as ta
|
||||||
|
from pandas import DataFrame
|
||||||
|
|
||||||
|
from freqtrade.strategy import IStrategy, merge_informative_pair
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class freqai_rl_test_strat(IStrategy):
|
||||||
|
"""
|
||||||
|
Test strategy - used for testing freqAI functionalities.
|
||||||
|
DO not use in production.
|
||||||
|
"""
|
||||||
|
|
||||||
|
minimal_roi = {"0": 0.1, "240": -1}
|
||||||
|
|
||||||
|
plot_config = {
|
||||||
|
"main_plot": {},
|
||||||
|
"subplots": {
|
||||||
|
"prediction": {"prediction": {"color": "blue"}},
|
||||||
|
"target_roi": {
|
||||||
|
"target_roi": {"color": "brown"},
|
||||||
|
},
|
||||||
|
"do_predict": {
|
||||||
|
"do_predict": {"color": "brown"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
process_only_new_candles = True
|
||||||
|
stoploss = -0.05
|
||||||
|
use_exit_signal = True
|
||||||
|
startup_candle_count: int = 30
|
||||||
|
can_short = False
|
||||||
|
|
||||||
|
def informative_pairs(self):
|
||||||
|
whitelist_pairs = self.dp.current_whitelist()
|
||||||
|
corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"]
|
||||||
|
informative_pairs = []
|
||||||
|
for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]:
|
||||||
|
for pair in whitelist_pairs:
|
||||||
|
informative_pairs.append((pair, tf))
|
||||||
|
for pair in corr_pairs:
|
||||||
|
if pair in whitelist_pairs:
|
||||||
|
continue # avoid duplication
|
||||||
|
informative_pairs.append((pair, tf))
|
||||||
|
return informative_pairs
|
||||||
|
|
||||||
|
def populate_any_indicators(
|
||||||
|
self, pair, df, tf, informative=None, set_generalized_indicators=False
|
||||||
|
):
|
||||||
|
|
||||||
|
coin = pair.split('/')[0]
|
||||||
|
|
||||||
|
if informative is None:
|
||||||
|
informative = self.dp.get_pair_dataframe(pair, tf)
|
||||||
|
|
||||||
|
# first loop is automatically duplicating indicators for time periods
|
||||||
|
for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
|
||||||
|
|
||||||
|
t = int(t)
|
||||||
|
informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||||
|
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||||
|
informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
|
||||||
|
|
||||||
|
# FIXME: add these outside the user strategy?
|
||||||
|
# The following columns are necessary for RL models.
|
||||||
|
informative[f"%-{coin}raw_close"] = informative["close"]
|
||||||
|
informative[f"%-{coin}raw_open"] = informative["open"]
|
||||||
|
informative[f"%-{coin}raw_high"] = informative["high"]
|
||||||
|
informative[f"%-{coin}raw_low"] = informative["low"]
|
||||||
|
|
||||||
|
indicators = [col for col in informative if col.startswith("%")]
|
||||||
|
# This loop duplicates and shifts all indicators to add a sense of recency to data
|
||||||
|
for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
|
||||||
|
if n == 0:
|
||||||
|
continue
|
||||||
|
informative_shift = informative[indicators].shift(n)
|
||||||
|
informative_shift = informative_shift.add_suffix("_shift-" + str(n))
|
||||||
|
informative = pd.concat((informative, informative_shift), axis=1)
|
||||||
|
|
||||||
|
df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
|
||||||
|
skip_columns = [
|
||||||
|
(s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
|
||||||
|
]
|
||||||
|
df = df.drop(columns=skip_columns)
|
||||||
|
|
||||||
|
# Add generalized indicators here (because in live, it will call this
|
||||||
|
# function to populate indicators during training). Notice how we ensure not to
|
||||||
|
# add them multiple times
|
||||||
|
if set_generalized_indicators:
|
||||||
|
df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7
|
||||||
|
df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25
|
||||||
|
|
||||||
|
# For RL, there are no direct targets to set. This is filler (neutral)
|
||||||
|
# until the agent sends an action.
|
||||||
|
df["&-action"] = 0
|
||||||
|
|
||||||
|
return df
|
||||||
|
|
||||||
|
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
|
||||||
|
|
||||||
|
dataframe = self.freqai.start(dataframe, metadata, self)
|
||||||
|
|
||||||
|
return dataframe
|
||||||
|
|
||||||
|
def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
|
||||||
|
|
||||||
|
enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1]
|
||||||
|
|
||||||
|
if enter_long_conditions:
|
||||||
|
df.loc[
|
||||||
|
reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
|
||||||
|
] = (1, "long")
|
||||||
|
|
||||||
|
enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3]
|
||||||
|
|
||||||
|
if enter_short_conditions:
|
||||||
|
df.loc[
|
||||||
|
reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
|
||||||
|
] = (1, "short")
|
||||||
|
|
||||||
|
return df
|
||||||
|
|
||||||
|
def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
|
||||||
|
exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2]
|
||||||
|
if exit_long_conditions:
|
||||||
|
df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1
|
||||||
|
|
||||||
|
exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4]
|
||||||
|
if exit_short_conditions:
|
||||||
|
df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1
|
||||||
|
|
||||||
|
return df
|
@@ -288,7 +288,7 @@ def test_advise_all_indicators(default_conf, testdatadir) -> None:
|
|||||||
data = load_data(testdatadir, '1m', ['UNITTEST/BTC'], timerange=timerange,
|
data = load_data(testdatadir, '1m', ['UNITTEST/BTC'], timerange=timerange,
|
||||||
fill_up_missing=True)
|
fill_up_missing=True)
|
||||||
processed = strategy.advise_all_indicators(data)
|
processed = strategy.advise_all_indicators(data)
|
||||||
assert len(processed['UNITTEST/BTC']) == 102 # partial candle was removed
|
assert len(processed['UNITTEST/BTC']) == 103
|
||||||
|
|
||||||
|
|
||||||
def test_populate_any_indicators(default_conf, testdatadir) -> None:
|
def test_populate_any_indicators(default_conf, testdatadir) -> None:
|
||||||
@@ -300,7 +300,7 @@ def test_populate_any_indicators(default_conf, testdatadir) -> None:
|
|||||||
processed = strategy.populate_any_indicators('UNITTEST/BTC', data, '5m')
|
processed = strategy.populate_any_indicators('UNITTEST/BTC', data, '5m')
|
||||||
assert processed == data
|
assert processed == data
|
||||||
assert id(processed) == id(data)
|
assert id(processed) == id(data)
|
||||||
assert len(processed['UNITTEST/BTC']) == 102 # partial candle was removed
|
assert len(processed['UNITTEST/BTC']) == 103
|
||||||
|
|
||||||
|
|
||||||
def test_freqai_not_initialized(default_conf) -> None:
|
def test_freqai_not_initialized(default_conf) -> None:
|
||||||
|
@@ -34,7 +34,7 @@ def test_search_all_strategies_no_failed():
|
|||||||
directory = Path(__file__).parent / "strats"
|
directory = Path(__file__).parent / "strats"
|
||||||
strategies = StrategyResolver.search_all_objects(directory, enum_failed=False)
|
strategies = StrategyResolver.search_all_objects(directory, enum_failed=False)
|
||||||
assert isinstance(strategies, list)
|
assert isinstance(strategies, list)
|
||||||
assert len(strategies) == 9
|
assert len(strategies) == 10
|
||||||
assert isinstance(strategies[0], dict)
|
assert isinstance(strategies[0], dict)
|
||||||
|
|
||||||
|
|
||||||
@@ -42,10 +42,10 @@ def test_search_all_strategies_with_failed():
|
|||||||
directory = Path(__file__).parent / "strats"
|
directory = Path(__file__).parent / "strats"
|
||||||
strategies = StrategyResolver.search_all_objects(directory, enum_failed=True)
|
strategies = StrategyResolver.search_all_objects(directory, enum_failed=True)
|
||||||
assert isinstance(strategies, list)
|
assert isinstance(strategies, list)
|
||||||
assert len(strategies) == 10
|
assert len(strategies) == 11
|
||||||
# with enum_failed=True search_all_objects() shall find 2 good strategies
|
# with enum_failed=True search_all_objects() shall find 2 good strategies
|
||||||
# and 1 which fails to load
|
# and 1 which fails to load
|
||||||
assert len([x for x in strategies if x['class'] is not None]) == 9
|
assert len([x for x in strategies if x['class'] is not None]) == 10
|
||||||
assert len([x for x in strategies if x['class'] is None]) == 1
|
assert len([x for x in strategies if x['class'] is None]) == 1
|
||||||
|
|
||||||
directory = Path(__file__).parent / "strats_nonexistingdir"
|
directory = Path(__file__).parent / "strats_nonexistingdir"
|
||||||
|
@@ -11,7 +11,7 @@ import pytest
|
|||||||
from jsonschema import ValidationError
|
from jsonschema import ValidationError
|
||||||
|
|
||||||
from freqtrade.commands import Arguments
|
from freqtrade.commands import Arguments
|
||||||
from freqtrade.configuration import Configuration, check_exchange, validate_config_consistency
|
from freqtrade.configuration import Configuration, validate_config_consistency
|
||||||
from freqtrade.configuration.config_validation import validate_config_schema
|
from freqtrade.configuration.config_validation import validate_config_schema
|
||||||
from freqtrade.configuration.deprecated_settings import (check_conflicting_settings,
|
from freqtrade.configuration.deprecated_settings import (check_conflicting_settings,
|
||||||
process_deprecated_setting,
|
process_deprecated_setting,
|
||||||
@@ -584,67 +584,6 @@ def test_hyperopt_with_arguments(mocker, default_conf, caplog) -> None:
|
|||||||
assert config['runmode'] == RunMode.HYPEROPT
|
assert config['runmode'] == RunMode.HYPEROPT
|
||||||
|
|
||||||
|
|
||||||
def test_check_exchange(default_conf, caplog) -> None:
|
|
||||||
# Test an officially supported by Freqtrade team exchange
|
|
||||||
default_conf['runmode'] = RunMode.DRY_RUN
|
|
||||||
default_conf.get('exchange').update({'name': 'BITTREX'})
|
|
||||||
assert check_exchange(default_conf)
|
|
||||||
assert log_has_re(r"Exchange .* is officially supported by the Freqtrade development team\.",
|
|
||||||
caplog)
|
|
||||||
caplog.clear()
|
|
||||||
|
|
||||||
# Test an officially supported by Freqtrade team exchange
|
|
||||||
default_conf.get('exchange').update({'name': 'binance'})
|
|
||||||
assert check_exchange(default_conf)
|
|
||||||
assert log_has_re(r"Exchange .* is officially supported by the Freqtrade development team\.",
|
|
||||||
caplog)
|
|
||||||
caplog.clear()
|
|
||||||
|
|
||||||
# Test an available exchange, supported by ccxt
|
|
||||||
default_conf.get('exchange').update({'name': 'huobipro'})
|
|
||||||
assert check_exchange(default_conf)
|
|
||||||
assert log_has_re(r"Exchange .* is known to the the ccxt library, available for the bot, "
|
|
||||||
r"but not officially supported "
|
|
||||||
r"by the Freqtrade development team\. .*", caplog)
|
|
||||||
caplog.clear()
|
|
||||||
|
|
||||||
# Test a 'bad' exchange, which known to have serious problems
|
|
||||||
default_conf.get('exchange').update({'name': 'bitmex'})
|
|
||||||
with pytest.raises(OperationalException,
|
|
||||||
match=r"Exchange .* will not work with Freqtrade\..*"):
|
|
||||||
check_exchange(default_conf)
|
|
||||||
caplog.clear()
|
|
||||||
|
|
||||||
# Test a 'bad' exchange with check_for_bad=False
|
|
||||||
default_conf.get('exchange').update({'name': 'bitmex'})
|
|
||||||
assert check_exchange(default_conf, False)
|
|
||||||
assert log_has_re(r"Exchange .* is known to the the ccxt library, available for the bot, "
|
|
||||||
r"but not officially supported "
|
|
||||||
r"by the Freqtrade development team\. .*", caplog)
|
|
||||||
caplog.clear()
|
|
||||||
|
|
||||||
# Test an invalid exchange
|
|
||||||
default_conf.get('exchange').update({'name': 'unknown_exchange'})
|
|
||||||
with pytest.raises(
|
|
||||||
OperationalException,
|
|
||||||
match=r'Exchange "unknown_exchange" is not known to the ccxt library '
|
|
||||||
r'and therefore not available for the bot.*'
|
|
||||||
):
|
|
||||||
check_exchange(default_conf)
|
|
||||||
|
|
||||||
# Test no exchange...
|
|
||||||
default_conf.get('exchange').update({'name': ''})
|
|
||||||
default_conf['runmode'] = RunMode.PLOT
|
|
||||||
assert check_exchange(default_conf)
|
|
||||||
|
|
||||||
# Test no exchange...
|
|
||||||
default_conf.get('exchange').update({'name': ''})
|
|
||||||
default_conf['runmode'] = RunMode.UTIL_EXCHANGE
|
|
||||||
with pytest.raises(OperationalException,
|
|
||||||
match=r'This command requires a configured exchange.*'):
|
|
||||||
check_exchange(default_conf)
|
|
||||||
|
|
||||||
|
|
||||||
def test_cli_verbose_with_params(default_conf, mocker, caplog) -> None:
|
def test_cli_verbose_with_params(default_conf, mocker, caplog) -> None:
|
||||||
patched_configuration_load_config_file(mocker, default_conf)
|
patched_configuration_load_config_file(mocker, default_conf)
|
||||||
|
|
||||||
|
@@ -28,6 +28,7 @@ from tests.conftest import (create_mock_trades, create_mock_trades_usdt, get_pat
|
|||||||
from tests.conftest_trades import (MOCK_TRADE_COUNT, entry_side, exit_side, mock_order_1,
|
from tests.conftest_trades import (MOCK_TRADE_COUNT, entry_side, exit_side, mock_order_1,
|
||||||
mock_order_2, mock_order_2_sell, mock_order_3, mock_order_3_sell,
|
mock_order_2, mock_order_2_sell, mock_order_3, mock_order_3_sell,
|
||||||
mock_order_4, mock_order_5_stoploss, mock_order_6_sell)
|
mock_order_4, mock_order_5_stoploss, mock_order_6_sell)
|
||||||
|
from tests.conftest_trades_usdt import mock_trade_usdt_4
|
||||||
|
|
||||||
|
|
||||||
def patch_RPCManager(mocker) -> MagicMock:
|
def patch_RPCManager(mocker) -> MagicMock:
|
||||||
@@ -1060,6 +1061,7 @@ def test_add_stoploss_on_exchange(mocker, default_conf_usdt, limit_order, is_sho
|
|||||||
freqtrade = FreqtradeBot(default_conf_usdt)
|
freqtrade = FreqtradeBot(default_conf_usdt)
|
||||||
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
|
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
|
||||||
|
|
||||||
|
# TODO: should not be magicmock
|
||||||
trade = MagicMock()
|
trade = MagicMock()
|
||||||
trade.is_short = is_short
|
trade.is_short = is_short
|
||||||
trade.open_order_id = None
|
trade.open_order_id = None
|
||||||
@@ -1101,6 +1103,7 @@ def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_
|
|||||||
# First case: when stoploss is not yet set but the order is open
|
# First case: when stoploss is not yet set but the order is open
|
||||||
# should get the stoploss order id immediately
|
# should get the stoploss order id immediately
|
||||||
# and should return false as no trade actually happened
|
# and should return false as no trade actually happened
|
||||||
|
# TODO: should not be magicmock
|
||||||
trade = MagicMock()
|
trade = MagicMock()
|
||||||
trade.is_short = is_short
|
trade.is_short = is_short
|
||||||
trade.is_open = True
|
trade.is_open = True
|
||||||
@@ -1879,6 +1882,7 @@ def test_exit_positions(mocker, default_conf_usdt, limit_order, is_short, caplog
|
|||||||
return_value=limit_order[entry_side(is_short)])
|
return_value=limit_order[entry_side(is_short)])
|
||||||
mocker.patch('freqtrade.exchange.Exchange.get_trades_for_order', return_value=[])
|
mocker.patch('freqtrade.exchange.Exchange.get_trades_for_order', return_value=[])
|
||||||
|
|
||||||
|
# TODO: should not be magicmock
|
||||||
trade = MagicMock()
|
trade = MagicMock()
|
||||||
trade.is_short = is_short
|
trade.is_short = is_short
|
||||||
trade.open_order_id = '123'
|
trade.open_order_id = '123'
|
||||||
@@ -1902,6 +1906,7 @@ def test_exit_positions_exception(mocker, default_conf_usdt, limit_order, caplog
|
|||||||
order = limit_order[entry_side(is_short)]
|
order = limit_order[entry_side(is_short)]
|
||||||
mocker.patch('freqtrade.exchange.Exchange.fetch_order', return_value=order)
|
mocker.patch('freqtrade.exchange.Exchange.fetch_order', return_value=order)
|
||||||
|
|
||||||
|
# TODO: should not be magicmock
|
||||||
trade = MagicMock()
|
trade = MagicMock()
|
||||||
trade.is_short = is_short
|
trade.is_short = is_short
|
||||||
trade.open_order_id = None
|
trade.open_order_id = None
|
||||||
@@ -2042,6 +2047,7 @@ def test_update_trade_state_exception(mocker, default_conf_usdt, is_short, limit
|
|||||||
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
|
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
|
||||||
mocker.patch('freqtrade.exchange.Exchange.fetch_order', return_value=order)
|
mocker.patch('freqtrade.exchange.Exchange.fetch_order', return_value=order)
|
||||||
|
|
||||||
|
# TODO: should not be magicmock
|
||||||
trade = MagicMock()
|
trade = MagicMock()
|
||||||
trade.open_order_id = '123'
|
trade.open_order_id = '123'
|
||||||
trade.amount = 123
|
trade.amount = 123
|
||||||
@@ -2060,6 +2066,7 @@ def test_update_trade_state_orderexception(mocker, default_conf_usdt, caplog) ->
|
|||||||
mocker.patch('freqtrade.exchange.Exchange.fetch_order',
|
mocker.patch('freqtrade.exchange.Exchange.fetch_order',
|
||||||
MagicMock(side_effect=InvalidOrderException))
|
MagicMock(side_effect=InvalidOrderException))
|
||||||
|
|
||||||
|
# TODO: should not be magicmock
|
||||||
trade = MagicMock()
|
trade = MagicMock()
|
||||||
trade.open_order_id = '123'
|
trade.open_order_id = '123'
|
||||||
|
|
||||||
@@ -2661,6 +2668,7 @@ def test_manage_open_orders_exit_usercustom(
|
|||||||
rpc_mock = patch_RPCManager(mocker)
|
rpc_mock = patch_RPCManager(mocker)
|
||||||
cancel_order_mock = MagicMock()
|
cancel_order_mock = MagicMock()
|
||||||
patch_exchange(mocker)
|
patch_exchange(mocker)
|
||||||
|
mocker.patch('freqtrade.exchange.Exchange.get_min_pair_stake_amount', return_value=0.0)
|
||||||
et_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit')
|
et_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit')
|
||||||
mocker.patch.multiple(
|
mocker.patch.multiple(
|
||||||
'freqtrade.exchange.Exchange',
|
'freqtrade.exchange.Exchange',
|
||||||
@@ -2673,7 +2681,6 @@ def test_manage_open_orders_exit_usercustom(
|
|||||||
open_trade_usdt.open_date = arrow.utcnow().shift(hours=-5).datetime
|
open_trade_usdt.open_date = arrow.utcnow().shift(hours=-5).datetime
|
||||||
open_trade_usdt.close_date = arrow.utcnow().shift(minutes=-601).datetime
|
open_trade_usdt.close_date = arrow.utcnow().shift(minutes=-601).datetime
|
||||||
open_trade_usdt.close_profit_abs = 0.001
|
open_trade_usdt.close_profit_abs = 0.001
|
||||||
open_trade_usdt.is_open = False
|
|
||||||
|
|
||||||
Trade.query.session.add(open_trade_usdt)
|
Trade.query.session.add(open_trade_usdt)
|
||||||
Trade.commit()
|
Trade.commit()
|
||||||
@@ -2687,7 +2694,6 @@ def test_manage_open_orders_exit_usercustom(
|
|||||||
freqtrade.manage_open_orders()
|
freqtrade.manage_open_orders()
|
||||||
assert cancel_order_mock.call_count == 0
|
assert cancel_order_mock.call_count == 0
|
||||||
assert rpc_mock.call_count == 1
|
assert rpc_mock.call_count == 1
|
||||||
assert open_trade_usdt.is_open is False
|
|
||||||
assert freqtrade.strategy.check_exit_timeout.call_count == 1
|
assert freqtrade.strategy.check_exit_timeout.call_count == 1
|
||||||
assert freqtrade.strategy.check_entry_timeout.call_count == 0
|
assert freqtrade.strategy.check_entry_timeout.call_count == 0
|
||||||
|
|
||||||
@@ -2697,7 +2703,6 @@ def test_manage_open_orders_exit_usercustom(
|
|||||||
freqtrade.manage_open_orders()
|
freqtrade.manage_open_orders()
|
||||||
assert cancel_order_mock.call_count == 0
|
assert cancel_order_mock.call_count == 0
|
||||||
assert rpc_mock.call_count == 1
|
assert rpc_mock.call_count == 1
|
||||||
assert open_trade_usdt.is_open is False
|
|
||||||
assert freqtrade.strategy.check_exit_timeout.call_count == 1
|
assert freqtrade.strategy.check_exit_timeout.call_count == 1
|
||||||
assert freqtrade.strategy.check_entry_timeout.call_count == 0
|
assert freqtrade.strategy.check_entry_timeout.call_count == 0
|
||||||
|
|
||||||
@@ -2707,7 +2712,6 @@ def test_manage_open_orders_exit_usercustom(
|
|||||||
freqtrade.manage_open_orders()
|
freqtrade.manage_open_orders()
|
||||||
assert cancel_order_mock.call_count == 1
|
assert cancel_order_mock.call_count == 1
|
||||||
assert rpc_mock.call_count == 2
|
assert rpc_mock.call_count == 2
|
||||||
assert open_trade_usdt.is_open is True
|
|
||||||
assert freqtrade.strategy.check_exit_timeout.call_count == 1
|
assert freqtrade.strategy.check_exit_timeout.call_count == 1
|
||||||
assert freqtrade.strategy.check_entry_timeout.call_count == 0
|
assert freqtrade.strategy.check_entry_timeout.call_count == 0
|
||||||
|
|
||||||
@@ -2748,14 +2752,14 @@ def test_manage_open_orders_exit(
|
|||||||
'freqtrade.exchange.Exchange',
|
'freqtrade.exchange.Exchange',
|
||||||
fetch_ticker=ticker_usdt,
|
fetch_ticker=ticker_usdt,
|
||||||
fetch_order=MagicMock(return_value=limit_sell_order_old),
|
fetch_order=MagicMock(return_value=limit_sell_order_old),
|
||||||
cancel_order=cancel_order_mock
|
cancel_order=cancel_order_mock,
|
||||||
|
get_min_pair_stake_amount=MagicMock(return_value=0),
|
||||||
)
|
)
|
||||||
freqtrade = FreqtradeBot(default_conf_usdt)
|
freqtrade = FreqtradeBot(default_conf_usdt)
|
||||||
|
|
||||||
open_trade_usdt.open_date = arrow.utcnow().shift(hours=-5).datetime
|
open_trade_usdt.open_date = arrow.utcnow().shift(hours=-5).datetime
|
||||||
open_trade_usdt.close_date = arrow.utcnow().shift(minutes=-601).datetime
|
open_trade_usdt.close_date = arrow.utcnow().shift(minutes=-601).datetime
|
||||||
open_trade_usdt.close_profit_abs = 0.001
|
open_trade_usdt.close_profit_abs = 0.001
|
||||||
open_trade_usdt.is_open = False
|
|
||||||
open_trade_usdt.is_short = is_short
|
open_trade_usdt.is_short = is_short
|
||||||
|
|
||||||
Trade.query.session.add(open_trade_usdt)
|
Trade.query.session.add(open_trade_usdt)
|
||||||
@@ -2796,7 +2800,6 @@ def test_check_handle_cancelled_exit(
|
|||||||
|
|
||||||
open_trade_usdt.open_date = arrow.utcnow().shift(hours=-5).datetime
|
open_trade_usdt.open_date = arrow.utcnow().shift(hours=-5).datetime
|
||||||
open_trade_usdt.close_date = arrow.utcnow().shift(minutes=-601).datetime
|
open_trade_usdt.close_date = arrow.utcnow().shift(minutes=-601).datetime
|
||||||
open_trade_usdt.is_open = False
|
|
||||||
open_trade_usdt.is_short = is_short
|
open_trade_usdt.is_short = is_short
|
||||||
|
|
||||||
Trade.query.session.add(open_trade_usdt)
|
Trade.query.session.add(open_trade_usdt)
|
||||||
@@ -2984,7 +2987,7 @@ def test_manage_open_orders_exception(default_conf_usdt, ticker_usdt, open_trade
|
|||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("is_short", [False, True])
|
@pytest.mark.parametrize("is_short", [False, True])
|
||||||
def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_short) -> None:
|
def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_short, fee) -> None:
|
||||||
patch_RPCManager(mocker)
|
patch_RPCManager(mocker)
|
||||||
patch_exchange(mocker)
|
patch_exchange(mocker)
|
||||||
l_order = limit_order[entry_side(is_short)]
|
l_order = limit_order[entry_side(is_short)]
|
||||||
@@ -2998,15 +3001,12 @@ def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_
|
|||||||
freqtrade = FreqtradeBot(default_conf_usdt)
|
freqtrade = FreqtradeBot(default_conf_usdt)
|
||||||
freqtrade._notify_enter_cancel = MagicMock()
|
freqtrade._notify_enter_cancel = MagicMock()
|
||||||
|
|
||||||
# TODO: Convert to real trade
|
trade = mock_trade_usdt_4(fee, is_short)
|
||||||
trade = MagicMock()
|
Trade.query.session.add(trade)
|
||||||
trade.pair = 'LTC/USDT'
|
Trade.commit()
|
||||||
trade.open_rate = 200
|
|
||||||
trade.is_short = False
|
|
||||||
trade.entry_side = "buy"
|
|
||||||
l_order['filled'] = 0.0
|
l_order['filled'] = 0.0
|
||||||
l_order['status'] = 'open'
|
l_order['status'] = 'open'
|
||||||
trade.nr_of_successful_entries = 0
|
|
||||||
reason = CANCEL_REASON['TIMEOUT']
|
reason = CANCEL_REASON['TIMEOUT']
|
||||||
assert freqtrade.handle_cancel_enter(trade, l_order, reason)
|
assert freqtrade.handle_cancel_enter(trade, l_order, reason)
|
||||||
assert cancel_order_mock.call_count == 1
|
assert cancel_order_mock.call_count == 1
|
||||||
@@ -3038,7 +3038,7 @@ def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_
|
|||||||
@pytest.mark.parametrize("is_short", [False, True])
|
@pytest.mark.parametrize("is_short", [False, True])
|
||||||
@pytest.mark.parametrize("limit_buy_order_canceled_empty", ['binance', 'ftx', 'kraken', 'bittrex'],
|
@pytest.mark.parametrize("limit_buy_order_canceled_empty", ['binance', 'ftx', 'kraken', 'bittrex'],
|
||||||
indirect=['limit_buy_order_canceled_empty'])
|
indirect=['limit_buy_order_canceled_empty'])
|
||||||
def test_handle_cancel_enter_exchanges(mocker, caplog, default_conf_usdt, is_short,
|
def test_handle_cancel_enter_exchanges(mocker, caplog, default_conf_usdt, is_short, fee,
|
||||||
limit_buy_order_canceled_empty) -> None:
|
limit_buy_order_canceled_empty) -> None:
|
||||||
patch_RPCManager(mocker)
|
patch_RPCManager(mocker)
|
||||||
patch_exchange(mocker)
|
patch_exchange(mocker)
|
||||||
@@ -3049,11 +3049,10 @@ def test_handle_cancel_enter_exchanges(mocker, caplog, default_conf_usdt, is_sho
|
|||||||
freqtrade = FreqtradeBot(default_conf_usdt)
|
freqtrade = FreqtradeBot(default_conf_usdt)
|
||||||
|
|
||||||
reason = CANCEL_REASON['TIMEOUT']
|
reason = CANCEL_REASON['TIMEOUT']
|
||||||
# TODO: Convert to real trade
|
|
||||||
trade = MagicMock()
|
trade = mock_trade_usdt_4(fee, is_short)
|
||||||
trade.nr_of_successful_entries = 0
|
Trade.query.session.add(trade)
|
||||||
trade.pair = 'LTC/ETH'
|
Trade.commit()
|
||||||
trade.entry_side = "sell" if is_short else "buy"
|
|
||||||
assert freqtrade.handle_cancel_enter(trade, limit_buy_order_canceled_empty, reason)
|
assert freqtrade.handle_cancel_enter(trade, limit_buy_order_canceled_empty, reason)
|
||||||
assert cancel_order_mock.call_count == 0
|
assert cancel_order_mock.call_count == 0
|
||||||
assert log_has_re(
|
assert log_has_re(
|
||||||
@@ -3071,7 +3070,7 @@ def test_handle_cancel_enter_exchanges(mocker, caplog, default_conf_usdt, is_sho
|
|||||||
'String Return value',
|
'String Return value',
|
||||||
123
|
123
|
||||||
])
|
])
|
||||||
def test_handle_cancel_enter_corder_empty(mocker, default_conf_usdt, limit_order, is_short,
|
def test_handle_cancel_enter_corder_empty(mocker, default_conf_usdt, limit_order, is_short, fee,
|
||||||
cancelorder) -> None:
|
cancelorder) -> None:
|
||||||
patch_RPCManager(mocker)
|
patch_RPCManager(mocker)
|
||||||
patch_exchange(mocker)
|
patch_exchange(mocker)
|
||||||
@@ -3079,19 +3078,15 @@ def test_handle_cancel_enter_corder_empty(mocker, default_conf_usdt, limit_order
|
|||||||
cancel_order_mock = MagicMock(return_value=cancelorder)
|
cancel_order_mock = MagicMock(return_value=cancelorder)
|
||||||
mocker.patch.multiple(
|
mocker.patch.multiple(
|
||||||
'freqtrade.exchange.Exchange',
|
'freqtrade.exchange.Exchange',
|
||||||
cancel_order=cancel_order_mock
|
cancel_order=cancel_order_mock,
|
||||||
|
fetch_order=MagicMock(side_effect=InvalidOrderException)
|
||||||
)
|
)
|
||||||
|
|
||||||
freqtrade = FreqtradeBot(default_conf_usdt)
|
freqtrade = FreqtradeBot(default_conf_usdt)
|
||||||
freqtrade._notify_enter_cancel = MagicMock()
|
freqtrade._notify_enter_cancel = MagicMock()
|
||||||
# TODO: Convert to real trade
|
trade = mock_trade_usdt_4(fee, is_short)
|
||||||
trade = MagicMock()
|
Trade.query.session.add(trade)
|
||||||
trade.pair = 'LTC/USDT'
|
Trade.commit()
|
||||||
trade.entry_side = "buy"
|
|
||||||
trade.open_rate = 200
|
|
||||||
trade.entry_side = "buy"
|
|
||||||
trade.open_order_id = "open_order_noop"
|
|
||||||
trade.nr_of_successful_entries = 0
|
|
||||||
l_order['filled'] = 0.0
|
l_order['filled'] = 0.0
|
||||||
l_order['status'] = 'open'
|
l_order['status'] = 'open'
|
||||||
reason = CANCEL_REASON['TIMEOUT']
|
reason = CANCEL_REASON['TIMEOUT']
|
||||||
@@ -3100,6 +3095,9 @@ def test_handle_cancel_enter_corder_empty(mocker, default_conf_usdt, limit_order
|
|||||||
|
|
||||||
cancel_order_mock.reset_mock()
|
cancel_order_mock.reset_mock()
|
||||||
l_order['filled'] = 1.0
|
l_order['filled'] = 1.0
|
||||||
|
order = deepcopy(l_order)
|
||||||
|
order['status'] = 'canceled'
|
||||||
|
mocker.patch('freqtrade.exchange.Exchange.fetch_order', return_value=order)
|
||||||
assert not freqtrade.handle_cancel_enter(trade, l_order, reason)
|
assert not freqtrade.handle_cancel_enter(trade, l_order, reason)
|
||||||
assert cancel_order_mock.call_count == 1
|
assert cancel_order_mock.call_count == 1
|
||||||
|
|
||||||
@@ -3113,6 +3111,9 @@ def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee) -> None:
|
|||||||
cancel_order=cancel_order_mock,
|
cancel_order=cancel_order_mock,
|
||||||
)
|
)
|
||||||
mocker.patch('freqtrade.exchange.Exchange.get_rate', return_value=0.245441)
|
mocker.patch('freqtrade.exchange.Exchange.get_rate', return_value=0.245441)
|
||||||
|
mocker.patch('freqtrade.exchange.Exchange.get_min_pair_stake_amount', return_value=0.2)
|
||||||
|
|
||||||
|
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_order_fee')
|
||||||
|
|
||||||
freqtrade = FreqtradeBot(default_conf_usdt)
|
freqtrade = FreqtradeBot(default_conf_usdt)
|
||||||
|
|
||||||
@@ -3121,20 +3122,21 @@ def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee) -> None:
|
|||||||
amount=2,
|
amount=2,
|
||||||
exchange='binance',
|
exchange='binance',
|
||||||
open_rate=0.245441,
|
open_rate=0.245441,
|
||||||
open_order_id="123456",
|
open_order_id="sell_123456",
|
||||||
open_date=arrow.utcnow().shift(days=-2).datetime,
|
open_date=arrow.utcnow().shift(days=-2).datetime,
|
||||||
fee_open=fee.return_value,
|
fee_open=fee.return_value,
|
||||||
fee_close=fee.return_value,
|
fee_close=fee.return_value,
|
||||||
close_rate=0.555,
|
close_rate=0.555,
|
||||||
close_date=arrow.utcnow().datetime,
|
close_date=arrow.utcnow().datetime,
|
||||||
exit_reason="sell_reason_whatever",
|
exit_reason="sell_reason_whatever",
|
||||||
|
stake_amount=0.245441 * 2,
|
||||||
)
|
)
|
||||||
trade.orders = [
|
trade.orders = [
|
||||||
Order(
|
Order(
|
||||||
ft_order_side='buy',
|
ft_order_side='buy',
|
||||||
ft_pair=trade.pair,
|
ft_pair=trade.pair,
|
||||||
ft_is_open=True,
|
ft_is_open=False,
|
||||||
order_id='123456',
|
order_id='buy_123456',
|
||||||
status="closed",
|
status="closed",
|
||||||
symbol=trade.pair,
|
symbol=trade.pair,
|
||||||
order_type="market",
|
order_type="market",
|
||||||
@@ -3147,21 +3149,42 @@ def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee) -> None:
|
|||||||
order_date=trade.open_date,
|
order_date=trade.open_date,
|
||||||
order_filled_date=trade.open_date,
|
order_filled_date=trade.open_date,
|
||||||
),
|
),
|
||||||
|
Order(
|
||||||
|
ft_order_side='sell',
|
||||||
|
ft_pair=trade.pair,
|
||||||
|
ft_is_open=True,
|
||||||
|
order_id='sell_123456',
|
||||||
|
status="open",
|
||||||
|
symbol=trade.pair,
|
||||||
|
order_type="limit",
|
||||||
|
side="sell",
|
||||||
|
price=trade.open_rate,
|
||||||
|
average=trade.open_rate,
|
||||||
|
filled=0.0,
|
||||||
|
remaining=trade.amount,
|
||||||
|
cost=trade.open_rate * trade.amount,
|
||||||
|
order_date=trade.open_date,
|
||||||
|
order_filled_date=trade.open_date,
|
||||||
|
),
|
||||||
]
|
]
|
||||||
order = {'id': "123456",
|
order = {'id': "sell_123456",
|
||||||
'remaining': 1,
|
'remaining': 1,
|
||||||
'amount': 1,
|
'amount': 1,
|
||||||
'status': "open"}
|
'status': "open"}
|
||||||
reason = CANCEL_REASON['TIMEOUT']
|
reason = CANCEL_REASON['TIMEOUT']
|
||||||
|
send_msg_mock.reset_mock()
|
||||||
assert freqtrade.handle_cancel_exit(trade, order, reason)
|
assert freqtrade.handle_cancel_exit(trade, order, reason)
|
||||||
assert cancel_order_mock.call_count == 1
|
assert cancel_order_mock.call_count == 1
|
||||||
assert send_msg_mock.call_count == 2
|
assert send_msg_mock.call_count == 1
|
||||||
assert trade.close_rate is None
|
assert trade.close_rate is None
|
||||||
assert trade.exit_reason is None
|
assert trade.exit_reason is None
|
||||||
|
assert trade.open_order_id is None
|
||||||
|
|
||||||
send_msg_mock.reset_mock()
|
send_msg_mock.reset_mock()
|
||||||
|
|
||||||
|
# Partial exit - below exit threshold
|
||||||
order['amount'] = 2
|
order['amount'] = 2
|
||||||
|
order['filled'] = 1.9
|
||||||
assert not freqtrade.handle_cancel_exit(trade, order, reason)
|
assert not freqtrade.handle_cancel_exit(trade, order, reason)
|
||||||
# Assert cancel_order was not called (callcount remains unchanged)
|
# Assert cancel_order was not called (callcount remains unchanged)
|
||||||
assert cancel_order_mock.call_count == 1
|
assert cancel_order_mock.call_count == 1
|
||||||
@@ -3171,21 +3194,32 @@ def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee) -> None:
|
|||||||
|
|
||||||
assert not freqtrade.handle_cancel_exit(trade, order, reason)
|
assert not freqtrade.handle_cancel_exit(trade, order, reason)
|
||||||
|
|
||||||
send_msg_mock.call_args_list[0][0][0]['reason'] = CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']
|
assert (send_msg_mock.call_args_list[0][0][0]['reason']
|
||||||
|
== CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN'])
|
||||||
|
|
||||||
# Message should not be iterated again
|
# Message should not be iterated again
|
||||||
assert trade.exit_order_status == CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']
|
assert trade.exit_order_status == CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']
|
||||||
assert send_msg_mock.call_count == 1
|
assert send_msg_mock.call_count == 1
|
||||||
|
|
||||||
|
send_msg_mock.reset_mock()
|
||||||
|
|
||||||
|
order['filled'] = 1
|
||||||
|
assert freqtrade.handle_cancel_exit(trade, order, reason)
|
||||||
|
assert send_msg_mock.call_count == 1
|
||||||
|
assert (send_msg_mock.call_args_list[0][0][0]['reason']
|
||||||
|
== CANCEL_REASON['PARTIALLY_FILLED'])
|
||||||
|
|
||||||
|
|
||||||
def test_handle_cancel_exit_cancel_exception(mocker, default_conf_usdt) -> None:
|
def test_handle_cancel_exit_cancel_exception(mocker, default_conf_usdt) -> None:
|
||||||
patch_RPCManager(mocker)
|
patch_RPCManager(mocker)
|
||||||
patch_exchange(mocker)
|
patch_exchange(mocker)
|
||||||
mocker.patch(
|
mocker.patch('freqtrade.exchange.Exchange.get_min_pair_stake_amount', return_value=0.0)
|
||||||
'freqtrade.exchange.Exchange.cancel_order_with_result', side_effect=InvalidOrderException())
|
mocker.patch('freqtrade.exchange.Exchange.cancel_order_with_result',
|
||||||
|
side_effect=InvalidOrderException())
|
||||||
|
|
||||||
freqtrade = FreqtradeBot(default_conf_usdt)
|
freqtrade = FreqtradeBot(default_conf_usdt)
|
||||||
|
|
||||||
|
# TODO: should not be magicmock
|
||||||
trade = MagicMock()
|
trade = MagicMock()
|
||||||
reason = CANCEL_REASON['TIMEOUT']
|
reason = CANCEL_REASON['TIMEOUT']
|
||||||
order = {'remaining': 1,
|
order = {'remaining': 1,
|
||||||
|
@@ -2,7 +2,7 @@ from unittest.mock import MagicMock
|
|||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from freqtrade.enums import ExitCheckTuple, ExitType
|
from freqtrade.enums import ExitCheckTuple, ExitType, TradingMode
|
||||||
from freqtrade.persistence import Trade
|
from freqtrade.persistence import Trade
|
||||||
from freqtrade.persistence.models import Order
|
from freqtrade.persistence.models import Order
|
||||||
from freqtrade.rpc.rpc import RPC
|
from freqtrade.rpc.rpc import RPC
|
||||||
@@ -351,8 +351,13 @@ def test_dca_short(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
|
|||||||
assert trade.nr_of_successful_exits == 1
|
assert trade.nr_of_successful_exits == 1
|
||||||
|
|
||||||
|
|
||||||
def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
|
@pytest.mark.parametrize('leverage', [
|
||||||
|
1, 2
|
||||||
|
])
|
||||||
|
def test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker) -> None:
|
||||||
default_conf_usdt['position_adjustment_enable'] = True
|
default_conf_usdt['position_adjustment_enable'] = True
|
||||||
|
default_conf_usdt['trading_mode'] = 'futures'
|
||||||
|
default_conf_usdt['margin_mode'] = 'isolated'
|
||||||
|
|
||||||
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
|
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
|
||||||
mocker.patch.multiple(
|
mocker.patch.multiple(
|
||||||
@@ -363,9 +368,14 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
|
|||||||
price_to_precision=lambda s, x, y: y,
|
price_to_precision=lambda s, x, y: y,
|
||||||
)
|
)
|
||||||
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)
|
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)
|
||||||
|
mocker.patch("freqtrade.exchange.Exchange.get_max_leverage", return_value=10)
|
||||||
|
mocker.patch("freqtrade.exchange.Exchange.get_funding_fees", return_value=0)
|
||||||
|
mocker.patch("freqtrade.exchange.Exchange.get_maintenance_ratio_and_amt", return_value=(0, 0))
|
||||||
|
|
||||||
patch_get_signal(freqtrade)
|
patch_get_signal(freqtrade)
|
||||||
freqtrade.strategy.custom_entry_price = lambda **kwargs: ticker_usdt['ask'] * 0.96
|
freqtrade.strategy.custom_entry_price = lambda **kwargs: ticker_usdt['ask'] * 0.96
|
||||||
|
freqtrade.strategy.leverage = MagicMock(return_value=leverage)
|
||||||
|
freqtrade.strategy.minimal_roi = {0: 0.2}
|
||||||
|
|
||||||
freqtrade.enter_positions()
|
freqtrade.enter_positions()
|
||||||
|
|
||||||
@@ -377,6 +387,8 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
|
|||||||
assert trade.open_rate == 1.96
|
assert trade.open_rate == 1.96
|
||||||
assert trade.stop_loss_pct is None
|
assert trade.stop_loss_pct is None
|
||||||
assert trade.stop_loss == 0.0
|
assert trade.stop_loss == 0.0
|
||||||
|
assert trade.leverage == leverage
|
||||||
|
assert trade.stake_amount == 60
|
||||||
assert trade.initial_stop_loss == 0.0
|
assert trade.initial_stop_loss == 0.0
|
||||||
assert trade.initial_stop_loss_pct is None
|
assert trade.initial_stop_loss_pct is None
|
||||||
# No adjustment
|
# No adjustment
|
||||||
@@ -396,6 +408,7 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
|
|||||||
assert trade.open_rate == 1.96
|
assert trade.open_rate == 1.96
|
||||||
assert trade.stop_loss_pct is None
|
assert trade.stop_loss_pct is None
|
||||||
assert trade.stop_loss == 0.0
|
assert trade.stop_loss == 0.0
|
||||||
|
assert trade.stake_amount == 60
|
||||||
assert trade.initial_stop_loss == 0.0
|
assert trade.initial_stop_loss == 0.0
|
||||||
assert trade.initial_stop_loss_pct is None
|
assert trade.initial_stop_loss_pct is None
|
||||||
|
|
||||||
@@ -407,9 +420,10 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
|
|||||||
assert trade.open_order_id is None
|
assert trade.open_order_id is None
|
||||||
# Open rate is not adjusted yet
|
# Open rate is not adjusted yet
|
||||||
assert trade.open_rate == 1.99
|
assert trade.open_rate == 1.99
|
||||||
|
assert trade.stake_amount == 60
|
||||||
assert trade.stop_loss_pct == -0.1
|
assert trade.stop_loss_pct == -0.1
|
||||||
assert trade.stop_loss == 1.99 * 0.9
|
assert pytest.approx(trade.stop_loss) == 1.99 * (1 - 0.1 / leverage)
|
||||||
assert trade.initial_stop_loss == 1.99 * 0.9
|
assert pytest.approx(trade.initial_stop_loss) == 1.99 * (1 - 0.1 / leverage)
|
||||||
assert trade.initial_stop_loss_pct == -0.1
|
assert trade.initial_stop_loss_pct == -0.1
|
||||||
|
|
||||||
# 2nd order - not filling
|
# 2nd order - not filling
|
||||||
@@ -422,7 +436,7 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
|
|||||||
assert trade.open_order_id is not None
|
assert trade.open_order_id is not None
|
||||||
assert trade.open_rate == 1.99
|
assert trade.open_rate == 1.99
|
||||||
assert trade.orders[-1].price == 1.96
|
assert trade.orders[-1].price == 1.96
|
||||||
assert trade.orders[-1].cost == 120
|
assert trade.orders[-1].cost == 120 * leverage
|
||||||
|
|
||||||
# Replace new order with diff. order at a lower price
|
# Replace new order with diff. order at a lower price
|
||||||
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.95)
|
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.95)
|
||||||
@@ -432,8 +446,9 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
|
|||||||
assert len(trade.orders) == 4
|
assert len(trade.orders) == 4
|
||||||
assert trade.open_order_id is not None
|
assert trade.open_order_id is not None
|
||||||
assert trade.open_rate == 1.99
|
assert trade.open_rate == 1.99
|
||||||
|
assert trade.stake_amount == 60
|
||||||
assert trade.orders[-1].price == 1.95
|
assert trade.orders[-1].price == 1.95
|
||||||
assert pytest.approx(trade.orders[-1].cost) == 120
|
assert pytest.approx(trade.orders[-1].cost) == 120 * leverage
|
||||||
|
|
||||||
# Fill DCA order
|
# Fill DCA order
|
||||||
freqtrade.strategy.adjust_trade_position = MagicMock(return_value=None)
|
freqtrade.strategy.adjust_trade_position = MagicMock(return_value=None)
|
||||||
@@ -446,19 +461,21 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
|
|||||||
assert trade.open_order_id is None
|
assert trade.open_order_id is None
|
||||||
assert pytest.approx(trade.open_rate) == 1.963153456
|
assert pytest.approx(trade.open_rate) == 1.963153456
|
||||||
assert trade.orders[-1].price == 1.95
|
assert trade.orders[-1].price == 1.95
|
||||||
assert pytest.approx(trade.orders[-1].cost) == 120
|
assert pytest.approx(trade.orders[-1].cost) == 120 * leverage
|
||||||
assert trade.orders[-1].status == 'closed'
|
assert trade.orders[-1].status == 'closed'
|
||||||
|
|
||||||
assert pytest.approx(trade.amount) == 91.689215
|
assert pytest.approx(trade.amount) == 91.689215 * leverage
|
||||||
# Check the 2 filled orders equal the above amount
|
# Check the 2 filled orders equal the above amount
|
||||||
assert pytest.approx(trade.orders[1].amount) == 30.150753768
|
assert pytest.approx(trade.orders[1].amount) == 30.150753768 * leverage
|
||||||
assert pytest.approx(trade.orders[-1].amount) == 61.538461232
|
assert pytest.approx(trade.orders[-1].amount) == 61.538461232 * leverage
|
||||||
|
|
||||||
|
|
||||||
def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog) -> None:
|
@pytest.mark.parametrize('leverage', [1, 2])
|
||||||
|
def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog, leverage) -> None:
|
||||||
default_conf_usdt['position_adjustment_enable'] = True
|
default_conf_usdt['position_adjustment_enable'] = True
|
||||||
|
|
||||||
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
|
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
|
||||||
|
freqtrade.trading_mode = TradingMode.FUTURES
|
||||||
mocker.patch.multiple(
|
mocker.patch.multiple(
|
||||||
'freqtrade.exchange.Exchange',
|
'freqtrade.exchange.Exchange',
|
||||||
fetch_ticker=ticker_usdt,
|
fetch_ticker=ticker_usdt,
|
||||||
@@ -467,15 +484,17 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog) -> Non
|
|||||||
price_to_precision=lambda s, x, y: y,
|
price_to_precision=lambda s, x, y: y,
|
||||||
get_min_pair_stake_amount=MagicMock(return_value=10),
|
get_min_pair_stake_amount=MagicMock(return_value=10),
|
||||||
)
|
)
|
||||||
|
mocker.patch("freqtrade.exchange.Exchange.get_max_leverage", return_value=10)
|
||||||
|
|
||||||
patch_get_signal(freqtrade)
|
patch_get_signal(freqtrade)
|
||||||
|
freqtrade.strategy.leverage = MagicMock(return_value=leverage)
|
||||||
freqtrade.enter_positions()
|
freqtrade.enter_positions()
|
||||||
|
|
||||||
assert len(Trade.get_trades().all()) == 1
|
assert len(Trade.get_trades().all()) == 1
|
||||||
trade = Trade.get_trades().first()
|
trade = Trade.get_trades().first()
|
||||||
assert len(trade.orders) == 1
|
assert len(trade.orders) == 1
|
||||||
assert pytest.approx(trade.stake_amount) == 60
|
assert pytest.approx(trade.stake_amount) == 60
|
||||||
assert pytest.approx(trade.amount) == 30.0
|
assert pytest.approx(trade.amount) == 30.0 * leverage
|
||||||
assert trade.open_rate == 2.0
|
assert trade.open_rate == 2.0
|
||||||
|
|
||||||
# Too small size
|
# Too small size
|
||||||
@@ -484,8 +503,9 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog) -> Non
|
|||||||
trade = Trade.get_trades().first()
|
trade = Trade.get_trades().first()
|
||||||
assert len(trade.orders) == 1
|
assert len(trade.orders) == 1
|
||||||
assert pytest.approx(trade.stake_amount) == 60
|
assert pytest.approx(trade.stake_amount) == 60
|
||||||
assert pytest.approx(trade.amount) == 30.0
|
assert pytest.approx(trade.amount) == 30.0 * leverage
|
||||||
assert log_has_re("Remaining amount of 1.6.* would be smaller than the minimum of 10.", caplog)
|
assert log_has_re(
|
||||||
|
r"Remaining amount of \d\.\d+.* would be smaller than the minimum of 10.", caplog)
|
||||||
|
|
||||||
freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-20)
|
freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-20)
|
||||||
|
|
||||||
@@ -494,7 +514,7 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog) -> Non
|
|||||||
assert len(trade.orders) == 2
|
assert len(trade.orders) == 2
|
||||||
assert trade.orders[-1].ft_order_side == 'sell'
|
assert trade.orders[-1].ft_order_side == 'sell'
|
||||||
assert pytest.approx(trade.stake_amount) == 40.198
|
assert pytest.approx(trade.stake_amount) == 40.198
|
||||||
assert pytest.approx(trade.amount) == 20.099
|
assert pytest.approx(trade.amount) == 20.099 * leverage
|
||||||
assert trade.open_rate == 2.0
|
assert trade.open_rate == 2.0
|
||||||
assert trade.is_open
|
assert trade.is_open
|
||||||
caplog.clear()
|
caplog.clear()
|
||||||
|
@@ -63,7 +63,7 @@ def test_init_plotscript(default_conf, mocker, testdatadir):
|
|||||||
|
|
||||||
def test_add_indicators(default_conf, testdatadir, caplog):
|
def test_add_indicators(default_conf, testdatadir, caplog):
|
||||||
pair = "UNITTEST/BTC"
|
pair = "UNITTEST/BTC"
|
||||||
timerange = TimeRange(None, 'line', 0, -1000)
|
timerange = TimeRange()
|
||||||
|
|
||||||
data = history.load_pair_history(pair=pair, timeframe='1m',
|
data = history.load_pair_history(pair=pair, timeframe='1m',
|
||||||
datadir=testdatadir, timerange=timerange)
|
datadir=testdatadir, timerange=timerange)
|
||||||
|
Reference in New Issue
Block a user