Merge branch 'freqtrade:develop' into develop
commit f1793d8d8f

.github/workflows/ci.yml (100 changes)
@@ -3,7 +3,6 @@ name: Freqtrade CI
 on:
   push:
     branches:
-      - master
       - stable
       - develop
     tags:
@@ -20,7 +19,7 @@ jobs:
     strategy:
       matrix:
         os: [ ubuntu-18.04, ubuntu-20.04 ]
-        python-version: [3.7, 3.8, 3.9]
+        python-version: ["3.7", "3.8", "3.9", "3.10"]

     steps:
     - uses: actions/checkout@v2
@@ -39,7 +38,7 @@ jobs:

     - name: pip cache (linux)
       uses: actions/cache@v2
-      if: startsWith(matrix.os, 'ubuntu')
+      if: runner.os == 'Linux'
       with:
         path: ~/.cache/pip
         key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
@@ -50,8 +49,9 @@ jobs:
         cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..

     - name: Installation - *nix
+      if: runner.os == 'Linux'
       run: |
-        python -m pip install --upgrade pip
+        python -m pip install --upgrade pip wheel
         export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
         export TA_LIBRARY_PATH=${HOME}/dependencies/lib
         export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -69,7 +69,7 @@ jobs:
       if: matrix.python-version == '3.9'

     - name: Coveralls
-      if: (startsWith(matrix.os, 'ubuntu-20') && matrix.python-version == '3.8')
+      if: (runner.os == 'Linux' && matrix.python-version == '3.8')
       env:
         # Coveralls token. Not used as secret due to github not providing secrets to forked repositories
         COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu
@@ -101,23 +101,20 @@ jobs:
       run: |
         mypy freqtrade scripts

-    - name: Slack Notification
-      uses: lazy-actions/slatify@v3.0.0
+    - name: Discord notification
+      uses: rjstone/discord-webhook-notify@v1
       if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
       with:
-        type: ${{ job.status }}
-        job_name: '*Freqtrade CI ${{ matrix.os }}*'
-        mention: 'here'
-        mention_if: 'failure'
-        channel: '#notifications'
-        url: ${{ secrets.SLACK_WEBHOOK }}
+        severity: error
+        details: Freqtrade CI failed on ${{ matrix.os }}
+        webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

   build_macos:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
         os: [ macos-latest ]
-        python-version: [3.7, 3.8, 3.9]
+        python-version: ["3.7", "3.8", "3.9", "3.10"]

     steps:
     - uses: actions/checkout@v2
@@ -136,7 +133,7 @@ jobs:

     - name: pip cache (macOS)
       uses: actions/cache@v2
-      if: startsWith(matrix.os, 'macOS')
+      if: runner.os == 'macOS'
       with:
         path: ~/Library/Caches/pip
         key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
@@ -147,10 +144,11 @@ jobs:
         cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..

     - name: Installation - macOS
+      if: runner.os == 'macOS'
       run: |
         brew update
         brew install hdf5 c-blosc
-        python -m pip install --upgrade pip
+        python -m pip install --upgrade pip wheel
         export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
         export TA_LIBRARY_PATH=${HOME}/dependencies/lib
         export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -162,7 +160,7 @@ jobs:
         pytest --random-order --cov=freqtrade --cov-config=.coveragerc

     - name: Coveralls
-      if: (startsWith(matrix.os, 'ubuntu-20') && matrix.python-version == '3.8')
+      if: (runner.os == 'Linux' && matrix.python-version == '3.8')
       env:
         # Coveralls token. Not used as secret due to github not providing secrets to forked repositories
         COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu
@@ -194,17 +192,13 @@ jobs:
       run: |
         mypy freqtrade scripts

-    - name: Slack Notification
-      uses: lazy-actions/slatify@v3.0.0
+    - name: Discord notification
+      uses: rjstone/discord-webhook-notify@v1
       if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
       with:
-        type: ${{ job.status }}
-        job_name: '*Freqtrade CI ${{ matrix.os }}*'
-        mention: 'here'
-        mention_if: 'failure'
-        channel: '#notifications'
-        url: ${{ secrets.SLACK_WEBHOOK }}
+        severity: info
+        details: Test Succeeded!
+        webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}


   build_windows:

@@ -212,7 +206,7 @@ jobs:
     strategy:
       matrix:
         os: [ windows-latest ]
-        python-version: [3.7, 3.8]
+        python-version: ["3.7", "3.8", "3.9", "3.10"]

     steps:
     - uses: actions/checkout@v2
@@ -224,7 +218,6 @@ jobs:

     - name: Pip cache (Windows)
       uses: actions/cache@preview
-      if: startsWith(runner.os, 'Windows')
       with:
         path: ~\AppData\Local\pip\Cache
         key: ${{ matrix.os }}-${{ matrix.python-version }}-pip
@@ -257,16 +250,13 @@ jobs:
       run: |
         mypy freqtrade scripts

-    - name: Slack Notification
-      uses: lazy-actions/slatify@v3.0.0
+    - name: Discord notification
+      uses: rjstone/discord-webhook-notify@v1
       if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
       with:
-        type: ${{ job.status }}
-        job_name: '*Freqtrade CI windows*'
-        mention: 'here'
-        mention_if: 'failure'
-        channel: '#notifications'
-        url: ${{ secrets.SLACK_WEBHOOK }}
+        severity: error
+        details: Test Failed
+        webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

   docs_check:
     runs-on: ubuntu-20.04
@@ -288,14 +278,13 @@ jobs:
         pip install mkdocs
         mkdocs build

-    - name: Slack Notification
-      uses: lazy-actions/slatify@v3.0.0
+    - name: Discord notification
+      uses: rjstone/discord-webhook-notify@v1
       if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
       with:
-        type: ${{ job.status }}
-        job_name: '*Freqtrade Docs*'
-        channel: '#notifications'
-        url: ${{ secrets.SLACK_WEBHOOK }}
+        severity: error
+        details: Freqtrade doc test failed!
+        webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

   cleanup-prior-runs:
     runs-on: ubuntu-20.04
@@ -306,7 +295,7 @@ jobs:
     env:
       GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

-  # Notify on slack only once - when CI completes (and after deploy) in case it's successfull
+  # Notify only once - when CI completes (and after deploy) in case it's successfull
   notify-complete:
     needs: [ build_linux, build_macos, build_windows, docs_check ]
     runs-on: ubuntu-20.04
@@ -320,14 +309,13 @@ jobs:
       env:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

-    - name: Slack Notification
-      uses: lazy-actions/slatify@v3.0.0
+    - name: Discord notification
+      uses: rjstone/discord-webhook-notify@v1
       if: always() && steps.check.outputs.has-permission && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
       with:
-        type: ${{ job.status }}
-        job_name: '*Freqtrade CI*'
-        channel: '#notifications'
-        url: ${{ secrets.SLACK_WEBHOOK }}
+        severity: info
+        details: Test Completed!
+        webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

   deploy:
     needs: [ build_linux, build_macos, build_windows, docs_check ]
@@ -400,17 +388,13 @@ jobs:
       run: |
         build_helpers/publish_docker_multi.sh

-    - name: Slack Notification
-      uses: lazy-actions/slatify@v3.0.0
+    - name: Discord notification
+      uses: rjstone/discord-webhook-notify@v1
       if: always() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
       with:
-        type: ${{ job.status }}
-        job_name: '*Freqtrade CI Deploy*'
-        mention: 'here'
-        mention_if: 'failure'
-        channel: '#notifications'
-        url: ${{ secrets.SLACK_WEBHOOK }}
+        severity: info
+        details: Deploy Succeeded!
+        webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}


   deploy_arm:
.travis.yml (55 lines, deleted)
@@ -1,55 +0,0 @@
-os:
-  - linux
-dist: bionic
-language: python
-python:
-  - 3.8
-services:
-  - docker
-env:
-  global:
-    - IMAGE_NAME=freqtradeorg/freqtrade
-install:
-  - cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies; cd ..
-  - export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
-  - export TA_LIBRARY_PATH=${HOME}/dependencies/lib
-  - export TA_INCLUDE_PATH=${HOME}/dependencies/include
-  - pip install -r requirements-dev.txt
-  - pip install -e .
-jobs:
-
-  include:
-    - stage: tests
-      script:
-        - pytest --random-order --cov=freqtrade --cov-config=.coveragerc
-        # Allow failure for coveralls
-        # - coveralls || true
-      name: pytest
-    - script:
-        - cp config_examples/config_bittrex.example.json config.json
-        - freqtrade create-userdir --userdir user_data
-        - freqtrade backtesting --datadir tests/testdata --strategy SampleStrategy
-      name: backtest
-    - script:
-        - cp config_examples/config_bittrex.example.json config.json
-        - freqtrade create-userdir --userdir user_data
-        - freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily
-      name: hyperopt
-    - script: flake8
-      name: flake8
-    - script:
-        # Test Documentation boxes -
-        # !!! <TYPE>: is not allowed!
-        # !!! <TYPE> "title" - Title needs to be quoted!
-        - grep -Er '^!{3}\s\S+:|^!{3}\s\S+\s[^"]' docs/*; test $? -ne 0
-      name: doc syntax
-    - script: mypy freqtrade scripts
-      name: mypy
-
-notifications:
-  slack:
-    secure: bKLXmOrx8e2aPZl7W8DA5BdPAXWGpI5UzST33oc1G/thegXcDVmHBTJrBs4sZak6bgAclQQrdZIsRd2eFYzHLalJEaw6pk7hoAw8SvLnZO0ZurWboz7qg2+aZZXfK4eKl/VUe4sM9M4e/qxjkK+yWG7Marg69c4v1ypF7ezUi1fPYILYw8u0paaiX0N5UX8XNlXy+PBlga2MxDjUY70MuajSZhPsY2pDUvYnMY1D/7XN3cFW0g+3O8zXjF0IF4q1Z/1ASQe+eYjKwPQacE+O8KDD+ZJYoTOFBAPllrtpO1jnOPFjNGf3JIbVMZw4bFjIL0mSQaiSUaUErbU3sFZ5Or79rF93XZ81V7uEZ55vD8KMfR2CB1cQJcZcj0v50BxLo0InkFqa0Y8Nra3sbpV4fV5Oe8pDmomPJrNFJnX6ULQhQ1gTCe0M5beKgVms5SITEpt4/Y0CmLUr6iHDT0CUiyMIRWAXdIgbGh1jfaWOMksybeRevlgDsIsNBjXmYI1Sw2ZZR2Eo2u4R6zyfyjOMLwYJ3vgq9IrACv2w5nmf0+oguMWHf6iWi2hiOqhlAN1W74+3HsYQcqnuM3LGOmuCnPprV1oGBqkPXjIFGpy21gNx4vHfO1noLUyJnMnlu2L7SSuN1CdLsnjJ1hVjpJjPfqB4nn8g12x87TqM1bOm+3Q=
-cache:
-  pip: True
-  directories:
-    - $HOME/dependencies
@@ -1,4 +1,4 @@
-FROM python:3.9.9-slim-bullseye as base
+FROM python:3.10.0-slim-bullseye as base

 # Setup env
 ENV LANG C.UTF-8
@@ -197,7 +197,7 @@ To run this bot we recommend you a cloud instance with a minimum of:

 ### Software requirements

-- [Python 3.7.x](http://docs.python-guide.org/en/latest/starting/installation/)
+- [Python >= 3.7](http://docs.python-guide.org/en/latest/starting/installation/)
 - [pip](https://pip.pypa.io/en/stable/installing/)
 - [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
 - [TA-Lib](https://mrjbq7.github.io/ta-lib/install.html)
Binary files changed (contents not shown). New files:
  build_helpers/TA_Lib-0.4.23-cp310-cp310-win_amd64.whl
  build_helpers/TA_Lib-0.4.23-cp37-cp37m-win_amd64.whl
  build_helpers/TA_Lib-0.4.23-cp38-cp38-win_amd64.whl
  build_helpers/TA_Lib-0.4.23-cp39-cp39-win_amd64.whl
@@ -1,19 +1,21 @@
 # Downloads don't work automatically, since the URL is regenerated via javascript.
 # Downloaded from https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib

-python -m pip install --upgrade pip
+python -m pip install --upgrade pip wheel

 $pyv = python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"

 if ($pyv -eq '3.7') {
-    pip install build_helpers\TA_Lib-0.4.22-cp37-cp37m-win_amd64.whl
+    pip install build_helpers\TA_Lib-0.4.23-cp37-cp37m-win_amd64.whl
 }
 if ($pyv -eq '3.8') {
-    pip install build_helpers\TA_Lib-0.4.22-cp38-cp38-win_amd64.whl
+    pip install build_helpers\TA_Lib-0.4.23-cp38-cp38-win_amd64.whl
 }
 if ($pyv -eq '3.9') {
-    pip install build_helpers\TA_Lib-0.4.22-cp39-cp39-win_amd64.whl
+    pip install build_helpers\TA_Lib-0.4.23-cp39-cp39-win_amd64.whl
+}
+if ($pyv -eq '3.10') {
+    pip install build_helpers\TA_Lib-0.4.23-cp310-cp310-win_amd64.whl
 }

 pip install -r requirements-dev.txt
 pip install -e .
@@ -18,6 +18,7 @@
     "sell_profit_only": false,
     "sell_profit_offset": 0.0,
     "ignore_roi_if_buy_signal": false,
+    "ignore_buying_expired_candle_after": 300,
     "minimal_roi": {
         "40": 0.0,
         "30": 0.01,
@@ -13,7 +13,7 @@ A sample of this can be found below, which is identical to the Default Hyperopt

 ``` python
 from datetime import datetime
-from typing import Dict
+from typing import Any, Dict

 from pandas import DataFrame

@@ -176,12 +176,15 @@ Log messages are send to `syslog` with the `user` facility. So you can see them
 On many systems `syslog` (`rsyslog`) fetches data from `journald` (and vice versa), so both `--logfile syslog` or `--logfile journald` can be used and the messages be viewed with both `journalctl` and a syslog viewer utility. You can combine this in any way which suites you better.

 For `rsyslog` the messages from the bot can be redirected into a separate dedicated log file. To achieve this, add

 ```
 if $programname startswith "freqtrade" then -/var/log/freqtrade.log
 ```

 to one of the rsyslog configuration files, for example at the end of the `/etc/rsyslog.d/50-default.conf`.

 For `syslog` (`rsyslog`), the reduction mode can be switched on. This will reduce the number of repeating messages. For instance, multiple bot Heartbeat messages will be reduced to a single message when nothing else happens with the bot. To achieve this, set in `/etc/rsyslog.conf`:

 ```
 # Filter duplicated messages
 $RepeatedMsgReduction on
Binary image file changed (not shown): 121 KiB before, 143 KiB after.
@@ -312,7 +312,7 @@ A backtesting result will look like that:
 |                        |                     |
 | Min balance            | 0.00945123 BTC      |
 | Max balance            | 0.01846651 BTC      |
-| Drawdown               | 50.63%              |
+| Drawdown (Account)     | 13.33%              |
 | Drawdown               | 0.0015 BTC          |
 | Drawdown high          | 0.0013 BTC          |
 | Drawdown low           | -0.0002 BTC         |
@@ -399,7 +399,7 @@ It contains some useful key metrics about performance of your strategy on backte
 |                        |                     |
 | Min balance            | 0.00945123 BTC      |
 | Max balance            | 0.01846651 BTC      |
-| Drawdown               | 50.63%              |
+| Drawdown (Account)     | 13.33%              |
 | Drawdown               | 0.0015 BTC          |
 | Drawdown high          | 0.0013 BTC          |
 | Drawdown low           | -0.0002 BTC         |
@@ -426,7 +426,8 @@ It contains some useful key metrics about performance of your strategy on backte
 - `Avg. Duration Winners` / `Avg. Duration Loser`: Average durations for winning and losing trades.
 - `Rejected Buy signals`: Buy signals that could not be acted upon due to max_open_trades being reached.
 - `Min balance` / `Max balance`: Lowest and Highest Wallet balance during the backtest period.
-- `Drawdown`: Maximum drawdown experienced. For example, the value of 50% means that from highest to subsequent lowest point, a 50% drop was experienced).
+- `Drawdown (Account)`: Maximum Account Drawdown experienced. Calculated as $(Absolute Drawdown) / (DrawdownHigh + startingBalance)$.
+- `Drawdown`: Maximum, absolute drawdown experienced. Difference between Drawdown High and Subsequent Low point.
 - `Drawdown high` / `Drawdown low`: Profit at the beginning and end of the largest drawdown period. A negative low value means initial capital lost.
 - `Drawdown Start` / `Drawdown End`: Start and end datetime for this largest drawdown (can also be visualized via the `plot-dataframe` sub-command).
 - `Market change`: Change of the market during the backtest period. Calculated as average of all pairs changes from the first to the last candle using the "close" column.
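Note: a quick worked example of the new `Drawdown (Account)` formula, using made-up numbers close to the sample report above (the starting balance is an assumption for illustration, not part of the report):

```python
# Relative (account) drawdown = absolute drawdown / (drawdown high + starting balance)
starting_balance = 0.01     # BTC -- assumed for illustration
drawdown_high = 0.0013      # cumulative profit at the drawdown peak
drawdown_low = -0.0002      # cumulative profit at the drawdown trough
absolute_drawdown = drawdown_high - drawdown_low                      # 0.0015 BTC
relative_drawdown = absolute_drawdown / (drawdown_high + starting_balance)
print(f"{relative_drawdown:.2%}")  # ~13.27% with these assumed inputs
```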
@@ -484,8 +485,8 @@ Since backtesting lacks some detailed information about what happens within a ca
 - ROI applies before trailing-stop, ensuring profits are "top-capped" at ROI if both ROI and trailing stop applies
 - Sell-reason does not explain if a trade was positive or negative, just what triggered the sell (this can look odd if negative ROI values are used)
 - Evaluation sequence (if multiple signals happen on the same candle)
-  - ROI (if not stoploss)
   - Sell-signal
+  - ROI (if not stoploss)
   - Stoploss

 Taking these assumptions, backtesting tries to mirror real trading as closely as possible. However, backtesting will **never** replace running a strategy in dry-run mode.
@@ -15,8 +15,8 @@ This command line option was deprecated in 2019.7-dev (develop branch) and remov

 ### The **--dynamic-whitelist** command line option

-This command line option was deprecated in 2018 and removed freqtrade 2019.6-dev (develop branch)
-and in freqtrade 2019.7.
+This command line option was deprecated in 2018 and removed freqtrade 2019.6-dev (develop branch) and in freqtrade 2019.7.
+Please refer to [pairlists](plugins.md#pairlists-and-pairlist-handlers) instead.

 ### the `--live` command line option

@@ -324,9 +324,8 @@ jupyter nbconvert --ClearOutputPreprocessor.enabled=True --to markdown freqtrade
 This documents some decisions taken for the CI Pipeline.

 * CI runs on all OS variants, Linux (ubuntu), macOS and Windows.
-* Docker images are build for the branches `stable` and `develop`.
+* Docker images are build for the branches `stable` and `develop`, and are built as multiarch builds, supporting multiple platforms via the same tag.
 * Docker images containing Plot dependencies are also available as `stable_plot` and `develop_plot`.
-* Raspberry PI Docker images are postfixed with `_pi` - so tags will be `:stable_pi` and `develop_pi`.
 * Docker images contain a file, `/freqtrade/freqtrade_commit` containing the commit this image is based of.
 * Full docker image rebuilds are run once a week via schedule.
 * Deployments run on ubuntu.
@@ -188,12 +188,12 @@ There is however nothing preventing you from using GPU-enabled indicators within
 Per default Hyperopt called without the `-e`/`--epochs` command line option will only
 run 100 epochs, means 100 evaluations of your triggers, guards, ... Too few
 to find a great result (unless if you are very lucky), so you probably
-have to run it for 10.000 or more. But it will take an eternity to
+have to run it for 10000 or more. But it will take an eternity to
 compute.

 Since hyperopt uses Bayesian search, running for too many epochs may not produce greater results.

-It's therefore recommended to run between 500-1000 epochs over and over until you hit at least 10.000 epochs in total (or are satisfied with the result). You can best judge by looking at the results - if the bot keeps discovering better strategies, it's best to keep on going.
+It's therefore recommended to run between 500-1000 epochs over and over until you hit at least 10000 epochs in total (or are satisfied with the result). You can best judge by looking at the results - if the bot keeps discovering better strategies, it's best to keep on going.

 ```bash
 freqtrade hyperopt --hyperopt-loss SharpeHyperOptLossDaily --strategy SampleStrategy -e 1000
@@ -217,9 +217,9 @@ already 8\*10^9\*10 evaluations. A roughly total of 80 billion evaluations.
 Did you run 100 000 evaluations? Congrats, you've done roughly 1 / 100 000 th
 of the search space, assuming that the bot never tests the same parameters more than once.

-* The time it takes to run 1000 hyperopt epochs depends on things like: The available cpu, hard-disk, ram, timeframe, timerange, indicator settings, indicator count, amount of coins that hyperopt test strategies on and the resulting trade count - which can be 650 trades in a year or 10.0000 trades depending if the strategy aims for big profits by trading rarely or for many low profit trades.
+* The time it takes to run 1000 hyperopt epochs depends on things like: The available cpu, hard-disk, ram, timeframe, timerange, indicator settings, indicator count, amount of coins that hyperopt test strategies on and the resulting trade count - which can be 650 trades in a year or 100000 trades depending if the strategy aims for big profits by trading rarely or for many low profit trades.

-Example: 4% profit 650 times vs 0,3% profit a trade 10.000 times in a year. If we assume you set the --timerange to 365 days.
+Example: 4% profit 650 times vs 0,3% profit a trade 10000 times in a year. If we assume you set the --timerange to 365 days.

 Example:
 `freqtrade --config config.json --strategy SampleStrategy --hyperopt SampleHyperopt -e 1000 --timerange 20190601-20200601`
@@ -56,10 +56,6 @@ OS Specific steps are listed first, the [Common](#common) section below is neces
 !!! Note
     Python3.7 or higher and the corresponding pip are assumed to be available.

-!!! Warning "Python 3.10 support"
-    Due to issues with dependencies, freqtrade is currently unable to support python 3.10.
-    We're working on supporting python 3.10, are however dependant on support from dependencies.
-
 === "Debian/Ubuntu"
     #### Install necessary dependencies

@@ -424,16 +420,3 @@ open /Library/Developer/CommandLineTools/Packages/macOS_SDK_headers_for_macOS_10
 ```

 If this file is inexistent, then you're probably on a different version of MacOS, so you may need to consult the internet for specific resolution details.
-
-### MacOS installation error with python 3.9
-
-When using python 3.9 on macOS, it's currently necessary to install some os-level modules to allow dependencies to compile.
-The errors you'll see happen during installation and are related to the installation of `tables` or `blosc`.
-
-You can install the necessary libraries with the following command:
-
-```bash
-brew install hdf5 c-blosc
-```
-
-After this, please run the installation (script) again.
@@ -283,6 +283,8 @@ The `plot-profit` subcommand shows an interactive graph with three plots:
 * The summarized profit made by backtesting.
   Note that this is not the real-world profit, but more of an estimate.
 * Profit for each individual pair.
+* Parallelism of trades.
+* Underwater (Periods of drawdown).

 The first graph is good to get a grip of how the overall market progresses.

@@ -292,6 +294,8 @@ This graph will also highlight the start (and end) of the Max drawdown period.

 The third graph can be useful to spot outliers, events in pairs that cause profit spikes.

+The forth graph can help you analyze trade parallelism, showing how often max_open_trades have been maxed out.
+
 Possible options for the `freqtrade plot-profit` subcommand:

 ```
@@ -1,4 +1,4 @@
 mkdocs==1.2.3
-mkdocs-material==8.1.0
+mkdocs-material==8.1.4
 mdx_truly_sane_lists==1.2
 pymdown-extensions==9.1
@@ -222,9 +222,9 @@ should be rewritten to
 ```python
 frames = [dataframe]
 for val in self.buy_ema_short.range:
-    frames.append({
+    frames.append(DataFrame({
         f'ema_short_{val}': ta.EMA(dataframe, timeperiod=val)
-    })
+    }))

 # Append columns to existing dataframe
 merged_frame = pd.concat(frames, axis=1)
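Note: the wrapping in `DataFrame(...)` matters because `pd.concat` only accepts DataFrame/Series objects; a bare dict in the `frames` list fails. A minimal self-contained sketch, using a rolling mean as a hypothetical stand-in for `ta.EMA`:

```python
import pandas as pd

base = pd.DataFrame({"close": [1.0, 2.0, 3.0, 4.0]})
frames = [base]
for val in (2, 3):
    # Each appended object must be a DataFrame (or Series); a plain dict here
    # would make pd.concat raise a TypeError.
    frames.append(pd.DataFrame({
        f"ema_short_{val}": base["close"].rolling(val, min_periods=1).mean()
    }))

merged_frame = pd.concat(frames, axis=1)
print(list(merged_frame.columns))  # ['close', 'ema_short_2', 'ema_short_3']
```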
@@ -23,9 +23,9 @@ git clone https://github.com/freqtrade/freqtrade.git

 Install ta-lib according to the [ta-lib documentation](https://github.com/mrjbq7/ta-lib#windows).

-As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), there is also a repository of unofficial pre-compiled windows Wheels [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib), which need to be downloaded and installed using `pip install TA_Lib-0.4.22-cp38-cp38-win_amd64.whl` (make sure to use the version matching your python version).
+As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), there is also a repository of unofficial pre-compiled windows Wheels [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib), which need to be downloaded and installed using `pip install TA_Lib-0.4.23-cp38-cp38-win_amd64.whl` (make sure to use the version matching your python version).

-Freqtrade provides these dependencies for the latest 3 Python versions (3.7, 3.8 and 3.9) and for 64bit Windows.
+Freqtrade provides these dependencies for the latest 3 Python versions (3.7, 3.8, 3.9 and 3.10) and for 64bit Windows.
 Other versions must be downloaded from the above link.

 ``` powershell
@@ -1,6 +1,6 @@
 from datetime import datetime, timezone

-from cachetools.ttl import TTLCache
+from cachetools import TTLCache


 class PeriodicCache(TTLCache):
@@ -9,21 +9,13 @@ import numpy as np
 import pandas as pd

 from freqtrade.constants import LAST_BT_RESULT_FN
+from freqtrade.exceptions import OperationalException
 from freqtrade.misc import json_load
 from freqtrade.persistence import LocalTrade, Trade, init_db


 logger = logging.getLogger(__name__)

-# Old format - maybe remove?
-BT_DATA_COLUMNS_OLD = ["pair", "profit_percent", "open_date", "close_date", "index",
-                       "trade_duration", "open_rate", "close_rate", "open_at_end", "sell_reason"]
-
-# Mid-term format, created by BacktestResult Named Tuple
-BT_DATA_COLUMNS_MID = ['pair', 'profit_percent', 'open_date', 'close_date', 'trade_duration',
-                       'open_rate', 'close_rate', 'open_at_end', 'sell_reason', 'fee_open',
-                       'fee_close', 'amount', 'profit_abs', 'profit_ratio']
-
 # Newest format
 BT_DATA_COLUMNS = ['pair', 'stake_amount', 'amount', 'open_date', 'close_date',
                    'open_rate', 'close_rate',
@@ -167,23 +159,9 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non
                                           )
     else:
         # old format - only with lists.
-        df = pd.DataFrame(data, columns=BT_DATA_COLUMNS_OLD)
-        if not df.empty:
-            df['open_date'] = pd.to_datetime(df['open_date'],
-                                             unit='s',
-                                             utc=True,
-                                             infer_datetime_format=True
-                                             )
-            df['close_date'] = pd.to_datetime(df['close_date'],
-                                              unit='s',
-                                              utc=True,
-                                              infer_datetime_format=True
-                                              )
-            # Create compatibility with new format
-            df['profit_abs'] = df['close_rate'] - df['open_rate']
+        raise OperationalException(
+            "Backtest-results with only trades data are no longer supported.")
     if not df.empty:
-        if 'profit_ratio' not in df.columns:
-            df['profit_ratio'] = df['profit_percent']
         df = df.sort_values("open_date").reset_index(drop=True)
     return df

@@ -325,6 +303,7 @@ def combine_dataframes_with_mean(data: Dict[str, pd.DataFrame],
     :param column: Column in the original dataframes to use
     :return: DataFrame with the column renamed to the dict key, and a column
              named mean, containing the mean of all pairs.
+    :raise: ValueError if no data is provided.
     """
     df_comb = pd.concat([data[pair].set_index('date').rename(
         {column: pair}, axis=1)[pair] for pair in data], axis=1)
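Note: the new `:raise:` line documents the failure mode when the pair dict is empty; concatenating an empty collection raises `ValueError`. A minimal sketch of that behaviour (hypothetical empty `data` dict, not the freqtrade helper itself):

```python
import pandas as pd

data = {}  # no pair dataframes loaded
try:
    pd.concat([data[pair].set_index('date') for pair in data], axis=1)
except ValueError as err:
    print(err)  # "No objects to concatenate"
```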
@@ -360,9 +339,19 @@ def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
     return df


-def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date',
-                           value_col: str = 'profit_ratio'
-                           ) -> Tuple[float, pd.Timestamp, pd.Timestamp, float, float]:
+def _calc_drawdown_series(profit_results: pd.DataFrame, *, date_col: str, value_col: str
+                          ) -> pd.DataFrame:
+    max_drawdown_df = pd.DataFrame()
+    max_drawdown_df['cumulative'] = profit_results[value_col].cumsum()
+    max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax()
+    max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value']
+    max_drawdown_df['date'] = profit_results.loc[:, date_col]
+    return max_drawdown_df
+
+
+def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date',
+                         value_col: str = 'profit_ratio'
+                         ):
     """
     Calculate max drawdown and the corresponding close dates
     :param trades: DataFrame containing trades (requires columns close_date and profit_ratio)
@@ -375,10 +364,29 @@ def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date'
     if len(trades) == 0:
         raise ValueError("Trade dataframe empty.")
     profit_results = trades.sort_values(date_col).reset_index(drop=True)
-    max_drawdown_df = pd.DataFrame()
-    max_drawdown_df['cumulative'] = profit_results[value_col].cumsum()
-    max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax()
-    max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value']
+    max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col)
+
+    return max_drawdown_df
+
+
+def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date',
+                           value_col: str = 'profit_abs', starting_balance: float = 0
+                           ) -> Tuple[float, pd.Timestamp, pd.Timestamp, float, float, float]:
+    """
+    Calculate max drawdown and the corresponding close dates
+    :param trades: DataFrame containing trades (requires columns close_date and profit_ratio)
+    :param date_col: Column in DataFrame to use for dates (defaults to 'close_date')
+    :param value_col: Column in DataFrame to use for values (defaults to 'profit_abs')
+    :param starting_balance: Portfolio starting balance - properly calculate relative drawdown.
+    :return: Tuple (float, highdate, lowdate, highvalue, lowvalue, relative_drawdown)
+             with absolute max drawdown, high and low time and high and low value,
+             and the relative account drawdown
+    :raise: ValueError if trade-dataframe was found empty.
+    """
+    if len(trades) == 0:
+        raise ValueError("Trade dataframe empty.")
+    profit_results = trades.sort_values(date_col).reset_index(drop=True)
+    max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col)

     idxmin = max_drawdown_df['drawdown'].idxmin()
     if idxmin == 0:
@@ -388,7 +396,18 @@ def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date'
     high_val = max_drawdown_df.loc[max_drawdown_df.iloc[:idxmin]
                                    ['high_value'].idxmax(), 'cumulative']
     low_val = max_drawdown_df.loc[idxmin, 'cumulative']
-    return abs(min(max_drawdown_df['drawdown'])), high_date, low_date, high_val, low_val
+    max_drawdown_rel = 0.0
+    if high_val + starting_balance != 0:
+        max_drawdown_rel = (high_val - low_val) / (high_val + starting_balance)
+
+    return (
+        abs(min(max_drawdown_df['drawdown'])),
+        high_date,
+        low_date,
+        high_val,
+        low_val,
+        max_drawdown_rel
+    )


 def calculate_csum(trades: pd.DataFrame, starting_balance: float = 0) -> Tuple[float, float]:
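Note: a usage sketch of the extended `calculate_max_drawdown` return value (hypothetical trade data; assumes the import path used in this diff):

```python
import pandas as pd

from freqtrade.data.btanalysis import calculate_max_drawdown

# Hypothetical closed trades with absolute profits in BTC.
trades = pd.DataFrame({
    "close_date": pd.to_datetime(
        ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04"], utc=True),
    "profit_abs": [0.0010, 0.0003, -0.0015, 0.0004],
})

# The function now returns a 6-tuple; the last element is the relative
# (account) drawdown computed against the supplied starting balance.
abs_dd, high_date, low_date, high_val, low_val, rel_dd = calculate_max_drawdown(
    trades, value_col="profit_abs", starting_balance=0.01)
print(abs_dd, f"{rel_dd:.2%}")  # ~0.0015 BTC and ~13.27% for these made-up numbers
```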
@@ -201,7 +201,7 @@ class IDataHandler(ABC):
             enddate = pairdf.iloc[-1]['date']

             if timerange_startup:
-                self._validate_pairdata(pair, pairdf, timerange_startup)
+                self._validate_pairdata(pair, pairdf, timeframe, timerange_startup)
                 pairdf = trim_dataframe(pairdf, timerange_startup)
                 if self._check_empty_df(pairdf, pair, timeframe, warn_no_data):
                     return pairdf
@@ -228,7 +228,7 @@ class IDataHandler(ABC):
                 return True
         return False

-    def _validate_pairdata(self, pair, pairdata: DataFrame, timerange: TimeRange):
+    def _validate_pairdata(self, pair, pairdata: DataFrame, timeframe: str, timerange: TimeRange):
         """
         Validates pairdata for missing data at start end end and logs warnings.
         :param pairdata: Dataframe to validate
@@ -238,12 +238,12 @@ class IDataHandler(ABC):
         if timerange.starttype == 'date':
             start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc)
             if pairdata.iloc[0]['date'] > start:
-                logger.warning(f"Missing data at start for pair {pair}, "
+                logger.warning(f"Missing data at start for pair {pair} at {timeframe}, "
                                f"data starts at {pairdata.iloc[0]['date']:%Y-%m-%d %H:%M:%S}")
         if timerange.stoptype == 'date':
             stop = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc)
             if pairdata.iloc[-1]['date'] < stop:
-                logger.warning(f"Missing data at end for pair {pair}, "
+                logger.warning(f"Missing data at end for pair {pair} at {timeframe}, "
                                f"data ends at {pairdata.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}")

@@ -5,6 +5,7 @@ from freqtrade.exchange.exchange import Exchange
 # isort: on
 from freqtrade.exchange.bibox import Bibox
 from freqtrade.exchange.binance import Binance
+from freqtrade.exchange.bitpanda import Bitpanda
 from freqtrade.exchange.bittrex import Bittrex
 from freqtrade.exchange.bybit import Bybit
 from freqtrade.exchange.coinbasepro import Coinbasepro
freqtrade/exchange/bitpanda.py (new file, 37 lines)
@@ -0,0 +1,37 @@
+""" Bitpanda exchange subclass """
+import logging
+from datetime import datetime, timezone
+from typing import Dict, List, Optional
+
+from freqtrade.exchange import Exchange
+
+
+logger = logging.getLogger(__name__)
+
+
+class Bitpanda(Exchange):
+    """
+    Bitpanda exchange class. Contains adjustments needed for Freqtrade to work
+    with this exchange.
+    """
+
+    def get_trades_for_order(self, order_id: str, pair: str, since: datetime,
+                             params: Optional[Dict] = None) -> List:
+        """
+        Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id.
+        The "since" argument passed in is coming from the database and is in UTC,
+        as timezone-native datetime object.
+        From the python documentation:
+            > Naive datetime instances are assumed to represent local time
+        Therefore, calling "since.timestamp()" will get the UTC timestamp, after applying the
+        transformation from local timezone to UTC.
+        This works for timezones UTC+ since then the result will contain trades from a few hours
+        instead of from the last 5 seconds, however fails for UTC- timezones,
+        since we're then asking for trades with a "since" argument in the future.
+
+        :param order_id order_id: Order-id as given when creating the order
+        :param pair: Pair the order is for
+        :param since: datetime object of the order creation time. Assumes object is in UTC.
+        """
+        params = {'to': int(datetime.now(timezone.utc).timestamp() * 1000)}
+        return super().get_trades_for_order(order_id, pair, since, params)
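Note: the timezone caveat described in the docstring above can be reproduced with nothing but the standard library (illustrative only):

```python
from datetime import datetime, timezone

naive = datetime(2021, 12, 1, 12, 0, 0)                       # interpreted as *local* time
aware = datetime(2021, 12, 1, 12, 0, 0, tzinfo=timezone.utc)  # explicit UTC

# timestamp() shifts naive values from local time to UTC, so the two results differ
# by the machine's UTC offset. On a UTC- host the naive timestamp is larger (later),
# i.e. a "since" filter built from it lies in the future -- which is why the
# Bitpanda subclass passes an explicit 'to' parameter instead.
print(naive.timestamp(), aware.timestamp())
```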
@@ -4,9 +4,20 @@ import time
 from functools import wraps

 from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError
+from freqtrade.mixins import LoggingMixin


 logger = logging.getLogger(__name__)
+__logging_mixin = None
+
+
+def _get_logging_mixin():
+    # Logging-mixin to cache kucoin responses
+    # Only to be used in retrier
+    global __logging_mixin
+    if not __logging_mixin:
+        __logging_mixin = LoggingMixin(logger)
+    return __logging_mixin


 # Maximum default retry count.
@@ -72,28 +83,33 @@ def calculate_backoff(retrycount, max_retries):
 def retrier_async(f):
     async def wrapper(*args, **kwargs):
         count = kwargs.pop('count', API_RETRY_COUNT)
+        kucoin = args[0].name == "Kucoin"  # Check if the exchange is KuCoin.
         try:
             return await f(*args, **kwargs)
         except TemporaryError as ex:
-            logger.warning('%s() returned exception: "%s"', f.__name__, ex)
+            msg = f'{f.__name__}() returned exception: "{ex}". '
             if count > 0:
-                logger.warning('retrying %s() still for %s times', f.__name__, count)
+                msg += f'Retrying still for {count} times.'
                 count -= 1
-                kwargs.update({'count': count})
+                kwargs['count'] = count
                 if isinstance(ex, DDosProtection):
-                    if "kucoin" in str(ex) and "429000" in str(ex):
+                    if kucoin and "429000" in str(ex):
                         # Temporary fix for 429000 error on kucoin
                         # see https://github.com/freqtrade/freqtrade/issues/5700 for details.
-                        logger.warning(
+                        _get_logging_mixin().log_once(
                             f"Kucoin 429 error, avoid triggering DDosProtection backoff delay. "
-                            f"{count} tries left before giving up")
+                            f"{count} tries left before giving up", logmethod=logger.warning)
+                        # Reset msg to avoid logging too many times.
+                        msg = ''
                     else:
                         backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)
                         logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")
                         await asyncio.sleep(backoff_delay)
+                if msg:
+                    logger.warning(msg)
                 return await wrapper(*args, **kwargs)
             else:
-                logger.warning('Giving up retrying: %s()', f.__name__)
+                logger.warning(msg + 'Giving up.')
                 raise ex
     return wrapper

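Note: a rough sketch of what routing the Kucoin 429000 warning through `log_once` buys — only names visible in this diff are used; the actual caching window lives inside `LoggingMixin`:

```python
import logging

from freqtrade.mixins import LoggingMixin

logger = logging.getLogger(__name__)
mixin = LoggingMixin(logger)

# Identical messages are emitted once per caching period instead of on every retry,
# so a burst of Kucoin 429000 responses no longer floods the log.
for _ in range(3):
    mixin.log_once("Kucoin 429 error, avoid triggering DDosProtection backoff delay.",
                   logmethod=logger.warning)
```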
@@ -106,9 +122,9 @@ def retrier(_func=None, retries=API_RETRY_COUNT):
             try:
                 return f(*args, **kwargs)
             except (TemporaryError, RetryableOrderError) as ex:
-                logger.warning('%s() returned exception: "%s"', f.__name__, ex)
+                msg = f'{f.__name__}() returned exception: "{ex}". '
                 if count > 0:
-                    logger.warning('retrying %s() still for %s times', f.__name__, count)
+                    logger.warning(msg + f'Retrying still for {count} times.')
                     count -= 1
                     kwargs.update({'count': count})
                     if isinstance(ex, (DDosProtection, RetryableOrderError)):
@@ -118,7 +134,7 @@ def retrier(_func=None, retries=API_RETRY_COUNT):
                         time.sleep(backoff_delay)
                     return wrapper(*args, **kwargs)
                 else:
-                    logger.warning('Giving up retrying: %s()', f.__name__)
+                    logger.warning(msg + 'Giving up.')
                     raise ex
         return wrapper
     # Support both @retrier and @retrier(retries=2) syntax
@@ -67,6 +67,8 @@ class Exchange:
         "ohlcv_params": {},
         "ohlcv_candle_limit": 500,
         "ohlcv_partial_candle": True,
+        # Check https://github.com/ccxt/ccxt/issues/10767 for removal of ohlcv_volume_currency
+        "ohlcv_volume_currency": "base",  # "base" or "quote"
         "trades_pagination": "time",  # Possible are "time" or "id"
         "trades_pagination_arg": "since",
         "l2_limit_range": None,
@@ -83,6 +85,8 @@ class Exchange:
         self._api: ccxt.Exchange = None
         self._api_async: ccxt_async.Exchange = None
         self._markets: Dict = {}
+        self.loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(self.loop)

         self._config.update(config)

@@ -170,8 +174,10 @@ class Exchange:

     def close(self):
         logger.debug("Exchange object destroyed, closing async loop")
-        if self._api_async and inspect.iscoroutinefunction(self._api_async.close):
-            asyncio.get_event_loop().run_until_complete(self._api_async.close())
+        if (self._api_async and inspect.iscoroutinefunction(self._api_async.close)
+                and self._api_async.session):
+            logger.info("Closing async ccxt session.")
+            self.loop.run_until_complete(self._api_async.close())

     def _init_ccxt(self, exchange_config: Dict[str, Any], ccxt_module: CcxtModuleType = ccxt,
                    ccxt_kwargs: Dict = {}) -> ccxt.Exchange:
@ -326,7 +332,7 @@ class Exchange:
|
|||||||
def _load_async_markets(self, reload: bool = False) -> None:
|
def _load_async_markets(self, reload: bool = False) -> None:
|
||||||
try:
|
try:
|
||||||
if self._api_async:
|
if self._api_async:
|
||||||
asyncio.get_event_loop().run_until_complete(
|
self.loop.run_until_complete(
|
||||||
self._api_async.load_markets(reload=reload))
|
self._api_async.load_markets(reload=reload))
|
||||||
|
|
||||||
except (asyncio.TimeoutError, ccxt.BaseError) as e:
|
except (asyncio.TimeoutError, ccxt.BaseError) as e:
|
||||||
@@ -652,7 +658,8 @@ class Exchange:
         max_slippage_val = rate * ((1 + slippage) if side == 'buy' else (1 - slippage))

         remaining_amount = amount
-        filled_amount = 0
+        filled_amount = 0.0
+        book_entry_price = 0.0
         for book_entry in ob[ob_type]:
             book_entry_price = book_entry[0]
             book_entry_coin_volume = book_entry[1]
@@ -1091,7 +1098,8 @@ class Exchange:
     # Fee handling

     @retrier
-    def get_trades_for_order(self, order_id: str, pair: str, since: datetime) -> List:
+    def get_trades_for_order(self, order_id: str, pair: str, since: datetime,
+                             params: Optional[Dict] = None) -> List:
         """
         Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id.
         The "since" argument passed in is coming from the database and is in UTC,
@@ -1115,8 +1123,10 @@ class Exchange:
         try:
             # Allow 5s offset to catch slight time offsets (discovered in #1185)
             # since needs to be int in milliseconds
+            _params = params if params else {}
             my_trades = self._api.fetch_my_trades(
-                pair, int((since.replace(tzinfo=timezone.utc).timestamp() - 5) * 1000))
+                pair, int((since.replace(tzinfo=timezone.utc).timestamp() - 5) * 1000),
+                params=_params)
             matched_trades = [trade for trade in my_trades if trade['order'] == order_id]

             self._log_exchange_response('get_trades_for_order', matched_trades)
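
The `get_trades_for_order` change above threads an optional `params` dict through to the underlying exchange call. A small sketch of the same forwarding idea; `api.fetch_my_trades` here is a stand-in object passed by the caller, not an import from ccxt:

```python
from typing import Dict, List, Optional


def get_trades_for_order(api, pair: str, since_ms: int, order_id: str,
                         params: Optional[Dict] = None) -> List[Dict]:
    """Forward an optional `params` dict to the exchange call, defaulting to {}
    so the downstream signature stays stable when no extra params are given."""
    _params = params if params else {}
    my_trades = api.fetch_my_trades(pair, since_ms, params=_params)
    # Keep only the fills belonging to the requested order.
    return [trade for trade in my_trades if trade['order'] == order_id]
```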
@@ -1224,7 +1234,7 @@ class Exchange:
         :param since_ms: Timestamp in milliseconds to get history from
         :return: List with candle (OHLCV) data
         """
-        pair, timeframe, data = asyncio.get_event_loop().run_until_complete(
+        pair, timeframe, data = self.loop.run_until_complete(
             self._async_get_historic_ohlcv(pair=pair, timeframe=timeframe,
                                            since_ms=since_ms, is_new_pair=is_new_pair))
         logger.info(f"Downloaded data for {pair} with length {len(data)}.")
@@ -1326,8 +1336,10 @@ class Exchange:
         results_df = {}
         # Chunk requests into batches of 100 to avoid overwelming ccxt Throttling
         for input_coro in chunks(input_coroutines, 100):
-            results = asyncio.get_event_loop().run_until_complete(
-                asyncio.gather(*input_coro, return_exceptions=True))
+            async def gather_stuff():
+                return await asyncio.gather(*input_coro, return_exceptions=True)
+
+            results = self.loop.run_until_complete(gather_stuff())

             # handle caching
             for res in results:
@@ -1563,7 +1575,7 @@ class Exchange:
         if not self.exchange_has("fetchTrades"):
             raise OperationalException("This exchange does not support downloading Trades.")

-        return asyncio.get_event_loop().run_until_complete(
+        return self.loop.run_until_complete(
             self._async_get_trade_history(pair=pair, since=since,
                                           until=until, from_id=from_id))
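
All of the `asyncio.get_event_loop()` call sites above move to a loop owned by the Exchange instance (`self.loop`), created in `__init__` and driven explicitly. A self-contained sketch of that pattern; the class and method names below are illustrative, not taken from this diff:

```python
import asyncio


class ExchangeClient:
    """Each instance owns one event loop; all awaitables are run through it
    instead of relying on asyncio.get_event_loop() picking up whatever loop
    happens to be current in the calling thread."""

    def __init__(self) -> None:
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    async def _fetch(self, pair: str) -> str:
        await asyncio.sleep(0)  # placeholder for an async exchange call
        return f"ohlcv for {pair}"

    def fetch(self, pair: str) -> str:
        # Synchronous entry point: always run on the instance's own loop.
        return self.loop.run_until_complete(self._fetch(pair))

    def close(self) -> None:
        self.loop.close()


client = ExchangeClient()
print(client.fetch("BTC/USDT"))
client.close()
```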
@@ -19,6 +19,7 @@ class Ftx(Exchange):
     _ft_has: Dict = {
         "stoploss_on_exchange": True,
         "ohlcv_candle_limit": 1500,
+        "ohlcv_volume_currency": "quote",
     }

     def market_is_tradable(self, market: Dict[str, Any]) -> bool:

@@ -21,6 +21,7 @@ class Gateio(Exchange):

     _ft_has: Dict = {
         "ohlcv_candle_limit": 1000,
+        "ohlcv_volume_currency": "quote",
     }

     _headers = {'X-Gate-Channel-Id': 'freqtrade'}

@@ -14,5 +14,5 @@ class Okex(Exchange):
     """

     _ft_has: Dict = {
-        "ohlcv_candle_limit": 100,
+        "ohlcv_candle_limit": 300,
     }
@@ -126,6 +126,7 @@ class FreqtradeBot(LoggingMixin):

         self.rpc.cleanup()
         cleanup_db()
+        self.exchange.close()

     def startup(self) -> None:
         """

@@ -246,6 +246,9 @@ class Backtesting:
         Helper function to convert a processed dataframes into lists for performance reasons.

         Used by backtest() - so keep this optimized for performance.
+
+        :param processed: a processed dictionary with format {pair, data}, which gets cleared to
+            optimize memory usage!
         """
         # Every change to this headers list must evaluate further usages of the resulting tuple
         # and eventually change the constants for indexes at the top
@@ -254,7 +257,8 @@ class Backtesting:
         self.progress.init_step(BacktestState.CONVERT, len(processed))

         # Create dict with data
-        for pair, pair_data in processed.items():
+        for pair in processed.keys():
+            pair_data = processed[pair]
             self.check_abort()
             self.progress.increment()
             if not pair_data.empty:
@@ -266,8 +270,8 @@ class Backtesting:
             df_analyzed = self.strategy.advise_sell(
                 self.strategy.advise_buy(pair_data, {'pair': pair}), {'pair': pair}).copy()
             # Trim startup period from analyzed dataframe
-            df_analyzed = trim_dataframe(df_analyzed, self.timerange,
-                                         startup_candles=self.required_startup)
+            df_analyzed = processed[pair] = pair_data = trim_dataframe(
+                df_analyzed, self.timerange, startup_candles=self.required_startup)
             # To avoid using data from future, we use buy/sell signals shifted
             # from the previous candle
             df_analyzed.loc[:, 'buy'] = df_analyzed.loc[:, 'buy'].shift(1)
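
The trimming change above writes the trimmed dataframe back into `processed[pair]` so the untrimmed copy can be garbage collected while backtesting. A small pandas sketch of the same reassignment idea on synthetic data; the function name and the date-based cut are illustrative, not freqtrade's implementation:

```python
import pandas as pd


def trim_and_replace(processed: dict, start: pd.Timestamp) -> dict:
    """Trim each pair's dataframe and write it back into the dict, dropping the
    last reference to the full-size frame so memory can be reclaimed."""
    for pair in processed.keys():
        pair_data = processed[pair]
        trimmed = pair_data[pair_data['date'] >= start]
        # Reassigning into the dict replaces the untrimmed original.
        processed[pair] = pair_data = trimmed
    return processed


data = {'BTC/USDT': pd.DataFrame({
    'date': pd.date_range('2021-01-01', periods=5, freq='D'),
    'close': [1.0, 2.0, 3.0, 4.0, 5.0],
})}
print(trim_and_replace(data, pd.Timestamp('2021-01-03'))['BTC/USDT'])
```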
@@ -416,7 +420,9 @@ class Backtesting:
                 return self._get_sell_trade_entry_for_candle(trade, sell_row)
             detail_data.loc[:, 'buy'] = sell_row[BUY_IDX]
             detail_data.loc[:, 'sell'] = sell_row[SELL_IDX]
-            headers = ['date', 'buy', 'open', 'close', 'sell', 'low', 'high']
+            detail_data.loc[:, 'buy_tag'] = sell_row[BUY_TAG_IDX]
+            detail_data.loc[:, 'exit_tag'] = sell_row[EXIT_TAG_IDX]
+            headers = ['date', 'buy', 'open', 'close', 'sell', 'low', 'high', 'buy_tag', 'exit_tag']
             for det_row in detail_data[headers].values.tolist():
                 res = self._get_sell_trade_entry_for_candle(trade, det_row)
                 if res:
@@ -519,7 +525,8 @@ class Backtesting:
         Of course try to not have ugly code. By some accessor are sometime slower than functions.
         Avoid extensive logging in this method and functions it calls.

-        :param processed: a processed dictionary with format {pair, data}
+        :param processed: a processed dictionary with format {pair, data}, which gets cleared to
+            optimize memory usage!
         :param start_date: backtesting timerange start datetime
         :param end_date: backtesting timerange end datetime
         :param max_open_trades: maximum number of concurrent trades, <= 0 means unlimited
@@ -12,7 +12,7 @@ class BTProgress:
     def init_step(self, action: BacktestState, max_steps: float):
         self._action = action
         self._max_steps = max_steps
-        self._proress = 0
+        self._progress = 0

     def set_new_value(self, new_value: float):
         self._progress = new_value

@@ -76,6 +76,7 @@ class Hyperopt:
         self.config = config

         self.backtesting = Backtesting(self.config)
+        self.pairlist = self.backtesting.pairlists.whitelist

         if not self.config.get('hyperopt'):
             self.custom_hyperopt = HyperOptAuto(self.config)
@@ -332,7 +333,7 @@ class Hyperopt:
         params_details = self._get_params_details(params_dict)

         strat_stats = generate_strategy_stats(
-            processed, self.backtesting.strategy.get_strategy_name(),
+            self.pairlist, self.backtesting.strategy.get_strategy_name(),
             backtesting_results, min_date, max_date, market_change=0
         )
         results_explanation = HyperoptTools.format_results_explanation_string(
@@ -422,6 +423,7 @@ class Hyperopt:
         self.backtesting.exchange.close()
         self.backtesting.exchange._api = None  # type: ignore
         self.backtesting.exchange._api_async = None  # type: ignore
+        self.backtesting.exchange.loop = None  # type: ignore
         # self.backtesting.exchange = None  # type: ignore
         self.backtesting.pairlists = None  # type: ignore

@@ -47,10 +47,9 @@ class CalmarHyperOptLoss(IHyperOptLoss):

         # calculate max drawdown
         try:
-            _, _, _, high_val, low_val = calculate_max_drawdown(
+            _, _, _, _, _, max_drawdown = calculate_max_drawdown(
                 results, value_col="profit_abs"
             )
-            max_drawdown = (high_val - low_val) / high_val
         except ValueError:
             max_drawdown = 0

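
Several call sites in this diff switch to a six-value return from `calculate_max_drawdown`, whose last element is the drawdown expressed relative to the account balance rather than the old `(high - low) / high` computation. A self-contained sketch of absolute vs. account-relative drawdown on a cumulative-profit series; the helper below and its return order are illustrative, not freqtrade's implementation:

```python
import pandas as pd


def max_drawdown(profit_abs: pd.Series, starting_balance: float = 0.0):
    """Return (absolute_drawdown, relative_drawdown) of a cumulative-profit curve."""
    cumulative = profit_abs.cumsum()
    high = cumulative.cummax()
    drawdown_abs = high - cumulative
    # Relative to the balance at the respective high point, when a balance is known.
    denom = high + starting_balance
    drawdown_rel = (drawdown_abs / denom).where(denom != 0, 0.0)
    return drawdown_abs.max(), drawdown_rel.max()


profits = pd.Series([10.0, -25.0, 5.0, 30.0, -15.0])
print(max_drawdown(profits, starting_balance=100.0))
```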
@@ -299,8 +299,7 @@ class HyperoptTools():
                 f"Objective: {results['loss']:.5f}")

     @staticmethod
-    def prepare_trials_columns(trials: pd.DataFrame, legacy_mode: bool,
-                               has_drawdown: bool) -> pd.DataFrame:
+    def prepare_trials_columns(trials: pd.DataFrame, has_drawdown: bool) -> pd.DataFrame:

         trials['Best'] = ''

         if 'results_metrics.winsdrawslosses' not in trials.columns:
@@ -309,33 +308,26 @@ class HyperoptTools():

         if not has_drawdown:
             # Ensure compatibility with older versions of hyperopt results
-            trials['results_metrics.max_drawdown_abs'] = None
-            trials['results_metrics.max_drawdown'] = None
+            trials['results_metrics.max_drawdown_account'] = None

-        if not legacy_mode:
-            # New mode, using backtest result for metrics
-            trials['results_metrics.winsdrawslosses'] = trials.apply(
-                lambda x: f"{x['results_metrics.wins']} {x['results_metrics.draws']:>4} "
-                          f"{x['results_metrics.losses']:>4}", axis=1)
-            trials = trials[['Best', 'current_epoch', 'results_metrics.total_trades',
-                             'results_metrics.winsdrawslosses',
-                             'results_metrics.profit_mean', 'results_metrics.profit_total_abs',
-                             'results_metrics.profit_total', 'results_metrics.holding_avg',
-                             'results_metrics.max_drawdown', 'results_metrics.max_drawdown_abs',
-                             'loss', 'is_initial_point', 'is_best']]
-        else:
-            # Legacy mode
-            trials = trials[['Best', 'current_epoch', 'results_metrics.trade_count',
-                             'results_metrics.winsdrawslosses', 'results_metrics.avg_profit',
-                             'results_metrics.total_profit', 'results_metrics.profit',
-                             'results_metrics.duration', 'results_metrics.max_drawdown',
-                             'results_metrics.max_drawdown_abs', 'loss', 'is_initial_point',
-                             'is_best']]
+        # New mode, using backtest result for metrics
+        trials['results_metrics.winsdrawslosses'] = trials.apply(
+            lambda x: f"{x['results_metrics.wins']} {x['results_metrics.draws']:>4} "
+                      f"{x['results_metrics.losses']:>4}", axis=1)

-        trials.columns = ['Best', 'Epoch', 'Trades', ' Win Draw Loss', 'Avg profit',
-                          'Total profit', 'Profit', 'Avg duration', 'Max Drawdown',
-                          'max_drawdown_abs', 'Objective', 'is_initial_point', 'is_best']
+        trials = trials[['Best', 'current_epoch', 'results_metrics.total_trades',
+                         'results_metrics.winsdrawslosses',
+                         'results_metrics.profit_mean', 'results_metrics.profit_total_abs',
+                         'results_metrics.profit_total', 'results_metrics.holding_avg',
+                         'results_metrics.max_drawdown',
+                         'results_metrics.max_drawdown_account', 'results_metrics.max_drawdown_abs',
+                         'loss', 'is_initial_point', 'is_best']]
+
+        trials.columns = [
+            'Best', 'Epoch', 'Trades', ' Win Draw Loss', 'Avg profit',
+            'Total profit', 'Profit', 'Avg duration', 'max_drawdown', 'max_drawdown_account',
+            'max_drawdown_abs', 'Objective', 'is_initial_point', 'is_best'
+        ]

         return trials

@@ -351,10 +343,9 @@ class HyperoptTools():
         tabulate.PRESERVE_WHITESPACE = True
         trials = json_normalize(results, max_level=1)

-        legacy_mode = 'results_metrics.total_trades' not in trials
-        has_drawdown = 'results_metrics.max_drawdown_abs' in trials.columns
+        has_account_drawdown = 'results_metrics.max_drawdown_account' in trials.columns

-        trials = HyperoptTools.prepare_trials_columns(trials, legacy_mode, has_drawdown)
+        trials = HyperoptTools.prepare_trials_columns(trials, has_account_drawdown)

         trials['is_profit'] = False
         trials.loc[trials['is_initial_point'], 'Best'] = '* '
@@ -362,12 +353,12 @@ class HyperoptTools():
         trials.loc[trials['is_initial_point'] & trials['is_best'], 'Best'] = '* Best'
         trials.loc[trials['Total profit'] > 0, 'is_profit'] = True
         trials['Trades'] = trials['Trades'].astype(str)
-        perc_multi = 1 if legacy_mode else 100
+        # perc_multi = 1 if legacy_mode else 100
         trials['Epoch'] = trials['Epoch'].apply(
             lambda x: '{}/{}'.format(str(x).rjust(len(str(total_epochs)), ' '), total_epochs)
         )
         trials['Avg profit'] = trials['Avg profit'].apply(
-            lambda x: f'{x * perc_multi:,.2f}%'.rjust(7, ' ') if not isna(x) else "--".rjust(7, ' ')
+            lambda x: f'{x:,.2%}'.rjust(7, ' ') if not isna(x) else "--".rjust(7, ' ')
         )
         trials['Avg duration'] = trials['Avg duration'].apply(
             lambda x: f'{x:,.1f} m'.rjust(7, ' ') if isinstance(x, float) else f"{x}"
@@ -379,24 +370,25 @@ class HyperoptTools():

         stake_currency = config['stake_currency']

-        if has_drawdown:
-            trials['Max Drawdown'] = trials.apply(
-                lambda x: '{} {}'.format(
-                    round_coin_value(x['max_drawdown_abs'], stake_currency),
-                    '({:,.2f}%)'.format(x['Max Drawdown'] * perc_multi).rjust(10, ' ')
-                ).rjust(25 + len(stake_currency))
-                if x['Max Drawdown'] != 0.0 else '--'.rjust(25 + len(stake_currency)),
-                axis=1
-            )
-        else:
-            trials = trials.drop(columns=['Max Drawdown'])
+        trials[f"Max Drawdown{' (Acct)' if has_account_drawdown else ''}"] = trials.apply(
+            lambda x: "{} {}".format(
+                round_coin_value(x['max_drawdown_abs'], stake_currency),
+                (f"({x['max_drawdown_account']:,.2%})"
+                 if has_account_drawdown
+                 else f"({x['max_drawdown']:,.2%})"
+                 ).rjust(10, ' ')
+            ).rjust(25 + len(stake_currency))
+            if x['max_drawdown'] != 0.0 or x['max_drawdown_account'] != 0.0
+            else '--'.rjust(25 + len(stake_currency)),
+            axis=1
+        )

-        trials = trials.drop(columns=['max_drawdown_abs'])
+        trials = trials.drop(columns=['max_drawdown_abs', 'max_drawdown', 'max_drawdown_account'])

         trials['Profit'] = trials.apply(
             lambda x: '{} {}'.format(
                 round_coin_value(x['Total profit'], stake_currency),
-                '({:,.2f}%)'.format(x['Profit'] * perc_multi).rjust(10, ' ')
+                f"({x['Profit']:,.2%})".rjust(10, ' ')
             ).rjust(25+len(stake_currency))
             if x['Total profit'] != 0.0 else '--'.rjust(25+len(stake_currency)),
             axis=1
@@ -1,4 +1,5 @@
 import logging
+from copy import deepcopy
 from datetime import datetime, timedelta, timezone
 from pathlib import Path
 from typing import Any, Dict, List, Union
@@ -98,11 +99,11 @@ def _generate_result_line(result: DataFrame, starting_balance: int, first_column
     }


-def generate_pair_metrics(data: Dict[str, Dict], stake_currency: str, starting_balance: int,
+def generate_pair_metrics(pairlist: List[str], stake_currency: str, starting_balance: int,
                           results: DataFrame, skip_nan: bool = False) -> List[Dict]:
     """
     Generates and returns a list for the given backtest data and the results dataframe
-    :param data: Dict of <pair: dataframe> containing data that was used during backtesting.
+    :param pairlist: Pairlist used
     :param stake_currency: stake-currency - used to correctly name headers
     :param starting_balance: Starting balance
     :param results: Dataframe containing the backtest results
@@ -112,7 +113,7 @@ def generate_pair_metrics(data: Dict[str, Dict], stake_currency: str, starting_b

     tabular_data = []

-    for pair in data:
+    for pair in pairlist:
         result = results[results['pair'] == pair]
         if skip_nan and result['profit_abs'].isnull().all():
             continue
@@ -194,29 +195,21 @@ def generate_sell_reason_stats(max_open_trades: int, results: DataFrame) -> List
     return tabular_data


-def generate_strategy_comparison(all_results: Dict) -> List[Dict]:
+def generate_strategy_comparison(bt_stats: Dict) -> List[Dict]:
     """
     Generate summary per strategy
-    :param all_results: Dict of <Strategyname: DataFrame> containing results for all strategies
+    :param bt_stats: Dict of <Strategyname: DataFrame> containing results for all strategies
     :return: List of Dicts containing the metrics per Strategy
     """

     tabular_data = []
-    for strategy, results in all_results.items():
-        tabular_data.append(_generate_result_line(
-            results['results'], results['config']['dry_run_wallet'], strategy)
-        )
-        try:
-            max_drawdown_per, _, _, _, _ = calculate_max_drawdown(results['results'],
-                                                                  value_col='profit_ratio')
-            max_drawdown_abs, _, _, _, _ = calculate_max_drawdown(results['results'],
-                                                                  value_col='profit_abs')
-        except ValueError:
-            max_drawdown_per = 0
-            max_drawdown_abs = 0
-        tabular_data[-1]['max_drawdown_per'] = round(max_drawdown_per * 100, 2)
-        tabular_data[-1]['max_drawdown_abs'] = \
-            round_coin_value(max_drawdown_abs, results['config']['stake_currency'], False)
+    for strategy, result in bt_stats.items():
+        tabular_data.append(deepcopy(result['results_per_pair'][-1]))
+        # Update "key" to strategy (results_per_pair has it as "Total").
+        tabular_data[-1]['key'] = strategy
+        tabular_data[-1]['max_drawdown_account'] = result['max_drawdown_account']
+        tabular_data[-1]['max_drawdown_abs'] = round_coin_value(
+            result['max_drawdown_abs'], result['stake_currency'], False)
     return tabular_data

@@ -352,14 +345,14 @@ def generate_daily_stats(results: DataFrame) -> Dict[str, Any]:
     }


-def generate_strategy_stats(btdata: Dict[str, DataFrame],
+def generate_strategy_stats(pairlist: List[str],
                             strategy: str,
                             content: Dict[str, Any],
                             min_date: datetime, max_date: datetime,
                             market_change: float
                             ) -> Dict[str, Any]:
     """
-    :param btdata: Backtest data
+    :param pairlist: List of pairs to backtest
     :param strategy: Strategy name
     :param content: Backtest result data in the format:
                     {'results: results, 'config: config}}.
@@ -372,11 +365,11 @@ def generate_strategy_stats(btdata: Dict[str, DataFrame],
     if not isinstance(results, DataFrame):
         return {}
     config = content['config']
-    max_open_trades = min(config['max_open_trades'], len(btdata.keys()))
+    max_open_trades = min(config['max_open_trades'], len(pairlist))
     starting_balance = config['dry_run_wallet']
     stake_currency = config['stake_currency']

-    pair_results = generate_pair_metrics(btdata, stake_currency=stake_currency,
+    pair_results = generate_pair_metrics(pairlist, stake_currency=stake_currency,
                                          starting_balance=starting_balance,
                                          results=results, skip_nan=False)

@@ -385,7 +378,7 @@ def generate_strategy_stats(btdata: Dict[str, DataFrame],

     sell_reason_stats = generate_sell_reason_stats(max_open_trades=max_open_trades,
                                                    results=results)
-    left_open_results = generate_pair_metrics(btdata, stake_currency=stake_currency,
+    left_open_results = generate_pair_metrics(pairlist, stake_currency=stake_currency,
                                               starting_balance=starting_balance,
                                               results=results.loc[results['is_open']],
                                               skip_nan=True)
@@ -429,7 +422,7 @@ def generate_strategy_stats(btdata: Dict[str, DataFrame],

         'trades_per_day': round(len(results) / backtest_days, 2),
         'market_change': market_change,
-        'pairlist': list(btdata.keys()),
+        'pairlist': pairlist,
         'stake_amount': config['stake_amount'],
         'stake_currency': config['stake_currency'],
         'stake_currency_decimals': decimals_per_coin(config['stake_currency']),
@@ -462,12 +455,14 @@ def generate_strategy_stats(btdata: Dict[str, DataFrame],
     }

     try:
-        max_drawdown, _, _, _, _ = calculate_max_drawdown(
+        max_drawdown_legacy, _, _, _, _, _ = calculate_max_drawdown(
             results, value_col='profit_ratio')
-        drawdown_abs, drawdown_start, drawdown_end, high_val, low_val = calculate_max_drawdown(
-            results, value_col='profit_abs')
+        (drawdown_abs, drawdown_start, drawdown_end, high_val, low_val,
+         max_drawdown) = calculate_max_drawdown(
+             results, value_col='profit_abs', starting_balance=starting_balance)
         strat_stats.update({
-            'max_drawdown': max_drawdown,
+            'max_drawdown': max_drawdown_legacy,  # Deprecated - do not use
+            'max_drawdown_account': max_drawdown,
             'max_drawdown_abs': drawdown_abs,
             'drawdown_start': drawdown_start.strftime(DATETIME_PRINT_FORMAT),
             'drawdown_start_ts': drawdown_start.timestamp() * 1000,
@@ -487,6 +482,7 @@ def generate_strategy_stats(btdata: Dict[str, DataFrame],
     except ValueError:
         strat_stats.update({
             'max_drawdown': 0.0,
+            'max_drawdown_account': 0.0,
             'max_drawdown_abs': 0.0,
             'max_drawdown_low': 0.0,
             'max_drawdown_high': 0.0,
@@ -515,13 +511,13 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame],
     """
     result: Dict[str, Any] = {'strategy': {}}
     market_change = calculate_market_change(btdata, 'close')
+    pairlist = list(btdata.keys())
     for strategy, content in all_results.items():
-        strat_stats = generate_strategy_stats(btdata, strategy, content,
+        strat_stats = generate_strategy_stats(pairlist, strategy, content,
                                               min_date, max_date, market_change=market_change)
         result['strategy'][strategy] = strat_stats

-    strategy_results = generate_strategy_comparison(all_results=all_results)
+    strategy_results = generate_strategy_comparison(bt_stats=result['strategy'])

     result['strategy_comparison'] = strategy_results

@@ -646,7 +642,12 @@ def text_table_strategy(strategy_results, stake_currency: str) -> str:
     headers.append('Drawdown')

     # Align drawdown string on the center two space separator.
-    drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]
+    if 'max_drawdown_account' in strategy_results[0]:
+        drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
+    else:
+        # Support for prior backtest results
+        drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]

     dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
     dd_pad_per = max([len(dd) for dd in drawdown])
     drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
@@ -716,7 +717,10 @@ def text_table_add_metrics(strat_results: Dict) -> str:
             ('Max balance', round_coin_value(strat_results['csum_max'],
                                              strat_results['stake_currency'])),

-            ('Drawdown', f"{strat_results['max_drawdown']:.2%}"),
+            # Compatibility to show old hyperopt results
+            ('Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
+            if 'max_drawdown_account' in strat_results else (
+                'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
             ('Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
                                           strat_results['stake_currency'])),
             ('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
@@ -5,7 +5,8 @@ from typing import Any, Dict, List
 import pandas as pd

 from freqtrade.configuration import TimeRange
-from freqtrade.data.btanalysis import (calculate_max_drawdown, combine_dataframes_with_mean,
+from freqtrade.data.btanalysis import (analyze_trade_parallelism, calculate_max_drawdown,
+                                       calculate_underwater, combine_dataframes_with_mean,
                                        create_cum_profit, extract_trades_of_period, load_trades)
 from freqtrade.data.converter import trim_dataframe
 from freqtrade.data.dataprovider import DataProvider
@@ -160,7 +161,7 @@ def add_max_drawdown(fig, row, trades: pd.DataFrame, df_comb: pd.DataFrame,
     Add scatter points indicating max drawdown
     """
     try:
-        max_drawdown, highdate, lowdate, _, _ = calculate_max_drawdown(trades)
+        _, highdate, lowdate, _, _, max_drawdown = calculate_max_drawdown(trades)

         drawdown = go.Scatter(
             x=[highdate, lowdate],
@@ -185,6 +186,48 @@ def add_max_drawdown(fig, row, trades: pd.DataFrame, df_comb: pd.DataFrame,
     return fig


+def add_underwater(fig, row, trades: pd.DataFrame) -> make_subplots:
+    """
+    Add underwater plot
+    """
+    try:
+        underwater = calculate_underwater(trades, value_col="profit_abs")
+
+        underwater = go.Scatter(
+            x=underwater['date'],
+            y=underwater['drawdown'],
+            name="Underwater Plot",
+            fill='tozeroy',
+            fillcolor='#cc362b',
+            line={'color': '#cc362b'},
+        )
+        fig.add_trace(underwater, row, 1)
+    except ValueError:
+        logger.warning("No trades found - not plotting underwater plot")
+    return fig
+
+
+def add_parallelism(fig, row, trades: pd.DataFrame, timeframe: str) -> make_subplots:
+    """
+    Add Chart showing trade parallelism
+    """
+    try:
+        result = analyze_trade_parallelism(trades, timeframe)
+
+        drawdown = go.Scatter(
+            x=result.index,
+            y=result['open_trades'],
+            name="Parallel trades",
+            fill='tozeroy',
+            fillcolor='#242222',
+            line={'color': '#242222'},
+        )
+        fig.add_trace(drawdown, row, 1)
+    except ValueError:
+        logger.warning("No trades found - not plotting Parallelism.")
+    return fig
+
+
 def plot_trades(fig, trades: pd.DataFrame) -> make_subplots:
     """
     Add trades to "fig"
@@ -460,7 +503,12 @@ def generate_candlestick_graph(pair: str, data: pd.DataFrame, trades: pd.DataFra
 def generate_profit_graph(pairs: str, data: Dict[str, pd.DataFrame],
                           trades: pd.DataFrame, timeframe: str, stake_currency: str) -> go.Figure:
     # Combine close-values for all pairs, rename columns to "pair"
-    df_comb = combine_dataframes_with_mean(data, "close")
+    try:
+        df_comb = combine_dataframes_with_mean(data, "close")
+    except ValueError:
+        raise OperationalException(
+            "No data found. Please make sure that data is available for "
+            "the timerange and pairs selected.")

     # Trim trades to available OHLCV data
     trades = extract_trades_of_period(df_comb, trades, date_index=True)
@@ -477,20 +525,30 @@ def generate_profit_graph(pairs: str, data: Dict[str, pd.DataFrame],
         name='Avg close price',
     )

-    fig = make_subplots(rows=3, cols=1, shared_xaxes=True,
-                        row_width=[1, 1, 1],
+    fig = make_subplots(rows=5, cols=1, shared_xaxes=True,
+                        row_heights=[1, 1, 1, 0.5, 1],
                         vertical_spacing=0.05,
-                        subplot_titles=["AVG Close Price", "Combined Profit", "Profit per pair"])
+                        subplot_titles=[
+                            "AVG Close Price",
+                            "Combined Profit",
+                            "Profit per pair",
+                            "Parallelism",
+                            "Underwater",
+                        ])
     fig['layout'].update(title="Freqtrade Profit plot")
     fig['layout']['yaxis1'].update(title='Price')
     fig['layout']['yaxis2'].update(title=f'Profit {stake_currency}')
     fig['layout']['yaxis3'].update(title=f'Profit {stake_currency}')
+    fig['layout']['yaxis4'].update(title='Trade count')
+    fig['layout']['yaxis5'].update(title='Underwater Plot')
     fig['layout']['xaxis']['rangeslider'].update(visible=False)
     fig.update_layout(modebar_add=["v1hovermode", "toggleSpikeLines"])

     fig.add_trace(avgclose, 1, 1)
     fig = add_profit(fig, 2, df_comb, 'cum_profit', 'Profit')
     fig = add_max_drawdown(fig, 2, trades, df_comb, timeframe)
+    fig = add_parallelism(fig, 4, trades, timeframe)
+    fig = add_underwater(fig, 5, trades)

     for pair in pairs:
         profit_col = f'cum_profit_{pair}'
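
The profit figure now carries five rows, including the underwater and parallelism panels added above. A self-contained plotly sketch of the underwater idea on synthetic data; column names, row heights and colors here are illustrative rather than the diff's values:

```python
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots

dates = pd.date_range("2021-01-01", periods=6, freq="D")
cum_profit = pd.Series([0.0, 5.0, 2.0, 8.0, 3.0, 9.0], index=dates)
drawdown = cum_profit - cum_profit.cummax()  # always <= 0: the "underwater" curve

fig = make_subplots(rows=2, cols=1, shared_xaxes=True, row_heights=[1, 0.5],
                    subplot_titles=["Combined Profit", "Underwater"])
fig.add_trace(go.Scatter(x=dates, y=cum_profit, name="Profit"), row=1, col=1)
fig.add_trace(go.Scatter(x=dates, y=drawdown, name="Underwater",
                         fill='tozeroy', line={'color': '#cc362b'}), row=2, col=1)
fig.write_html("profit_underwater.html")  # or fig.show()
```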
@@ -8,7 +8,7 @@ from typing import Any, Dict, List, Optional

 import arrow
 import numpy as np
-from cachetools.ttl import TTLCache
+from cachetools import TTLCache
 from pandas import DataFrame

 from freqtrade.exceptions import OperationalException

@@ -4,11 +4,10 @@ Volume PairList provider
 Provides dynamic pair list based on trade volumes
 """
 import logging
-from functools import partial
 from typing import Any, Dict, List

 import arrow
-from cachetools.ttl import TTLCache
+from cachetools import TTLCache

 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_minutes
@@ -120,10 +119,17 @@ class VolumePairList(IPairList):
         else:
             # Use fresh pairlist
             # Check if pair quote currency equals to the stake currency.
+            _pairlist = [k for k in self._exchange.get_markets(
+                quote_currencies=[self._stake_currency],
+                pairs_only=True, active_only=True).keys()]
+            # No point in testing for blacklisted pairs...
+            _pairlist = self.verify_blacklist(_pairlist, logger.info)
+
             filtered_tickers = [
                 v for k, v in tickers.items()
                 if (self._exchange.get_pair_quote_currency(k) == self._stake_currency
-                    and (self._use_range or v[self._sort_key] is not None))]
+                    and (self._use_range or v[self._sort_key] is not None)
+                    and v['symbol'] in _pairlist)]
             pairlist = [s['symbol'] for s in filtered_tickers]

             pairlist = self.filter_pairlist(pairlist, tickers)
@@ -178,12 +184,16 @@ class VolumePairList(IPairList):
                 ] if (p['symbol'], self._lookback_timeframe) in candles else None
                 # in case of candle data calculate typical price and quoteVolume for candle
                 if pair_candles is not None and not pair_candles.empty:
-                    pair_candles['typical_price'] = (pair_candles['high'] + pair_candles['low']
-                                                     + pair_candles['close']) / 3
-                    pair_candles['quoteVolume'] = (
-                        pair_candles['volume'] * pair_candles['typical_price']
-                    )
+                    if self._exchange._ft_has["ohlcv_volume_currency"] == "base":
+                        pair_candles['typical_price'] = (pair_candles['high'] + pair_candles['low']
+                                                         + pair_candles['close']) / 3
+
+                        pair_candles['quoteVolume'] = (
+                            pair_candles['volume'] * pair_candles['typical_price']
+                        )
+                    else:
+                        # Exchange ohlcv data is in quote volume already.
+                        pair_candles['quoteVolume'] = pair_candles['volume']
                 # ensure that a rolling sum over the lookback_period is built
                 # if pair_candles contains more candles than lookback_period
                 quoteVolume = (pair_candles['quoteVolume']
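
The pairlist change above only derives quote volume from the typical price when the exchange reports candle volume in the base currency (`ohlcv_volume_currency == "base"`); exchanges that already report quote volume use it directly. A small pandas sketch of that branch on synthetic candles; the flag name below mirrors the `_ft_has` setting but is just a local variable here:

```python
import pandas as pd

candles = pd.DataFrame({
    'high':   [105.0, 110.0, 108.0],
    'low':    [95.0, 100.0, 102.0],
    'close':  [100.0, 108.0, 104.0],
    'volume': [10.0, 12.0, 8.0],   # base-currency volume
})

ohlcv_volume_currency = "base"  # illustrative local stand-in for the exchange setting

if ohlcv_volume_currency == "base":
    # Approximate quote-currency turnover with each candle's typical price.
    candles['typical_price'] = (candles['high'] + candles['low'] + candles['close']) / 3
    candles['quoteVolume'] = candles['volume'] * candles['typical_price']
else:
    # Some exchanges already report candle volume in the quote currency.
    candles['quoteVolume'] = candles['volume']

print(candles['quoteVolume'].rolling(2).sum())  # rolling lookback sum, as in the pairlist
```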
@@ -204,7 +214,7 @@ class VolumePairList(IPairList):

         # Validate whitelist to only have active market pairs
         pairs = self._whitelist_for_active_markets([s['symbol'] for s in sorted_tickers])
-        pairs = self.verify_blacklist(pairs, partial(self.log_once, logmethod=logger.info))
+        pairs = self.verify_blacklist(pairs, logmethod=logger.info)
         # Limit pairlist to the requested number of pairs
         pairs = pairs[:self._number_pairs]

@@ -6,7 +6,7 @@ from copy import deepcopy
 from typing import Any, Dict, List, Optional

 import arrow
-from cachetools.ttl import TTLCache
+from cachetools import TTLCache
 from pandas import DataFrame

 from freqtrade.exceptions import OperationalException

@@ -2,13 +2,14 @@
 PairList manager class
 """
 import logging
-from copy import deepcopy
+from functools import partial
 from typing import Dict, List

 from cachetools import TTLCache, cached

 from freqtrade.constants import ListPairsWithTimeframes
 from freqtrade.exceptions import OperationalException
+from freqtrade.mixins import LoggingMixin
 from freqtrade.plugins.pairlist.IPairList import IPairList
 from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
 from freqtrade.resolvers import PairListResolver
@@ -17,7 +18,7 @@ from freqtrade.resolvers import PairListResolver
 logger = logging.getLogger(__name__)


-class PairListManager():
+class PairListManager(LoggingMixin):

     def __init__(self, exchange, config: dict) -> None:
         self._exchange = exchange
@@ -41,6 +42,9 @@ class PairListManager():
         if not self._pairlist_handlers:
             raise OperationalException("No Pairlist Handlers defined")

+        refresh_period = config.get('pairlist_refresh_period', 3600)
+        LoggingMixin.__init__(self, logger, refresh_period)
+
     @property
     def whitelist(self) -> List[str]:
         """The current whitelist"""
@@ -108,9 +112,10 @@ class PairListManager():
         except ValueError as err:
             logger.error(f"Pair blacklist contains an invalid Wildcard: {err}")
             return []
-        for pair in deepcopy(pairlist):
+        log_once = partial(self.log_once, logmethod=logmethod)
+        for pair in pairlist.copy():
             if pair in blacklist:
-                logmethod(f"Pair {pair} in your blacklist. Removing it from whitelist...")
+                log_once(f"Pair {pair} in your blacklist. Removing it from whitelist...")
                 pairlist.remove(pair)
         return pairlist

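
`verify_blacklist` now binds the log method once with `functools.partial` and routes messages through `log_once`, so repeated pairlist refreshes do not spam the log with the same blacklist notice. A self-contained sketch of that binding; the `LogOnceMixin` below is an illustrative stand-in for freqtrade's `LoggingMixin`, not its implementation:

```python
import logging
from functools import partial
from typing import Callable

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class LogOnceMixin:
    """Emit each distinct message at most once, however often the check re-runs."""

    def __init__(self) -> None:
        self._seen: set = set()

    def log_once(self, message: str, logmethod: Callable[[str], None]) -> None:
        if message not in self._seen:
            self._seen.add(message)
            logmethod(message)


manager = LogOnceMixin()
# Bind the log method once, then reuse the partial inside the loop,
# as the blacklist-verification change above does.
log_once = partial(manager.log_once, logmethod=logger.info)
for pair in ['BTC/USDT', 'BTC/USDT', 'ETH/USDT']:
    log_once(f"Pair {pair} in your blacklist. Removing it from whitelist...")
```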
@@ -55,7 +55,8 @@ class MaxDrawdown(IProtection):

         # Drawdown is always positive
         try:
-            drawdown, _, _, _, _ = calculate_max_drawdown(trades_df, value_col='close_profit')
+            # TODO: This should use absolute profit calculation, considering account balance.
+            drawdown, _, _, _, _, _ = calculate_max_drawdown(trades_df, value_col='close_profit')
         except ValueError:
             return False, None, None

@@ -33,6 +33,9 @@ async def api_start_backtest(bt_settings: BacktestRequest, background_tasks: Bac
         if settings[setting] is not None:
             btconfig[setting] = settings[setting]

+    # Force dry-run for backtesting
+    btconfig['dry_run'] = True
+
     # Start backtesting
     # Initialize backtesting object
     def run_backtest():

@@ -47,7 +47,7 @@ class UvicornServer(uvicorn.Server):
         else:
             asyncio.set_event_loop(uvloop.new_event_loop())
         try:
-            loop = asyncio.get_event_loop()
+            loop = asyncio.get_running_loop()
         except RuntimeError:
             # When running in a thread, we'll not have an eventloop yet.
             loop = asyncio.new_event_loop()
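
The Uvicorn wrapper above switches from `asyncio.get_event_loop()` to `asyncio.get_running_loop()`, which raises `RuntimeError` instead of implicitly creating a loop when none is running. A self-contained sketch of the resulting try/except pattern; the helper name is illustrative:

```python
import asyncio
import threading


def get_or_create_loop() -> asyncio.AbstractEventLoop:
    """Prefer the currently running loop; fall back to a fresh one when called
    from a plain thread, mirroring the change above."""
    try:
        # Raises RuntimeError when no loop is running in this thread,
        # instead of silently creating one like get_event_loop() used to.
        return asyncio.get_running_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        return loop


def worker() -> None:
    loop = get_or_create_loop()
    print("worker thread loop:", loop)
    loop.close()


t = threading.Thread(target=worker)
t.start()
t.join()
```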
@@ -7,7 +7,7 @@ import datetime
 import logging
 from typing import Dict, List

-from cachetools.ttl import TTLCache
+from cachetools import TTLCache
 from pycoingecko import CoinGeckoAPI
 from requests.exceptions import RequestException

@@ -199,8 +199,8 @@ class Telegram(RPCHandler):

         self._updater.start_polling(
             bootstrap_retries=-1,
-            timeout=30,
-            read_latency=60,
+            timeout=20,
+            read_latency=60,  # Assumed transmission latency
             drop_pending_updates=True,
         )
         logger.info(
@@ -213,6 +213,7 @@ class Telegram(RPCHandler):
         Stops all running telegram threads.
         :return: None
         """
+        # This can take up to `timeout` from the call to `start_polling`.
         self._updater.stop()

     def _format_buy_msg(self, msg: Dict[str, Any]) -> str:

@@ -703,23 +703,21 @@ class IStrategy(ABC, HyperStrategyMixin):
                 custom_reason = custom_reason[:CUSTOM_SELL_MAX_LENGTH]
             else:
                 custom_reason = None
-        # TODO: return here if sell-signal should be favored over ROI
+        if sell_signal in (SellType.CUSTOM_SELL, SellType.SELL_SIGNAL):
+            logger.debug(f"{trade.pair} - Sell signal received. "
+                         f"sell_type=SellType.{sell_signal.name}" +
+                         (f", custom_reason={custom_reason}" if custom_reason else ""))
+            return SellCheckTuple(sell_type=sell_signal, sell_reason=custom_reason)

         # Start evaluations
         # Sequence:
-        # ROI (if not stoploss)
         # Sell-signal
+        # ROI (if not stoploss)
         # Stoploss
         if roi_reached and stoplossflag.sell_type != SellType.STOP_LOSS:
             logger.debug(f"{trade.pair} - Required profit reached. sell_type=SellType.ROI")
             return SellCheckTuple(sell_type=SellType.ROI)

-        if sell_signal != SellType.NONE:
-            logger.debug(f"{trade.pair} - Sell signal received. "
-                         f"sell_type=SellType.{sell_signal.name}" +
-                         (f", custom_reason={custom_reason}" if custom_reason else ""))
-            return SellCheckTuple(sell_type=sell_signal, sell_reason=custom_reason)
-
         if stoplossflag.sell_flag:

             logger.debug(f"{trade.pair} - Stoploss hit. sell_type={stoplossflag.sell_type}")

@@ -85,9 +85,12 @@ class Worker:

         # Log state transition
         if state != old_state:
-            self.freqtrade.notify_status(f'{state.name.lower()}')

-            logger.info(f"Changing state to: {state.name}")
+            if old_state != State.RELOAD_CONFIG:
+                self.freqtrade.notify_status(f'{state.name.lower()}')
+
+            logger.info(
+                f"Changing state{f' from {old_state.name}' if old_state else ''} to: {state.name}")
             if state == State.RUNNING:
                 self.freqtrade.startup()

@ -23,6 +23,7 @@ exclude = '''
|
|||||||
line_length = 100
|
line_length = 100
|
||||||
multi_line_output=0
|
multi_line_output=0
|
||||||
lines_after_imports=2
|
lines_after_imports=2
|
||||||
|
skip_glob = ["**/.env*", "**/env/*", "**/.venv/*", "**/docs/*"]
|
||||||
|
|
||||||
[build-system]
|
[build-system]
|
||||||
requires = ["setuptools >= 46.4.0", "wheel"]
|
requires = ["setuptools >= 46.4.0", "wheel"]
|
||||||
|
@ -6,7 +6,7 @@
|
|||||||
coveralls==3.3.1
|
coveralls==3.3.1
|
||||||
flake8==4.0.1
|
flake8==4.0.1
|
||||||
flake8-tidy-imports==4.5.0
|
flake8-tidy-imports==4.5.0
|
||||||
mypy==0.910
|
mypy==0.930
|
||||||
pytest==6.2.5
|
pytest==6.2.5
|
||||||
pytest-asyncio==0.16.0
|
pytest-asyncio==0.16.0
|
||||||
pytest-cov==3.0.0
|
pytest-cov==3.0.0
|
||||||
@ -14,16 +14,16 @@ pytest-mock==3.6.1
|
|||||||
pytest-random-order==1.0.4
|
pytest-random-order==1.0.4
|
||||||
isort==5.10.1
|
isort==5.10.1
|
||||||
# For datetime mocking
|
# For datetime mocking
|
||||||
time-machine==2.4.1
|
time-machine==2.5.0
|
||||||
|
|
||||||
# Convert jupyter notebooks to markdown documents
|
# Convert jupyter notebooks to markdown documents
|
||||||
nbconvert==6.3.0
|
nbconvert==6.3.0
|
||||||
|
|
||||||
# mypy types
|
# mypy types
|
||||||
types-cachetools==4.2.6
|
types-cachetools==4.2.7
|
||||||
types-filelock==3.2.1
|
types-filelock==3.2.1
|
||||||
types-requests==2.26.1
|
types-requests==2.26.3
|
||||||
types-tabulate==0.8.3
|
types-tabulate==0.8.4
|
||||||
|
|
||||||
# Extensions to datetime library
|
# Extensions to datetime library
|
||||||
types-python-dateutil==2.8.3
|
types-python-dateutil==2.8.4
|
@ -3,9 +3,8 @@
|
|||||||
|
|
||||||
# Required for hyperopt
|
# Required for hyperopt
|
||||||
scipy==1.7.3
|
scipy==1.7.3
|
||||||
scikit-learn==1.0.1
|
scikit-learn==1.0.2
|
||||||
scikit-optimize==0.9.0
|
scikit-optimize==0.9.0
|
||||||
filelock==3.4.0
|
filelock==3.4.2
|
||||||
joblib==1.1.0
|
joblib==1.1.0
|
||||||
psutil==5.8.0
|
|
||||||
progressbar2==3.55.0
|
progressbar2==3.55.0
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
# Include all requirements to run the bot.
|
# Include all requirements to run the bot.
|
||||||
-r requirements.txt
|
-r requirements.txt
|
||||||
|
|
||||||
plotly==5.4.0
|
plotly==5.5.0
|
||||||
|
|
||||||
|
@ -1,24 +1,25 @@
-numpy==1.21.4
+numpy==1.21.5; python_version <= '3.7'
+numpy==1.22.0; python_version > '3.7'
 pandas==1.3.5
 pandas-ta==0.3.14b

-ccxt==1.63.65
+ccxt==1.66.32
 # Pin cryptography for now due to rust build errors with piwheels
-cryptography==36.0.0
+cryptography==36.0.1
 aiohttp==3.8.1
-SQLAlchemy==1.4.28
+SQLAlchemy==1.4.29
 python-telegram-bot==13.9
 arrow==1.2.1
 cachetools==4.2.2
 requests==2.26.0
 urllib3==1.26.7
-jsonschema==4.2.1
+jsonschema==4.3.3
-TA-Lib==0.4.22
+TA-Lib==0.4.23
 technical==1.3.0
 tabulate==0.8.9
 pycoingecko==2.2.0
 jinja2==3.0.3
-tables==3.6.1
+tables==3.7.0
 blosc==1.10.6

 # find first, C search in arrays

@ -35,7 +36,7 @@ fastapi==0.70.1
 uvicorn==0.16.0
 pyjwt==2.3.0
 aiofiles==0.8.0
-psutil==5.8.0
+psutil==5.9.0

 # Support for colorized terminal output
 colorama==0.4.4
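The split numpy pin above uses PEP 508 environment markers, so pip resolves a different version depending on the interpreter. To check how such a marker evaluates on the local interpreter, the packaging library can be used (an assumed extra dependency for this check, not part of freqtrade's own requirements):

    from packaging.markers import Marker

    # The same markers as in requirements.txt, evaluated against the running interpreter.
    print(Marker("python_version <= '3.7'").evaluate())  # True only on Python 3.7 and older
    print(Marker("python_version > '3.7'").evaluate())   # True on Python 3.8 and newer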
@ -17,6 +17,7 @@ classifiers =
     Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
+    Programming Language :: Python :: 3.10
     Operating System :: MacOS
     Operating System :: Unix
     Topic :: Office/Business :: Financial :: Investment
setup.py
@ -43,7 +43,7 @@ setup(
     ],
     install_requires=[
         # from requirements.txt
-        'ccxt>=1.60.11',
+        'ccxt>=1.66.32',
         'SQLAlchemy',
         'python-telegram-bot>=13.4',
         'arrow>=0.17.0',
setup.sh
@ -25,7 +25,7 @@ function check_installed_python() {
         exit 2
     fi

-    for v in 9 8 7
+    for v in 9 10 8 7
     do
         PYTHON="python3.${v}"
         which $PYTHON

@ -37,7 +37,6 @@ function check_installed_python() {
     done

     echo "No usable python found. Please make sure to have python3.7 or newer installed."
-    echo "python3.10 is currently not supported."
     exit 1
 }

@ -220,7 +219,7 @@ function install() {
         install_redhat
     else
         echo "This script does not support your OS."
-        echo "If you have Python version 3.7 - 3.9, pip, virtualenv, ta-lib you can continue."
+        echo "If you have Python version 3.7 - 3.10, pip, virtualenv, ta-lib you can continue."
         echo "Wait 10 seconds to continue the next install steps or use ctrl+c to interrupt this shell."
         sleep 10
     fi
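setup.sh now also probes for python3.10 while still preferring 3.9. A rough Python equivalent of that interpreter probe, purely illustrative of the shell loop above:

    import shutil

    # Same preference order as the shell loop: 3.9 first, then 3.10, 3.8, 3.7.
    for minor in (9, 10, 8, 7):
        exe = shutil.which(f"python3.{minor}")
        if exe:
            print(f"Using {exe}")
            break
    else:
        print("No usable python found. Please make sure to have python3.7 or newer installed.")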
@ -4,7 +4,6 @@ import logging
 import re
 from copy import deepcopy
 from datetime import datetime, timedelta
-from functools import reduce
 from pathlib import Path
 from unittest.mock import MagicMock, Mock, PropertyMock
@ -50,17 +49,23 @@ def pytest_configure(config):


 def log_has(line, logs):
-    # caplog mocker returns log as a tuple: ('freqtrade.something', logging.WARNING, 'foobar')
-    # and we want to match line against foobar in the tuple
-    return reduce(lambda a, b: a or b,
-                  filter(lambda x: x[2] == line, logs.record_tuples),
-                  False)
+    """Check if line is found on some caplog's message."""
+    return any(line == message for message in logs.messages)


 def log_has_re(line, logs):
-    return reduce(lambda a, b: a or b,
-                  filter(lambda x: re.match(line, x[2]), logs.record_tuples),
-                  False)
+    """Check if line matches some caplog's message."""
+    return any(re.match(line, message) for message in logs.messages)
+
+
+def num_log_has(line, logs):
+    """Check how many times line is found in caplog's messages."""
+    return sum(line == message for message in logs.messages)
+
+
+def num_log_has_re(line, logs):
+    """Check how many times line matches caplog's messages."""
+    return sum(bool(re.match(line, message)) for message in logs.messages)


 def get_args(args):
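The rewritten helpers read pytest's caplog.messages (the list of formatted log messages) instead of unpacking record_tuples. A small hypothetical test showing how they are typically used together with the caplog fixture:

    import logging

    from tests.conftest import log_has, log_has_re, num_log_has_re

    logger = logging.getLogger(__name__)

    def refresh_pairs():
        logger.warning("Refreshing candle data for 5 pairs")

    def test_refresh_pairs_logs(caplog):
        caplog.set_level(logging.INFO)
        refresh_pairs()
        assert log_has("Refreshing candle data for 5 pairs", caplog)        # exact message match
        assert log_has_re(r"Refreshing candle data for \d+ pairs", caplog)  # regex match from start
        assert num_log_has_re(r"Refreshing candle data", caplog) == 1       # count of matches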
@ -2015,7 +2020,7 @@ def saved_hyperopt_results():
|
|||||||
'params_dict': {
|
'params_dict': {
|
||||||
'mfi-value': 15, 'fastd-value': 20, 'adx-value': 25, 'rsi-value': 28, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 88, 'sell-fastd-value': 97, 'sell-adx-value': 51, 'sell-rsi-value': 67, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1190, 'roi_t2': 541, 'roi_t3': 408, 'roi_p1': 0.026035863879169705, 'roi_p2': 0.12508730043628782, 'roi_p3': 0.27766427921605896, 'stoploss': -0.2562930402099556}, # noqa: E501
|
'mfi-value': 15, 'fastd-value': 20, 'adx-value': 25, 'rsi-value': 28, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 88, 'sell-fastd-value': 97, 'sell-adx-value': 51, 'sell-rsi-value': 67, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1190, 'roi_t2': 541, 'roi_t3': 408, 'roi_p1': 0.026035863879169705, 'roi_p2': 0.12508730043628782, 'roi_p3': 0.27766427921605896, 'stoploss': -0.2562930402099556}, # noqa: E501
|
||||||
'params_details': {'buy': {'mfi-value': 15, 'fastd-value': 20, 'adx-value': 25, 'rsi-value': 28, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 88, 'sell-fastd-value': 97, 'sell-adx-value': 51, 'sell-rsi-value': 67, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.4287874435315165, 408: 0.15112316431545753, 949: 0.026035863879169705, 2139: 0}, 'stoploss': {'stoploss': -0.2562930402099556}}, # noqa: E501
|
'params_details': {'buy': {'mfi-value': 15, 'fastd-value': 20, 'adx-value': 25, 'rsi-value': 28, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 88, 'sell-fastd-value': 97, 'sell-adx-value': 51, 'sell-rsi-value': 67, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.4287874435315165, 408: 0.15112316431545753, 949: 0.026035863879169705, 2139: 0}, 'stoploss': {'stoploss': -0.2562930402099556}}, # noqa: E501
|
||||||
'results_metrics': {'total_trades': 2, 'wins': 0, 'draws': 0, 'losses': 2, 'profit_mean': -0.01254995, 'profit_median': -0.012222, 'profit_total': -0.00125625, 'profit_total_abs': -2.50999, 'holding_avg': timedelta(minutes=3930.0), 'stake_currency': 'BTC', 'strategy_name': 'SampleStrategy'}, # noqa: E501
|
'results_metrics': {'total_trades': 2, 'wins': 0, 'draws': 0, 'losses': 2, 'profit_mean': -0.01254995, 'profit_median': -0.012222, 'profit_total': -0.00125625, 'profit_total_abs': -2.50999, 'max_drawdown': 0.23, 'max_drawdown_abs': -0.00125625, 'holding_avg': timedelta(minutes=3930.0), 'stake_currency': 'BTC', 'strategy_name': 'SampleStrategy'}, # noqa: E501
|
||||||
'results_explanation': ' 2 trades. Avg profit -1.25%. Total profit -0.00125625 BTC ( -2.51Σ%). Avg duration 3930.0 min.', # noqa: E501
|
'results_explanation': ' 2 trades. Avg profit -1.25%. Total profit -0.00125625 BTC ( -2.51Σ%). Avg duration 3930.0 min.', # noqa: E501
|
||||||
'total_profit': -0.00125625,
|
'total_profit': -0.00125625,
|
||||||
'current_epoch': 1,
|
'current_epoch': 1,
|
||||||
@ -2031,7 +2036,7 @@ def saved_hyperopt_results():
|
|||||||
'sell': {'sell-mfi-value': 96, 'sell-fastd-value': 68, 'sell-adx-value': 63, 'sell-rsi-value': 81, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, # noqa: E501
|
'sell': {'sell-mfi-value': 96, 'sell-fastd-value': 68, 'sell-adx-value': 63, 'sell-rsi-value': 81, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, # noqa: E501
|
||||||
'roi': {0: 0.4449309386008759, 140: 0.11955965746663, 823: 0.06403981740598495, 1157: 0}, # noqa: E501
|
'roi': {0: 0.4449309386008759, 140: 0.11955965746663, 823: 0.06403981740598495, 1157: 0}, # noqa: E501
|
||||||
'stoploss': {'stoploss': -0.338070047333259}},
|
'stoploss': {'stoploss': -0.338070047333259}},
|
||||||
'results_metrics': {'total_trades': 1, 'wins': 0, 'draws': 0, 'losses': 1, 'profit_mean': 0.012357, 'profit_median': -0.012222, 'profit_total': 6.185e-05, 'profit_total_abs': 0.12357, 'holding_avg': timedelta(minutes=1200.0)}, # noqa: E501
|
'results_metrics': {'total_trades': 1, 'wins': 0, 'draws': 0, 'losses': 1, 'profit_mean': 0.012357, 'profit_median': -0.012222, 'profit_total': 6.185e-05, 'profit_total_abs': 0.12357, 'max_drawdown': 0.23, 'max_drawdown_abs': -0.00125625, 'holding_avg': timedelta(minutes=1200.0)}, # noqa: E501
|
||||||
'results_explanation': ' 1 trades. Avg profit 0.12%. Total profit 0.00006185 BTC ( 0.12Σ%). Avg duration 1200.0 min.', # noqa: E501
|
'results_explanation': ' 1 trades. Avg profit 0.12%. Total profit 0.00006185 BTC ( 0.12Σ%). Avg duration 1200.0 min.', # noqa: E501
|
||||||
'total_profit': 6.185e-05,
|
'total_profit': 6.185e-05,
|
||||||
'current_epoch': 2,
|
'current_epoch': 2,
|
||||||
@ -2041,7 +2046,7 @@ def saved_hyperopt_results():
|
|||||||
'loss': 14.241196856510731,
|
'loss': 14.241196856510731,
|
||||||
'params_dict': {'mfi-value': 25, 'fastd-value': 16, 'adx-value': 29, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 98, 'sell-fastd-value': 72, 'sell-adx-value': 51, 'sell-rsi-value': 82, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 889, 'roi_t2': 533, 'roi_t3': 263, 'roi_p1': 0.04759065393663096, 'roi_p2': 0.1488819964638463, 'roi_p3': 0.4102801822104605, 'stoploss': -0.05394588767607611}, # noqa: E501
|
'params_dict': {'mfi-value': 25, 'fastd-value': 16, 'adx-value': 29, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 98, 'sell-fastd-value': 72, 'sell-adx-value': 51, 'sell-rsi-value': 82, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 889, 'roi_t2': 533, 'roi_t3': 263, 'roi_p1': 0.04759065393663096, 'roi_p2': 0.1488819964638463, 'roi_p3': 0.4102801822104605, 'stoploss': -0.05394588767607611}, # noqa: E501
|
||||||
'params_details': {'buy': {'mfi-value': 25, 'fastd-value': 16, 'adx-value': 29, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 98, 'sell-fastd-value': 72, 'sell-adx-value': 51, 'sell-rsi-value': 82, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.6067528326109377, 263: 0.19647265040047726, 796: 0.04759065393663096, 1685: 0}, 'stoploss': {'stoploss': -0.05394588767607611}}, # noqa: E501
|
'params_details': {'buy': {'mfi-value': 25, 'fastd-value': 16, 'adx-value': 29, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 98, 'sell-fastd-value': 72, 'sell-adx-value': 51, 'sell-rsi-value': 82, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.6067528326109377, 263: 0.19647265040047726, 796: 0.04759065393663096, 1685: 0}, 'stoploss': {'stoploss': -0.05394588767607611}}, # noqa: E501
|
||||||
'results_metrics': {'total_trades': 621, 'wins': 320, 'draws': 0, 'losses': 301, 'profit_mean': -0.043883302093397747, 'profit_median': -0.012222, 'profit_total': -0.13639474, 'profit_total_abs': -272.515306, 'holding_avg': timedelta(minutes=1691.207729468599)}, # noqa: E501
|
'results_metrics': {'total_trades': 621, 'wins': 320, 'draws': 0, 'losses': 301, 'profit_mean': -0.043883302093397747, 'profit_median': -0.012222, 'profit_total': -0.13639474, 'profit_total_abs': -272.515306, 'max_drawdown': 0.25, 'max_drawdown_abs': -272.515306, 'holding_avg': timedelta(minutes=1691.207729468599)}, # noqa: E501
|
||||||
'results_explanation': ' 621 trades. Avg profit -0.44%. Total profit -0.13639474 BTC (-272.52Σ%). Avg duration 1691.2 min.', # noqa: E501
|
'results_explanation': ' 621 trades. Avg profit -0.44%. Total profit -0.13639474 BTC (-272.52Σ%). Avg duration 1691.2 min.', # noqa: E501
|
||||||
'total_profit': -0.13639474,
|
'total_profit': -0.13639474,
|
||||||
'current_epoch': 3,
|
'current_epoch': 3,
|
||||||
@ -2058,7 +2063,7 @@ def saved_hyperopt_results():
|
|||||||
'loss': 0.22195522184191518,
|
'loss': 0.22195522184191518,
|
||||||
'params_dict': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 1269, 'roi_t2': 601, 'roi_t3': 444, 'roi_p1': 0.07280999507931168, 'roi_p2': 0.08946698095898986, 'roi_p3': 0.1454876733325284, 'stoploss': -0.18181041180901014}, # noqa: E501
|
'params_dict': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 1269, 'roi_t2': 601, 'roi_t3': 444, 'roi_p1': 0.07280999507931168, 'roi_p2': 0.08946698095898986, 'roi_p3': 0.1454876733325284, 'stoploss': -0.18181041180901014}, # noqa: E501
|
||||||
'params_details': {'buy': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.3077646493708299, 444: 0.16227697603830155, 1045: 0.07280999507931168, 2314: 0}, 'stoploss': {'stoploss': -0.18181041180901014}}, # noqa: E501
|
'params_details': {'buy': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.3077646493708299, 444: 0.16227697603830155, 1045: 0.07280999507931168, 2314: 0}, 'stoploss': {'stoploss': -0.18181041180901014}}, # noqa: E501
|
||||||
'results_metrics': {'total_trades': 14, 'wins': 6, 'draws': 0, 'losses': 8, 'profit_mean': -0.003539515, 'profit_median': -0.012222, 'profit_total': -0.002480140000000001, 'profit_total_abs': -4.955321, 'holding_avg': timedelta(minutes=3402.8571428571427)}, # noqa: E501
|
'results_metrics': {'total_trades': 14, 'wins': 6, 'draws': 0, 'losses': 8, 'profit_mean': -0.003539515, 'profit_median': -0.012222, 'profit_total': -0.002480140000000001, 'profit_total_abs': -4.955321, 'max_drawdown': 0.34, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=3402.8571428571427)}, # noqa: E501
|
||||||
'results_explanation': ' 14 trades. Avg profit -0.35%. Total profit -0.00248014 BTC ( -4.96Σ%). Avg duration 3402.9 min.', # noqa: E501
|
'results_explanation': ' 14 trades. Avg profit -0.35%. Total profit -0.00248014 BTC ( -4.96Σ%). Avg duration 3402.9 min.', # noqa: E501
|
||||||
'total_profit': -0.002480140000000001,
|
'total_profit': -0.002480140000000001,
|
||||||
'current_epoch': 5,
|
'current_epoch': 5,
|
||||||
@ -2068,7 +2073,7 @@ def saved_hyperopt_results():
|
|||||||
'loss': 0.545315889154162,
|
'loss': 0.545315889154162,
|
||||||
'params_dict': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower', 'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 319, 'roi_t2': 556, 'roi_t3': 216, 'roi_p1': 0.06251955472249589, 'roi_p2': 0.11659519602202795, 'roi_p3': 0.0953744132197762, 'stoploss': -0.024551752215582423}, # noqa: E501
|
'params_dict': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower', 'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 319, 'roi_t2': 556, 'roi_t3': 216, 'roi_p1': 0.06251955472249589, 'roi_p2': 0.11659519602202795, 'roi_p3': 0.0953744132197762, 'stoploss': -0.024551752215582423}, # noqa: E501
|
||||||
'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.2744891639643, 216: 0.17911475074452382, 772: 0.06251955472249589, 1091: 0}, 'stoploss': {'stoploss': -0.024551752215582423}}, # noqa: E501
|
'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.2744891639643, 216: 0.17911475074452382, 772: 0.06251955472249589, 1091: 0}, 'stoploss': {'stoploss': -0.024551752215582423}}, # noqa: E501
|
||||||
'results_metrics': {'total_trades': 39, 'wins': 20, 'draws': 0, 'losses': 19, 'profit_mean': -0.0021400679487179478, 'profit_median': -0.012222, 'profit_total': -0.0041773, 'profit_total_abs': -8.346264999999997, 'holding_avg': timedelta(minutes=636.9230769230769)}, # noqa: E501
|
'results_metrics': {'total_trades': 39, 'wins': 20, 'draws': 0, 'losses': 19, 'profit_mean': -0.0021400679487179478, 'profit_median': -0.012222, 'profit_total': -0.0041773, 'profit_total_abs': -8.346264999999997, 'max_drawdown': 0.45, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=636.9230769230769)}, # noqa: E501
|
||||||
'results_explanation': ' 39 trades. Avg profit -0.21%. Total profit -0.00417730 BTC ( -8.35Σ%). Avg duration 636.9 min.', # noqa: E501
|
'results_explanation': ' 39 trades. Avg profit -0.21%. Total profit -0.00417730 BTC ( -8.35Σ%). Avg duration 636.9 min.', # noqa: E501
|
||||||
'total_profit': -0.0041773,
|
'total_profit': -0.0041773,
|
||||||
'current_epoch': 6,
|
'current_epoch': 6,
|
||||||
@ -2080,7 +2085,7 @@ def saved_hyperopt_results():
|
|||||||
'params_details': {
|
'params_details': {
|
||||||
'buy': {'mfi-value': 13, 'fastd-value': 41, 'adx-value': 21, 'rsi-value': 29, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 99, 'sell-fastd-value': 60, 'sell-adx-value': 81, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.4837436938134452, 145: 0.10853310701097472, 765: 0.0586919200378493, 1536: 0}, # noqa: E501
|
'buy': {'mfi-value': 13, 'fastd-value': 41, 'adx-value': 21, 'rsi-value': 29, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 99, 'sell-fastd-value': 60, 'sell-adx-value': 81, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.4837436938134452, 145: 0.10853310701097472, 765: 0.0586919200378493, 1536: 0}, # noqa: E501
|
||||||
'stoploss': {'stoploss': -0.14613268022709905}}, # noqa: E501
|
'stoploss': {'stoploss': -0.14613268022709905}}, # noqa: E501
|
||||||
'results_metrics': {'total_trades': 318, 'wins': 100, 'draws': 0, 'losses': 218, 'profit_mean': -0.0039833954716981146, 'profit_median': -0.012222, 'profit_total': -0.06339929, 'profit_total_abs': -126.67197600000004, 'holding_avg': timedelta(minutes=3140.377358490566)}, # noqa: E501
|
'results_metrics': {'total_trades': 318, 'wins': 100, 'draws': 0, 'losses': 218, 'profit_mean': -0.0039833954716981146, 'profit_median': -0.012222, 'profit_total': -0.06339929, 'profit_total_abs': -126.67197600000004, 'max_drawdown': 0.50, 'max_drawdown_abs': -200.955321, 'holding_avg': timedelta(minutes=3140.377358490566)}, # noqa: E501
|
||||||
'results_explanation': ' 318 trades. Avg profit -0.40%. Total profit -0.06339929 BTC (-126.67Σ%). Avg duration 3140.4 min.', # noqa: E501
|
'results_explanation': ' 318 trades. Avg profit -0.40%. Total profit -0.06339929 BTC (-126.67Σ%). Avg duration 3140.4 min.', # noqa: E501
|
||||||
'total_profit': -0.06339929,
|
'total_profit': -0.06339929,
|
||||||
'current_epoch': 7,
|
'current_epoch': 7,
|
||||||
@ -2090,7 +2095,7 @@ def saved_hyperopt_results():
|
|||||||
'loss': 20.0, # noqa: E501
|
'loss': 20.0, # noqa: E501
|
||||||
'params_dict': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal', 'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 1149, 'roi_t2': 375, 'roi_t3': 289, 'roi_p1': 0.05571820757172588, 'roi_p2': 0.0606240398618907, 'roi_p3': 0.1729012220156157, 'stoploss': -0.1588514289110401}, # noqa: E501
|
'params_dict': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal', 'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 1149, 'roi_t2': 375, 'roi_t3': 289, 'roi_p1': 0.05571820757172588, 'roi_p2': 0.0606240398618907, 'roi_p3': 0.1729012220156157, 'stoploss': -0.1588514289110401}, # noqa: E501
|
||||||
'params_details': {'buy': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.2892434694492323, 289: 0.11634224743361658, 664: 0.05571820757172588, 1813: 0}, 'stoploss': {'stoploss': -0.1588514289110401}}, # noqa: E501
|
'params_details': {'buy': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.2892434694492323, 289: 0.11634224743361658, 664: 0.05571820757172588, 1813: 0}, 'stoploss': {'stoploss': -0.1588514289110401}}, # noqa: E501
|
||||||
'results_metrics': {'total_trades': 1, 'wins': 0, 'draws': 1, 'losses': 0, 'profit_mean': 0.0, 'profit_median': 0.0, 'profit_total': 0.0, 'profit_total_abs': 0.0, 'holding_avg': timedelta(minutes=5340.0)}, # noqa: E501
|
'results_metrics': {'total_trades': 1, 'wins': 0, 'draws': 1, 'losses': 0, 'profit_mean': 0.0, 'profit_median': 0.0, 'profit_total': 0.0, 'profit_total_abs': 0.0, 'max_drawdown': 0.0, 'max_drawdown_abs': 0.52, 'holding_avg': timedelta(minutes=5340.0)}, # noqa: E501
|
||||||
'results_explanation': ' 1 trades. Avg profit 0.00%. Total profit 0.00000000 BTC ( 0.00Σ%). Avg duration 5340.0 min.', # noqa: E501
|
'results_explanation': ' 1 trades. Avg profit 0.00%. Total profit 0.00000000 BTC ( 0.00Σ%). Avg duration 5340.0 min.', # noqa: E501
|
||||||
'total_profit': 0.0,
|
'total_profit': 0.0,
|
||||||
'current_epoch': 8,
|
'current_epoch': 8,
|
||||||
@ -2100,7 +2105,7 @@ def saved_hyperopt_results():
|
|||||||
'loss': 2.4731817780991223,
|
'loss': 2.4731817780991223,
|
||||||
'params_dict': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal', 'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1012, 'roi_t2': 584, 'roi_t3': 422, 'roi_p1': 0.036764323603472565, 'roi_p2': 0.10335480573205287, 'roi_p3': 0.10322347377503042, 'stoploss': -0.2780610808108503}, # noqa: E501
|
'params_dict': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal', 'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1012, 'roi_t2': 584, 'roi_t3': 422, 'roi_p1': 0.036764323603472565, 'roi_p2': 0.10335480573205287, 'roi_p3': 0.10322347377503042, 'stoploss': -0.2780610808108503}, # noqa: E501
|
||||||
'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.2433426031105559, 422: 0.14011912933552545, 1006: 0.036764323603472565, 2018: 0}, 'stoploss': {'stoploss': -0.2780610808108503}}, # noqa: E501
|
'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.2433426031105559, 422: 0.14011912933552545, 1006: 0.036764323603472565, 2018: 0}, 'stoploss': {'stoploss': -0.2780610808108503}}, # noqa: E501
|
||||||
'results_metrics': {'total_trades': 229, 'wins': 150, 'draws': 0, 'losses': 79, 'profit_mean': -0.0038433433624454144, 'profit_median': -0.012222, 'profit_total': -0.044050070000000004, 'profit_total_abs': -88.01256299999999, 'holding_avg': timedelta(minutes=6505.676855895196)}, # noqa: E501
|
'results_metrics': {'total_trades': 229, 'wins': 150, 'draws': 0, 'losses': 79, 'profit_mean': -0.0038433433624454144, 'profit_median': -0.012222, 'profit_total': -0.044050070000000004, 'profit_total_abs': -88.01256299999999, 'max_drawdown': 0.41, 'max_drawdown_abs': -150.955321, 'holding_avg': timedelta(minutes=6505.676855895196)}, # noqa: E501
|
||||||
'results_explanation': ' 229 trades. Avg profit -0.38%. Total profit -0.04405007 BTC ( -88.01Σ%). Avg duration 6505.7 min.', # noqa: E501
|
'results_explanation': ' 229 trades. Avg profit -0.38%. Total profit -0.04405007 BTC ( -88.01Σ%). Avg duration 6505.7 min.', # noqa: E501
|
||||||
'total_profit': -0.044050070000000004, # noqa: E501
|
'total_profit': -0.044050070000000004, # noqa: E501
|
||||||
'current_epoch': 9,
|
'current_epoch': 9,
|
||||||
@ -2110,7 +2115,7 @@ def saved_hyperopt_results():
|
|||||||
'loss': -0.2604606005845212, # noqa: E501
|
'loss': -0.2604606005845212, # noqa: E501
|
||||||
'params_dict': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 792, 'roi_t2': 464, 'roi_t3': 215, 'roi_p1': 0.04594053535385903, 'roi_p2': 0.09623192684243963, 'roi_p3': 0.04428219070850663, 'stoploss': -0.16992287161634415}, # noqa: E501
|
'params_dict': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 792, 'roi_t2': 464, 'roi_t3': 215, 'roi_p1': 0.04594053535385903, 'roi_p2': 0.09623192684243963, 'roi_p3': 0.04428219070850663, 'stoploss': -0.16992287161634415}, # noqa: E501
|
||||||
'params_details': {'buy': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.18645465290480528, 215: 0.14217246219629864, 679: 0.04594053535385903, 1471: 0}, 'stoploss': {'stoploss': -0.16992287161634415}}, # noqa: E501
|
'params_details': {'buy': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.18645465290480528, 215: 0.14217246219629864, 679: 0.04594053535385903, 1471: 0}, 'stoploss': {'stoploss': -0.16992287161634415}}, # noqa: E501
|
||||||
'results_metrics': {'total_trades': 4, 'wins': 0, 'draws': 0, 'losses': 4, 'profit_mean': 0.001080385, 'profit_median': -0.012222, 'profit_total': 0.00021629, 'profit_total_abs': 0.432154, 'holding_avg': timedelta(minutes=2850.0)}, # noqa: E501
|
'results_metrics': {'total_trades': 4, 'wins': 0, 'draws': 0, 'losses': 4, 'profit_mean': 0.001080385, 'profit_median': -0.012222, 'profit_total': 0.00021629, 'profit_total_abs': 0.432154, 'max_drawdown': 0.13, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=2850.0)}, # noqa: E501
|
||||||
'results_explanation': ' 4 trades. Avg profit 0.11%. Total profit 0.00021629 BTC ( 0.43Σ%). Avg duration 2850.0 min.', # noqa: E501
|
'results_explanation': ' 4 trades. Avg profit 0.11%. Total profit 0.00021629 BTC ( 0.43Σ%). Avg duration 2850.0 min.', # noqa: E501
|
||||||
'total_profit': 0.00021629,
|
'total_profit': 0.00021629,
|
||||||
'current_epoch': 10,
|
'current_epoch': 10,
|
||||||
@ -2121,7 +2126,7 @@ def saved_hyperopt_results():
|
|||||||
'params_dict': {'mfi-value': 20, 'fastd-value': 32, 'adx-value': 49, 'rsi-value': 23, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower', 'sell-mfi-value': 75, 'sell-fastd-value': 56, 'sell-adx-value': 61, 'sell-rsi-value': 62, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 579, 'roi_t2': 614, 'roi_t3': 273, 'roi_p1': 0.05307643172744114, 'roi_p2': 0.1352282078262871, 'roi_p3': 0.1913307406325751, 'stoploss': -0.25728526022513887}, # noqa: E501
|
'params_dict': {'mfi-value': 20, 'fastd-value': 32, 'adx-value': 49, 'rsi-value': 23, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower', 'sell-mfi-value': 75, 'sell-fastd-value': 56, 'sell-adx-value': 61, 'sell-rsi-value': 62, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 579, 'roi_t2': 614, 'roi_t3': 273, 'roi_p1': 0.05307643172744114, 'roi_p2': 0.1352282078262871, 'roi_p3': 0.1913307406325751, 'stoploss': -0.25728526022513887}, # noqa: E501
|
||||||
'params_details': {'buy': {'mfi-value': 20, 'fastd-value': 32, 'adx-value': 49, 'rsi-value': 23, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 75, 'sell-fastd-value': 56, 'sell-adx-value': 61, 'sell-rsi-value': 62, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.3796353801863034, 273: 0.18830463955372825, 887: 0.05307643172744114, 1466: 0}, 'stoploss': {'stoploss': -0.25728526022513887}}, # noqa: E501
|
'params_details': {'buy': {'mfi-value': 20, 'fastd-value': 32, 'adx-value': 49, 'rsi-value': 23, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 75, 'sell-fastd-value': 56, 'sell-adx-value': 61, 'sell-rsi-value': 62, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.3796353801863034, 273: 0.18830463955372825, 887: 0.05307643172744114, 1466: 0}, 'stoploss': {'stoploss': -0.25728526022513887}}, # noqa: E501
|
||||||
# New Hyperopt mode!
|
# New Hyperopt mode!
|
||||||
'results_metrics': {'total_trades': 117, 'wins': 67, 'draws': 0, 'losses': 50, 'profit_mean': -0.012698609145299145, 'profit_median': -0.012222, 'profit_total': -0.07436117, 'profit_total_abs': -148.573727, 'holding_avg': timedelta(minutes=4282.5641025641025)}, # noqa: E501
|
'results_metrics': {'total_trades': 117, 'wins': 67, 'draws': 0, 'losses': 50, 'profit_mean': -0.012698609145299145, 'profit_median': -0.012222, 'profit_total': -0.07436117, 'profit_total_abs': -148.573727, 'max_drawdown': 0.52, 'max_drawdown_abs': -224.955321, 'holding_avg': timedelta(minutes=4282.5641025641025)}, # noqa: E501
|
||||||
'results_explanation': ' 117 trades. Avg profit -1.27%. Total profit -0.07436117 BTC (-148.57Σ%). Avg duration 4282.6 min.', # noqa: E501
|
'results_explanation': ' 117 trades. Avg profit -1.27%. Total profit -0.07436117 BTC (-148.57Σ%). Avg duration 4282.6 min.', # noqa: E501
|
||||||
'total_profit': -0.07436117,
|
'total_profit': -0.07436117,
|
||||||
'current_epoch': 11,
|
'current_epoch': 11,
|
||||||
@ -2131,7 +2136,7 @@ def saved_hyperopt_results():
|
|||||||
'loss': 100000,
|
'loss': 100000,
|
||||||
'params_dict': {'mfi-value': 10, 'fastd-value': 36, 'adx-value': 31, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'sar_reversal', 'sell-mfi-value': 80, 'sell-fastd-value': 71, 'sell-adx-value': 60, 'sell-rsi-value': 85, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1156, 'roi_t2': 581, 'roi_t3': 408, 'roi_p1': 0.06860454019988212, 'roi_p2': 0.12473718444931989, 'roi_p3': 0.2896360635226823, 'stoploss': -0.30889015124682806}, # noqa: E501
|
'params_dict': {'mfi-value': 10, 'fastd-value': 36, 'adx-value': 31, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'sar_reversal', 'sell-mfi-value': 80, 'sell-fastd-value': 71, 'sell-adx-value': 60, 'sell-rsi-value': 85, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1156, 'roi_t2': 581, 'roi_t3': 408, 'roi_p1': 0.06860454019988212, 'roi_p2': 0.12473718444931989, 'roi_p3': 0.2896360635226823, 'stoploss': -0.30889015124682806}, # noqa: E501
|
||||||
'params_details': {'buy': {'mfi-value': 10, 'fastd-value': 36, 'adx-value': 31, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 80, 'sell-fastd-value': 71, 'sell-adx-value': 60, 'sell-rsi-value': 85, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.4829777881718843, 408: 0.19334172464920202, 989: 0.06860454019988212, 2145: 0}, 'stoploss': {'stoploss': -0.30889015124682806}}, # noqa: E501
|
'params_details': {'buy': {'mfi-value': 10, 'fastd-value': 36, 'adx-value': 31, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 80, 'sell-fastd-value': 71, 'sell-adx-value': 60, 'sell-rsi-value': 85, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.4829777881718843, 408: 0.19334172464920202, 989: 0.06860454019988212, 2145: 0}, 'stoploss': {'stoploss': -0.30889015124682806}}, # noqa: E501
|
||||||
'results_metrics': {'total_trades': 0, 'wins': 0, 'draws': 0, 'losses': 0, 'profit_mean': None, 'profit_median': None, 'profit_total': 0, 'profit_total_abs': 0.0, 'holding_avg': timedelta()}, # noqa: E501
|
'results_metrics': {'total_trades': 0, 'wins': 0, 'draws': 0, 'losses': 0, 'profit_mean': None, 'profit_median': None, 'profit_total': 0, 'profit_total_abs': 0.0, 'max_drawdown': 0.0, 'max_drawdown_abs': 0.0, 'holding_avg': timedelta()}, # noqa: E501
|
||||||
'results_explanation': ' 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). Avg duration nan min.', # noqa: E501
|
'results_explanation': ' 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). Avg duration nan min.', # noqa: E501
|
||||||
'total_profit': 0,
|
'total_profit': 0,
|
||||||
'current_epoch': 12,
|
'current_epoch': 12,
|
||||||
|
@ -8,14 +8,14 @@ from pandas import DataFrame, DateOffset, Timestamp, to_datetime

 from freqtrade.configuration import TimeRange
 from freqtrade.constants import LAST_BT_RESULT_FN
-from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, BT_DATA_COLUMNS_MID, BT_DATA_COLUMNS_OLD,
-                                       analyze_trade_parallelism, calculate_csum,
+from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism, calculate_csum,
                                        calculate_market_change, calculate_max_drawdown,
-                                       combine_dataframes_with_mean, create_cum_profit,
-                                       extract_trades_of_period, get_latest_backtest_filename,
-                                       get_latest_hyperopt_file, load_backtest_data, load_trades,
-                                       load_trades_from_db)
+                                       calculate_underwater, combine_dataframes_with_mean,
+                                       create_cum_profit, extract_trades_of_period,
+                                       get_latest_backtest_filename, get_latest_hyperopt_file,
+                                       load_backtest_data, load_trades, load_trades_from_db)
 from freqtrade.data.history import load_data, load_pair_history
+from freqtrade.exceptions import OperationalException
 from tests.conftest import create_mock_trades
 from tests.conftest_trades import MOCK_TRADE_COUNT
@ -51,20 +51,14 @@ def test_get_latest_hyperopt_file(testdatadir, mocker):
     assert res == testdatadir.parent / "hyperopt_results.pickle"


-def test_load_backtest_data_old_format(testdatadir):
-
-    filename = testdatadir / "backtest-result_test.json"
-    bt_data = load_backtest_data(filename)
-    assert isinstance(bt_data, DataFrame)
-    assert list(bt_data.columns) == BT_DATA_COLUMNS_OLD + ['profit_abs', 'profit_ratio']
-    assert len(bt_data) == 179
-
-    # Test loading from string (must yield same result)
-    bt_data2 = load_backtest_data(str(filename))
-    assert bt_data.equals(bt_data2)
-
-    with pytest.raises(ValueError, match=r"File .* does not exist\."):
-        load_backtest_data(str("filename") + "nofile")
+def test_load_backtest_data_old_format(testdatadir, mocker):
+
+    filename = testdatadir / "backtest-result_test222.json"
+    mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])
+
+    with pytest.raises(OperationalException,
+                       match=r"Backtest-results with only trades data are no longer supported."):
+        load_backtest_data(filename)


 def test_load_backtest_data_new_format(testdatadir):
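The reworked test combines two pytest idioms: mocker.patch (from pytest-mock) to stub out load_backtest_stats, and pytest.raises(..., match=...), where match is a regular expression applied to the exception text. A tiny self-contained illustration of the raises/match part:

    import pytest

    def load(filename):
        raise ValueError(f"File {filename} does not exist.")

    def test_load_missing_file():
        # match= is a regex, so literal dots and similar characters need escaping.
        with pytest.raises(ValueError, match=r"File .* does not exist\."):
            load("nofile.json")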
@ -72,7 +66,7 @@ def test_load_backtest_data_new_format(testdatadir):
     filename = testdatadir / "backtest-result_new.json"
     bt_data = load_backtest_data(filename)
     assert isinstance(bt_data, DataFrame)
-    assert set(bt_data.columns) == set(BT_DATA_COLUMNS_MID)
+    assert set(bt_data.columns) == set(BT_DATA_COLUMNS + ['close_timestamp', 'open_timestamp'])
     assert len(bt_data) == 179

     # Test loading from string (must yield same result)

@ -96,7 +90,7 @@ def test_load_backtest_data_multi(testdatadir):
     for strategy in ('StrategyTestV2', 'TestStrategy'):
         bt_data = load_backtest_data(filename, strategy=strategy)
         assert isinstance(bt_data, DataFrame)
-        assert set(bt_data.columns) == set(BT_DATA_COLUMNS_MID)
+        assert set(bt_data.columns) == set(BT_DATA_COLUMNS + ['close_timestamp', 'open_timestamp'])
         assert len(bt_data) == 179

         # Test loading from string (must yield same result)

@ -167,8 +161,8 @@ def test_extract_trades_of_period(testdatadir):
     assert trades1.iloc[-1].close_date == Arrow(2017, 11, 14, 15, 25, 0).datetime


-def test_analyze_trade_parallelism(default_conf, mocker, testdatadir):
-    filename = testdatadir / "backtest-result_test.json"
+def test_analyze_trade_parallelism(testdatadir):
+    filename = testdatadir / "backtest-result_new.json"
     bt_data = load_backtest_data(filename)

     res = analyze_trade_parallelism(bt_data, "5m")
@ -234,8 +228,15 @@ def test_combine_dataframes_with_mean(testdatadir):
     assert "mean" in df.columns


+def test_combine_dataframes_with_mean_no_data(testdatadir):
+    pairs = ["ETH/BTC", "ADA/BTC"]
+    data = load_data(datadir=testdatadir, pairs=pairs, timeframe='6m')
+    with pytest.raises(ValueError, match=r"No objects to concatenate"):
+        combine_dataframes_with_mean(data)
+
+
 def test_create_cum_profit(testdatadir):
-    filename = testdatadir / "backtest-result_test.json"
+    filename = testdatadir / "backtest-result_new.json"
     bt_data = load_backtest_data(filename)
     timerange = TimeRange.parse_timerange("20180110-20180112")

@ -251,7 +252,7 @@ def test_create_cum_profit(testdatadir):


 def test_create_cum_profit1(testdatadir):
-    filename = testdatadir / "backtest-result_test.json"
+    filename = testdatadir / "backtest-result_new.json"
     bt_data = load_backtest_data(filename)
     # Move close-time to "off" the candle, to make sure the logic still works
     bt_data.loc[:, 'close_date'] = bt_data.loc[:, 'close_date'] + DateOffset(seconds=20)
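The new no-data test expects a ValueError because pandas refuses to concatenate an empty collection of frames. A rough sketch of that failure mode (this is only the underlying pandas behaviour, not freqtrade's implementation of combine_dataframes_with_mean):

    import pandas as pd

    data = {}  # stands in for load_data() returning nothing for an unknown timeframe

    try:
        pd.concat([df["close"].rename(pair) for pair, df in data.items()], axis=1)
    except ValueError as err:
        print(err)  # -> No objects to concatenate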
@ -273,23 +274,31 @@ def test_create_cum_profit1(testdatadir):


 def test_calculate_max_drawdown(testdatadir):
-    filename = testdatadir / "backtest-result_test.json"
+    filename = testdatadir / "backtest-result_new.json"
     bt_data = load_backtest_data(filename)
-    drawdown, hdate, lowdate, hval, lval = calculate_max_drawdown(bt_data)
+    _, hdate, lowdate, hval, lval, drawdown = calculate_max_drawdown(
+        bt_data, value_col="profit_abs")
     assert isinstance(drawdown, float)
-    assert pytest.approx(drawdown) == 0.21142322
+    assert pytest.approx(drawdown) == 0.12071099
     assert isinstance(hdate, Timestamp)
     assert isinstance(lowdate, Timestamp)
     assert isinstance(hval, float)
     assert isinstance(lval, float)
-    assert hdate == Timestamp('2018-01-24 14:25:00', tz='UTC')
-    assert lowdate == Timestamp('2018-01-30 04:45:00', tz='UTC')
+    assert hdate == Timestamp('2018-01-25 01:30:00', tz='UTC')
+    assert lowdate == Timestamp('2018-01-25 03:50:00', tz='UTC')
+
+    underwater = calculate_underwater(bt_data)
+    assert isinstance(underwater, DataFrame)

     with pytest.raises(ValueError, match='Trade dataframe empty.'):
-        drawdown, hdate, lowdate, hval, lval = calculate_max_drawdown(DataFrame())
+        calculate_max_drawdown(DataFrame())
+
+    with pytest.raises(ValueError, match='Trade dataframe empty.'):
+        calculate_underwater(DataFrame())


 def test_calculate_csum(testdatadir):
-    filename = testdatadir / "backtest-result_test.json"
+    filename = testdatadir / "backtest-result_new.json"
     bt_data = load_backtest_data(filename)
     csum_min, csum_max = calculate_csum(bt_data)
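calculate_max_drawdown now also reports a relative drawdown, and calculate_underwater exposes the running drawdown curve. The underlying idea, the distance of cumulative profit from its running peak, can be sketched with plain pandas (illustrative only, not freqtrade's exact implementation):

    import pandas as pd

    profits = pd.Series([0.02, -0.05, 0.01, -0.04, 0.03])  # per-trade absolute profit
    cumulative = profits.cumsum()
    high_water = cumulative.cummax()

    underwater = high_water - cumulative   # drawdown at every point ("underwater" curve)
    max_dd_abs = underwater.max()          # absolute maximum drawdown
    print(f"max absolute drawdown: {max_dd_abs:.4f} after trade #{underwater.idxmax()}")
    # A relative figure additionally needs the account value at the high-water mark.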
@ -317,12 +326,13 @@ def test_calculate_max_drawdown2():
     # sort by profit and reset index
     df = df.sort_values('profit').reset_index(drop=True)
     df1 = df.copy()
-    drawdown, hdate, ldate, hval, lval = calculate_max_drawdown(
+    drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown(
         df, date_col='open_date', value_col='profit')
     # Ensure df has not been altered.
     assert df.equals(df1)

     assert isinstance(drawdown, float)
+    assert isinstance(drawdown_rel, float)
     # High must be before low
     assert hdate < ldate
     # High value must be higher than low value
@ -311,7 +311,7 @@ def test_load_partial_missing(testdatadir, caplog) -> None:
     assert td != len(data['UNITTEST/BTC'])
     start_real = data['UNITTEST/BTC'].iloc[0, 0]
     assert log_has(f'Missing data at start for pair '
-                   f'UNITTEST/BTC, data starts at {start_real.strftime("%Y-%m-%d %H:%M:%S")}',
+                   f'UNITTEST/BTC at 5m, data starts at {start_real.strftime("%Y-%m-%d %H:%M:%S")}',
                    caplog)
     # Make sure we start fresh - test missing data at end
     caplog.clear()

@ -326,7 +326,7 @@ def test_load_partial_missing(testdatadir, caplog) -> None:
     # Shift endtime with +5 - as last candle is dropped (partial candle)
     end_real = arrow.get(data['UNITTEST/BTC'].iloc[-1, 0]).shift(minutes=5)
     assert log_has(f'Missing data at end for pair '
-                   f'UNITTEST/BTC, data ends at {end_real.strftime("%Y-%m-%d %H:%M:%S")}',
+                   f'UNITTEST/BTC at 5m, data ends at {end_real.strftime("%Y-%m-%d %H:%M:%S")}',
                    caplog)
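The updated assertions rebuild the exact log message, including the timeframe and a strftime-formatted timestamp. Composing the expected string looks like this (the date value is illustrative):

    from datetime import datetime

    start_real = datetime(2018, 1, 30, 4, 45)
    expected = (f'Missing data at start for pair '
                f'UNITTEST/BTC at 5m, data starts at {start_real.strftime("%Y-%m-%d %H:%M:%S")}')
    print(expected)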
|
47
tests/exchange/test_bitpanda.py
Normal file
47
tests/exchange/test_bitpanda.py
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
from datetime import datetime
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
from tests.conftest import get_patched_exchange
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_trades_for_order(default_conf, mocker):
|
||||||
|
exchange_name = 'bitpanda'
|
||||||
|
order_id = 'ABCD-ABCD'
|
||||||
|
since = datetime(2018, 5, 5, 0, 0, 0)
|
||||||
|
default_conf["dry_run"] = False
|
||||||
|
mocker.patch('freqtrade.exchange.Exchange.exchange_has', return_value=True)
|
||||||
|
api_mock = MagicMock()
|
||||||
|
|
||||||
|
api_mock.fetch_my_trades = MagicMock(return_value=[{'id': 'TTR67E-3PFBD-76IISV',
|
||||||
|
'order': 'ABCD-ABCD',
|
||||||
|
'info': {'pair': 'XLTCZBTC',
|
||||||
|
'time': 1519860024.4388,
|
||||||
|
'type': 'buy',
|
||||||
|
'ordertype': 'limit',
|
||||||
|
'price': '20.00000',
|
||||||
|
'cost': '38.62000',
|
||||||
|
'fee': '0.06179',
|
||||||
|
'vol': '5',
|
||||||
|
'id': 'ABCD-ABCD'},
|
||||||
|
'timestamp': 1519860024438,
|
||||||
|
'datetime': '2018-02-28T23:20:24.438Z',
|
||||||
|
'symbol': 'LTC/BTC',
|
||||||
|
'type': 'limit',
|
||||||
|
'side': 'buy',
|
||||||
|
'price': 165.0,
|
||||||
|
'amount': 0.2340606,
|
||||||
|
'fee': {'cost': 0.06179, 'currency': 'BTC'}
|
||||||
|
}])
|
||||||
|
exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
|
||||||
|
|
||||||
|
orders = exchange.get_trades_for_order(order_id, 'LTC/BTC', since)
|
||||||
|
assert len(orders) == 1
|
||||||
|
assert orders[0]['price'] == 165
|
||||||
|
assert api_mock.fetch_my_trades.call_count == 1
|
||||||
|
# since argument should be
|
||||||
|
assert isinstance(api_mock.fetch_my_trades.call_args[0][1], int)
|
||||||
|
assert api_mock.fetch_my_trades.call_args[0][0] == 'LTC/BTC'
|
||||||
|
# Same test twice, hardcoded number and doing the same calculation
|
||||||
|
assert api_mock.fetch_my_trades.call_args[0][1] == 1525478395000
|
||||||
|
# bitpanda requires "to" argument.
|
||||||
|
assert 'to' in api_mock.fetch_my_trades.call_args[1]['params']
|
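ccxt's fetch_my_trades takes `since` as integer milliseconds, which is why the test asserts on an int and a hard-coded epoch value. Converting the naive datetime used above looks roughly like this; the 5-second difference to the hard-coded 1525478395000 suggests a small safety margin applied inside the exchange wrapper, which should be treated as an assumption here:

    from datetime import datetime, timezone

    since = datetime(2018, 5, 5, 0, 0, 0)
    since_ms = int(since.replace(tzinfo=timezone.utc).timestamp()) * 1000
    print(since_ms)  # 1525478400000 - the test's expected value is 5000 ms earlier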
@@ -19,36 +19,49 @@ from tests.conftest import get_default_conf
 EXCHANGES = {
     'bittrex': {
         'pair': 'BTC/USDT',
+        'stake_currency': 'USDT',
         'hasQuoteVolume': False,
         'timeframe': '1h',
     },
     'binance': {
         'pair': 'BTC/USDT',
+        'stake_currency': 'USDT',
         'hasQuoteVolume': True,
         'timeframe': '5m',
     },
     'kraken': {
         'pair': 'BTC/USDT',
+        'stake_currency': 'USDT',
         'hasQuoteVolume': True,
         'timeframe': '5m',
     },
     'ftx': {
         'pair': 'BTC/USDT',
+        'stake_currency': 'USDT',
         'hasQuoteVolume': True,
         'timeframe': '5m',
     },
     'kucoin': {
         'pair': 'BTC/USDT',
+        'stake_currency': 'USDT',
         'hasQuoteVolume': True,
         'timeframe': '5m',
     },
     'gateio': {
         'pair': 'BTC/USDT',
+        'stake_currency': 'USDT',
         'hasQuoteVolume': True,
         'timeframe': '5m',
     },
     'okex': {
         'pair': 'BTC/USDT',
+        'stake_currency': 'USDT',
         'hasQuoteVolume': True,
         'timeframe': '5m',
     },
+    'bitvavo': {
+        'pair': 'BTC/EUR',
+        'stake_currency': 'EUR',
+        'hasQuoteVolume': True,
+        'timeframe': '5m',
+    },
@@ -68,6 +81,7 @@ def exchange_conf():
 @pytest.fixture(params=EXCHANGES, scope="class")
 def exchange(request, exchange_conf):
     exchange_conf['exchange']['name'] = request.param
+    exchange_conf['stake_currency'] = EXCHANGES[request.param]['stake_currency']
     exchange = ExchangeResolver.load_exchange(request.param, exchange_conf, validate=True)

     yield exchange, request.param
@@ -20,7 +20,7 @@ from freqtrade.exchange.exchange import (market_is_active, timeframe_to_minutes,
                                          timeframe_to_next_date, timeframe_to_prev_date,
                                          timeframe_to_seconds)
 from freqtrade.resolvers.exchange_resolver import ExchangeResolver
-from tests.conftest import get_mock_coro, get_patched_exchange, log_has, log_has_re
+from tests.conftest import get_mock_coro, get_patched_exchange, log_has, log_has_re, num_log_has_re


 # Make sure to always keep one exchange here which is NOT subclassed!!
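
Note: num_log_has_re is a counting companion to the existing log_has_re helper in tests/conftest.py; its implementation is not shown in this diff. A minimal sketch of such a helper, assuming pytest's caplog.record_tuples interface (an assumption, not the verbatim conftest code):

    import re

    def num_log_has_re(line, logs):
        # Count how many captured log messages match the given regex.
        return sum(1 for _name, _level, message in logs.record_tuples
                   if re.match(line, message))

num_log_has, used later in the pairlist tests, would be the exact-string counterpart (comparing message == line instead of matching a regex).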
@@ -1740,6 +1740,44 @@ async def test__async_get_candle_history(default_conf, mocker, caplog, exchange_
                 (arrow.utcnow().int_timestamp - 2000) * 1000)


+@pytest.mark.asyncio
+async def test__async_kucoin_get_candle_history(default_conf, mocker, caplog):
+    caplog.set_level(logging.INFO)
+    api_mock = MagicMock()
+    api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection(
+        "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?"
+        "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735"
+        "429 Too Many Requests" '{"code":"429000","msg":"Too Many Requests"}'))
+    exchange = get_patched_exchange(mocker, default_conf, api_mock, id="kucoin")
+
+    msg = "Kucoin 429 error, avoid triggering DDosProtection backoff delay"
+    assert not num_log_has_re(msg, caplog)
+
+    for _ in range(3):
+        with pytest.raises(DDosProtection, match=r'429 Too Many Requests'):
+            await exchange._async_get_candle_history(
+                "ETH/BTC", "5m", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3)
+    assert num_log_has_re(msg, caplog) == 3
+
+    caplog.clear()
+    # Test regular non-kucoin message
+    api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection(
+        "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?"
+        "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735"
+        "429 Too Many Requests" '{"code":"2222222","msg":"Too Many Requests"}'))
+
+    msg = r'_async_get_candle_history\(\) returned exception: .*'
+    msg2 = r'Applying DDosProtection backoff delay: .*'
+    with patch('freqtrade.exchange.common.asyncio.sleep', get_mock_coro(None)):
+        for _ in range(3):
+            with pytest.raises(DDosProtection, match=r'429 Too Many Requests'):
+                await exchange._async_get_candle_history(
+                    "ETH/BTC", "5m", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3)
+        # Expect the "returned exception" message 12 times (4 retries * 3 (loop))
+        assert num_log_has_re(msg, caplog) == 12
+        assert num_log_has_re(msg2, caplog) == 9
+
+
 @pytest.mark.asyncio
 async def test__async_get_candle_history_empty(default_conf, mocker, caplog):
     """ Test empty exchange result """
@@ -426,8 +426,6 @@ tc26 = BTContainer(data=[

 # Test 27: Sell with signal sell in candle 3 (ROI at signal candle)
 # Stoploss at 10% (irrelevant), ROI at 5% (will trigger) - Wins over Sell-signal
-# TODO: figure out if sell-signal should win over ROI
-# Sell-signal wins over stoploss
 tc27 = BTContainer(data=[
     # D   O     H     L     C     V    B  S
     [0, 5000, 5025, 4975, 4987, 6172, 1, 0],
@@ -436,8 +434,8 @@ tc27 = BTContainer(data=[
     [3, 5010, 5012, 4986, 5010, 6172, 0, 1],  # sell-signal
     [4, 5010, 5251, 4855, 4995, 6172, 0, 0],  # Triggers ROI, sell-signal acted on
     [5, 4995, 4995, 4950, 4950, 6172, 0, 0]],
-    stop_loss=-0.10, roi={"0": 0.05}, profit_perc=0.05, use_sell_signal=True,
-    trades=[BTrade(sell_reason=SellType.ROI, open_tick=1, close_tick=4)]
+    stop_loss=-0.10, roi={"0": 0.05}, profit_perc=0.002, use_sell_signal=True,
+    trades=[BTrade(sell_reason=SellType.SELL_SIGNAL, open_tick=1, close_tick=4)]
 )

 # Test 28: trailing_stop should raise so candle 3 causes a stoploss
@@ -1,6 +1,7 @@
 # pragma pylint: disable=missing-docstring, W0212, line-too-long, C0103, unused-argument

 import random
+from copy import deepcopy
 from datetime import datetime, timedelta, timezone
 from pathlib import Path
 from unittest.mock import MagicMock, PropertyMock
@@ -648,7 +649,7 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None:
     processed = backtesting.strategy.advise_all_indicators(data)
     min_date, max_date = get_timerange(processed)
     result = backtesting.backtest(
-        processed=processed,
+        processed=deepcopy(processed),
         start_date=min_date,
         end_date=max_date,
         max_open_trades=10,
@@ -887,7 +888,7 @@ def test_backtest_multi_pair(default_conf, fee, mocker, tres, pair, testdatadir)
     processed = backtesting.strategy.advise_all_indicators(data)
     min_date, max_date = get_timerange(processed)
     backtest_conf = {
-        'processed': processed,
+        'processed': deepcopy(processed),
         'start_date': min_date,
         'end_date': max_date,
         'max_open_trades': 3,
@@ -909,7 +910,7 @@ def test_backtest_multi_pair(default_conf, fee, mocker, tres, pair, testdatadir)
         'NXT/BTC', '5m')[0]) == len(data['NXT/BTC']) - 1 - backtesting.strategy.startup_candle_count

     backtest_conf = {
-        'processed': processed,
+        'processed': deepcopy(processed),
         'start_date': min_date,
         'end_date': max_date,
         'max_open_trades': 1,
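
Note on the deepcopy(processed) changes above: the motivation is not spelled out in this diff, but a plausible reading is that backtest() modifies the candle data it receives in place, so running it twice on the same dict within one test would start the second run from already-modified data. A self-contained illustration of that hazard (fake_backtest is a hypothetical stand-in, not freqtrade code):

    from copy import deepcopy
    import pandas as pd

    processed = {'UNITTEST/BTC': pd.DataFrame({'close': [1.0, 2.0, 3.0]})}

    def fake_backtest(data):
        # stand-in for a routine that trims/annotates its input in place
        for df in data.values():
            df.drop(df.index[:1], inplace=True)

    fake_backtest(deepcopy(processed))   # the original fixture stays intact
    assert len(processed['UNITTEST/BTC']) == 3
    fake_backtest(processed)             # without the copy, the shared data shrinks
    assert len(processed['UNITTEST/BTC']) == 2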
@@ -1,5 +1,5 @@
 # pragma pylint: disable=missing-docstring,W0212,C0103
-from datetime import datetime
+from datetime import datetime, timedelta
 from pathlib import Path
 from unittest.mock import ANY, MagicMock

@@ -22,6 +22,29 @@ from tests.conftest import (get_args, log_has, log_has_re, patch_exchange,
                             patched_configuration_load_config_file)


+def generate_result_metrics():
+    return {
+        'trade_count': 1,
+        'total_trades': 1,
+        'avg_profit': 0.1,
+        'total_profit': 0.001,
+        'profit': 0.01,
+        'duration': 20.0,
+        'wins': 1,
+        'draws': 0,
+        'losses': 0,
+        'profit_mean': 0.01,
+        'profit_total_abs': 0.001,
+        'profit_total': 0.01,
+        'holding_avg': timedelta(minutes=20),
+        'max_drawdown': 0.001,
+        'max_drawdown_abs': 0.001,
+        'loss': 0.001,
+        'is_initial_point': 0.001,
+        'is_best': 1,
+    }
+
+
 def test_setup_hyperopt_configuration_without_arguments(mocker, default_conf, caplog) -> None:
     patched_configuration_load_config_file(mocker, default_conf)

@@ -168,7 +191,8 @@ def test_start_no_hyperopt_allowed(mocker, hyperopt_conf, caplog) -> None:
         start_hyperopt(pargs)


-def test_start_no_data(mocker, hyperopt_conf) -> None:
+def test_start_no_data(mocker, hyperopt_conf, tmpdir) -> None:
+    hyperopt_conf['user_data_dir'] = Path(tmpdir)
     patched_configuration_load_config_file(mocker, hyperopt_conf)
     mocker.patch('freqtrade.data.history.load_pair_history', MagicMock(return_value=pd.DataFrame))
     mocker.patch(
@@ -177,7 +201,6 @@ def test_start_no_data(mocker, hyperopt_conf) -> None:
     )

     patch_exchange(mocker)
-    # TODO: migrate to strategy-based hyperopt
     args = [
         'hyperopt',
         '--config', 'config.json',
@@ -189,6 +212,12 @@ def test_start_no_data(mocker, hyperopt_conf) -> None:
     with pytest.raises(OperationalException, match='No data found. Terminating.'):
         start_hyperopt(pargs)

+    # Cleanup since that failed hyperopt start leaves a lockfile.
+    try:
+        Path(Hyperopt.get_lock_filename(hyperopt_conf)).unlink()
+    except Exception:
+        pass
+

 def test_start_filelock(mocker, hyperopt_conf, caplog) -> None:
     hyperopt_mock = MagicMock(side_effect=Timeout(Hyperopt.get_lock_filename(hyperopt_conf)))
@@ -215,14 +244,7 @@ def test_log_results_if_loss_improves(hyperopt, capsys) -> None:
     hyperopt.print_results(
         {
             'loss': 1,
-            'results_metrics':
-                {
-                    'trade_count': 1,
-                    'avg_profit': 0.1,
-                    'total_profit': 0.001,
-                    'profit': 1.0,
-                    'duration': 20.0
-                },
+            'results_metrics': generate_result_metrics(),
             'total_profit': 0,
             'current_epoch': 2,  # This starts from 1 (in a human-friendly manner)
             'is_initial_point': False,
@@ -231,7 +253,7 @@ def test_log_results_if_loss_improves(hyperopt, capsys) -> None:
     )
     out, err = capsys.readouterr()
     assert all(x in out
-               for x in ["Best", "2/2", " 1", "0.10%", "0.00100000 BTC (1.00%)", "20.0 m"])
+               for x in ["Best", "2/2", " 1", "0.10%", "0.00100000 BTC (1.00%)", "00:20:00"])


 def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None:
@@ -288,14 +310,7 @@ def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None:
         MagicMock(return_value=[{
             'loss': 1, 'results_explanation': 'foo result',
             'params': {'buy': {}, 'sell': {}, 'roi': {}, 'stoploss': 0.0},
-            'results_metrics':
-                {
-                    'trade_count': 1,
-                    'avg_profit': 0.1,
-                    'total_profit': 0.001,
-                    'profit': 1.0,
-                    'duration': 20.0
-                },
+            'results_metrics': generate_result_metrics(),
         }])
     )
     patch_exchange(mocker)
@@ -352,7 +367,7 @@ def test_hyperopt_format_results(hyperopt):
         'backtest_start_time': 1619718665,
         'backtest_end_time': 1619718665,
     }
-    results_metrics = generate_strategy_stats({'XRP/BTC': None}, '', bt_result,
+    results_metrics = generate_strategy_stats(['XRP/BTC'], '', bt_result,
                                               Arrow(2017, 11, 14, 19, 32, 00),
                                               Arrow(2017, 12, 14, 19, 32, 00), market_change=0)

@@ -521,14 +536,7 @@ def test_print_json_spaces_all(mocker, hyperopt_conf, capsys) -> None:
                 'roi': {}, 'stoploss': {'stoploss': None},
                 'trailing': {'trailing_stop': None}
             },
-            'results_metrics':
-                {
-                    'trade_count': 1,
-                    'avg_profit': 0.1,
-                    'total_profit': 0.001,
-                    'profit': 1.0,
-                    'duration': 20.0
-                }
+            'results_metrics': generate_result_metrics(),
         }])
     )
     patch_exchange(mocker)
@@ -577,14 +585,7 @@ def test_print_json_spaces_default(mocker, hyperopt_conf, capsys) -> None:
                 'sell': {'sell-mfi-value': None},
                 'roi': {}, 'stoploss': {'stoploss': None}
             },
-            'results_metrics':
-                {
-                    'trade_count': 1,
-                    'avg_profit': 0.1,
-                    'total_profit': 0.001,
-                    'profit': 1.0,
-                    'duration': 20.0
-                }
+            'results_metrics': generate_result_metrics(),
         }])
     )
     patch_exchange(mocker)
@@ -622,14 +623,7 @@ def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
         MagicMock(return_value=[{
             'loss': 1, 'results_explanation': 'foo result', 'params': {},
             'params_details': {'roi': {}, 'stoploss': {'stoploss': None}},
-            'results_metrics':
-                {
-                    'trade_count': 1,
-                    'avg_profit': 0.1,
-                    'total_profit': 0.001,
-                    'profit': 1.0,
-                    'duration': 20.0
-                }
+            'results_metrics': generate_result_metrics(),
         }])
     )
     patch_exchange(mocker)
@@ -669,14 +663,7 @@ def test_simplified_interface_roi_stoploss(mocker, hyperopt_conf, capsys) -> Non
         'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel',
         MagicMock(return_value=[{
             'loss': 1, 'results_explanation': 'foo result', 'params': {'stoploss': 0.0},
-            'results_metrics':
-                {
-                    'trade_count': 1,
-                    'avg_profit': 0.1,
-                    'total_profit': 0.001,
-                    'profit': 1.0,
-                    'duration': 20.0
-                }
+            'results_metrics': generate_result_metrics(),
         }])
     )
     patch_exchange(mocker)
@@ -749,14 +736,7 @@ def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None:
         'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel',
         MagicMock(return_value=[{
             'loss': 1, 'results_explanation': 'foo result', 'params': {},
-            'results_metrics':
-                {
-                    'trade_count': 1,
-                    'avg_profit': 0.1,
-                    'total_profit': 0.001,
-                    'profit': 1.0,
-                    'duration': 20.0
-                }
+            'results_metrics': generate_result_metrics(),
         }])
     )
     patch_exchange(mocker)
@@ -798,14 +778,7 @@ def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None:
         'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel',
         MagicMock(return_value=[{
             'loss': 1, 'results_explanation': 'foo result', 'params': {},
-            'results_metrics':
-                {
-                    'trade_count': 1,
-                    'avg_profit': 0.1,
-                    'total_profit': 0.001,
-                    'profit': 1.0,
-                    'duration': 20.0
-                }
+            'results_metrics': generate_result_metrics(),
         }])
     )
     patch_exchange(mocker)
@@ -1,4 +1,3 @@
-import datetime
 import re
 from datetime import timedelta
 from pathlib import Path
@@ -49,7 +48,7 @@ def test_text_table_bt_results():
         ' 0:20:00 | 2 0 1 66.7 |'
     )

-    pair_results = generate_pair_metrics(data={'ETH/BTC': {}}, stake_currency='BTC',
+    pair_results = generate_pair_metrics(['ETH/BTC'], stake_currency='BTC',
                                          starting_balance=4, results=results)
     assert text_table_bt_results(pair_results, stake_currency='BTC') == result_str

@@ -103,7 +102,7 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmpdir):
     assert strat_stats['backtest_end'] == max_date.strftime(DATETIME_PRINT_FORMAT)
     assert strat_stats['total_trades'] == len(results['DefStrat']['results'])
     # Above sample had no loosing trade
-    assert strat_stats['max_drawdown'] == 0.0
+    assert strat_stats['max_drawdown_account'] == 0.0

     # Retry with losing trade
     results = {'DefStrat': {
@@ -143,7 +142,7 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmpdir):
     assert 'strategy_comparison' in stats
     strat_stats = stats['strategy']['DefStrat']

-    assert strat_stats['max_drawdown'] == 0.013803
+    assert pytest.approx(strat_stats['max_drawdown_account']) == 1.399999e-08
     assert strat_stats['drawdown_start'] == '2017-11-14 22:10:00'
     assert strat_stats['drawdown_end'] == '2017-11-14 22:43:00'
     assert strat_stats['drawdown_end_ts'] == 1510699380000
@@ -165,7 +164,7 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmpdir):
     filename1 = Path(tmpdir / last_fn)
     assert filename1.is_file()
     content = filename1.read_text()
-    assert 'max_drawdown' in content
+    assert 'max_drawdown_account' in content
     assert 'strategy' in content
     assert 'pairlist' in content

@@ -208,7 +207,7 @@ def test_generate_pair_metrics():
         }
     )

-    pair_results = generate_pair_metrics(data={'ETH/BTC': {}}, stake_currency='BTC',
+    pair_results = generate_pair_metrics(['ETH/BTC'], stake_currency='BTC',
                                          starting_balance=2, results=results)
     assert isinstance(pair_results, list)
     assert len(pair_results) == 2
@@ -227,9 +226,9 @@ def test_generate_daily_stats(testdatadir):
     assert isinstance(res, dict)
     assert round(res['backtest_best_day'], 4) == 0.1796
     assert round(res['backtest_worst_day'], 4) == -0.1468
-    assert res['winning_days'] == 14
-    assert res['draw_days'] == 4
-    assert res['losing_days'] == 3
+    assert res['winning_days'] == 19
+    assert res['draw_days'] == 0
+    assert res['losing_days'] == 2

     # Select empty dataframe!
     res = generate_daily_stats(bt_data.loc[bt_data['open_date'] == '2000-01-01', :])
@@ -324,51 +323,25 @@ def test_generate_sell_reason_stats():
     assert stop_result['profit_mean_pct'] == round(stop_result['profit_mean'] * 100, 2)


-def test_text_table_strategy(default_conf):
-    default_conf['max_open_trades'] = 2
-    default_conf['dry_run_wallet'] = 3
-    results = {}
-    date = datetime.datetime(year=2020, month=1, day=1, hour=12, minute=30)
-    delta = datetime.timedelta(days=1)
-    results['TestStrategy1'] = {'results': pd.DataFrame(
-        {
-            'pair': ['ETH/BTC', 'ETH/BTC', 'ETH/BTC'],
-            'close_date': [date, date + delta, date + delta * 2],
-            'profit_ratio': [0.1, 0.2, 0.3],
-            'profit_abs': [0.2, 0.4, 0.5],
-            'trade_duration': [10, 30, 10],
-            'wins': [2, 0, 0],
-            'draws': [0, 0, 0],
-            'losses': [0, 0, 1],
-            'sell_reason': [SellType.ROI, SellType.ROI, SellType.STOP_LOSS]
-        }
-    ), 'config': default_conf}
-    results['TestStrategy2'] = {'results': pd.DataFrame(
-        {
-            'pair': ['LTC/BTC', 'LTC/BTC', 'LTC/BTC'],
-            'close_date': [date, date + delta, date + delta * 2],
-            'profit_ratio': [0.4, 0.2, 0.3],
-            'profit_abs': [0.4, 0.4, 0.5],
-            'trade_duration': [15, 30, 15],
-            'wins': [4, 1, 0],
-            'draws': [0, 0, 0],
-            'losses': [0, 0, 1],
-            'sell_reason': [SellType.ROI, SellType.ROI, SellType.STOP_LOSS]
-        }
-    ), 'config': default_conf}
+def test_text_table_strategy(testdatadir):
+    filename = testdatadir / "backtest-result_multistrat.json"
+    bt_res_data = load_backtest_stats(filename)
+
+    bt_res_data_comparison = bt_res_data.pop('strategy_comparison')

     result_str = (
         '| Strategy | Buys | Avg Profit % | Cum Profit % | Tot Profit BTC |'
         ' Tot Profit % | Avg Duration | Win Draw Loss Win% | Drawdown |\n'
-        '|---------------+--------+----------------+----------------+------------------+'
+        '|----------------+--------+----------------+----------------+------------------+'
         '----------------+----------------+-------------------------+-----------------------|\n'
-        '| TestStrategy1 | 3 | 20.00 | 60.00 | 1.10000000 |'
-        ' 36.67 | 0:17:00 | 3 0 0 100 | 0.00000000 BTC 0.00% |\n'
-        '| TestStrategy2 | 3 | 30.00 | 90.00 | 1.30000000 |'
-        ' 43.33 | 0:20:00 | 3 0 0 100 | 0.00000000 BTC 0.00% |'
+        '| StrategyTestV2 | 179 | 0.08 | 14.39 | 0.02608550 |'
+        ' 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |\n'
+        '| TestStrategy | 179 | 0.08 | 14.39 | 0.02608550 |'
+        ' 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |'
     )

-    strategy_results = generate_strategy_comparison(all_results=results)
-
+    strategy_results = generate_strategy_comparison(bt_stats=bt_res_data['strategy'])
+    assert strategy_results == bt_res_data_comparison
     assert text_table_strategy(strategy_results, 'BTC') == result_str


@@ -1,5 +1,6 @@
 # pragma pylint: disable=missing-docstring,C0103,protected-access

+import logging
 import time
 from unittest.mock import MagicMock, PropertyMock

@@ -14,7 +15,7 @@ from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
 from freqtrade.plugins.pairlistmanager import PairListManager
 from freqtrade.resolvers import PairListResolver
 from tests.conftest import (create_mock_trades, get_patched_exchange, get_patched_freqtradebot,
-                            log_has, log_has_re)
+                            log_has, log_has_re, num_log_has)


 @pytest.fixture(scope="function")
@@ -217,6 +218,34 @@ def test_invalid_blacklist(mocker, markets, static_pl_conf, caplog):
     log_has_re(r"Pair blacklist contains an invalid Wildcard.*", caplog)


+def test_remove_logs_for_pairs_already_in_blacklist(mocker, markets, static_pl_conf, caplog):
+    logger = logging.getLogger(__name__)
+    freqtrade = get_patched_freqtradebot(mocker, static_pl_conf)
+    mocker.patch.multiple(
+        'freqtrade.exchange.Exchange',
+        exchange_has=MagicMock(return_value=True),
+        markets=PropertyMock(return_value=markets),
+    )
+    freqtrade.pairlists.refresh_pairlist()
+    whitelist = ['ETH/BTC', 'TKN/BTC']
+    caplog.clear()
+    caplog.set_level(logging.INFO)
+
+    # Ensure all except those in whitelist are removed.
+    assert set(whitelist) == set(freqtrade.pairlists.whitelist)
+    assert static_pl_conf['exchange']['pair_blacklist'] == freqtrade.pairlists.blacklist
+    # Ensure that log message wasn't generated.
+    assert not log_has('Pair BLK/BTC in your blacklist. Removing it from whitelist...', caplog)
+
+    for _ in range(3):
+        new_whitelist = freqtrade.pairlists.verify_blacklist(
+            whitelist + ['BLK/BTC'], logger.warning)
+        # Ensure that the pair is removed from the white list, and properly logged.
+        assert set(whitelist) == set(new_whitelist)
+        assert num_log_has('Pair BLK/BTC in your blacklist. Removing it from whitelist...',
+                           caplog) == 1
+
+
 def test_refresh_pairlist_dynamic(mocker, shitcoinmarkets, tickers, whitelist_conf):

     mocker.patch.multiple(
@@ -536,36 +565,41 @@ def test_VolumePairList_whitelist_gen(mocker, whitelist_conf, shitcoinmarkets, t
     assert log_has_re(r'^Removed .* from whitelist, because volatility.*$', caplog)


-@pytest.mark.parametrize("pairlists,base_currency,volumefilter_result", [
+@pytest.mark.parametrize("pairlists,base_currency,exchange,volumefilter_result", [
     # default refresh of 1800 to small for daily candle lookback
     ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume",
       "lookback_days": 1}],
-     "BTC", "default_refresh_too_short"),  # OperationalException expected
+     "BTC", "binance", "default_refresh_too_short"),  # OperationalException expected
     # ambigous configuration with lookback days and period
     ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume",
       "lookback_days": 1, "lookback_period": 1}],
-     "BTC", "lookback_days_and_period"),  # OperationalException expected
+     "BTC", "binance", "lookback_days_and_period"),  # OperationalException expected
     # negative lookback period
     ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume",
       "lookback_timeframe": "1d", "lookback_period": -1}],
-     "BTC", "lookback_period_negative"),  # OperationalException expected
+     "BTC", "binance", "lookback_period_negative"),  # OperationalException expected
     # lookback range exceedes exchange limit
     ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume",
       "lookback_timeframe": "1m", "lookback_period": 2000, "refresh_period": 3600}],
-     "BTC", 'lookback_exceeds_exchange_request_size'),  # OperationalException expected
+     "BTC", "binance", "lookback_exceeds_exchange_request_size"),  # OperationalException expected
     # expecing pairs as given
     ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume",
       "lookback_timeframe": "1d", "lookback_period": 1, "refresh_period": 86400}],
-     "BTC", ['HOT/BTC', 'LTC/BTC', 'ETH/BTC', 'TKN/BTC', 'XRP/BTC']),
+     "BTC", "binance", ['LTC/BTC', 'ETH/BTC', 'TKN/BTC', 'XRP/BTC', 'HOT/BTC']),
     # expecting pairs from default tickers, because 1h candles are not available
     ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume",
       "lookback_timeframe": "1h", "lookback_period": 2, "refresh_period": 3600}],
-     "BTC", ['ETH/BTC', 'TKN/BTC', 'LTC/BTC', 'HOT/BTC', 'FUEL/BTC']),
+     "BTC", "binance", ['ETH/BTC', 'TKN/BTC', 'LTC/BTC', 'HOT/BTC', 'FUEL/BTC']),
+    # ftx data is already in Quote currency, therefore won't require conversion
+    ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume",
+      "lookback_timeframe": "1d", "lookback_period": 1, "refresh_period": 86400}],
+     "BTC", "ftx", ['HOT/BTC', 'LTC/BTC', 'ETH/BTC', 'TKN/BTC', 'XRP/BTC']),
 ])
 def test_VolumePairList_range(mocker, whitelist_conf, shitcoinmarkets, tickers, ohlcv_history,
-                              pairlists, base_currency, volumefilter_result, caplog) -> None:
+                              pairlists, base_currency, exchange, volumefilter_result) -> None:
     whitelist_conf['pairlists'] = pairlists
     whitelist_conf['stake_currency'] = base_currency
+    whitelist_conf['exchange']['name'] = exchange

     ohlcv_history_high_vola = ohlcv_history.copy()
     ohlcv_history_high_vola.loc[ohlcv_history_high_vola.index == 1, 'close'] = 0.00090
@@ -574,9 +608,14 @@ def test_VolumePairList_range(mocker, whitelist_conf, shitcoinmarkets, tickers,
     ohlcv_history_medium_volume = ohlcv_history.copy()
     ohlcv_history_medium_volume.loc[ohlcv_history_medium_volume.index == 2, 'volume'] = 5

-    # create candles for high volume with all candles high volume
+    # create candles for high volume with all candles high volume, but very low price.
     ohlcv_history_high_volume = ohlcv_history.copy()
     ohlcv_history_high_volume.loc[:, 'volume'] = 10
+    ohlcv_history_high_volume.loc[:, 'low'] = ohlcv_history_high_volume.loc[:, 'low'] * 0.01
+    ohlcv_history_high_volume.loc[:, 'high'] = ohlcv_history_high_volume.loc[:, 'high'] * 0.01
+    ohlcv_history_high_volume.loc[:, 'close'] = ohlcv_history_high_volume.loc[:, 'close'] * 0.01
+
+    mocker.patch('freqtrade.exchange.ftx.Ftx.market_is_tradable', return_value=True)

     ohlcv_data = {
         ('ETH/BTC', '1d'): ohlcv_history,
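
Context for the ftx case and the price-scaling above: per the comments in this diff, ftx tickers already report volume in the quote currency, while for other exchanges the lookback-based filter derives quote volume from the candles, so a large base volume at a very low price can still rank low. A rough, hedged illustration of that difference (not the actual VolumePairList code, which may weight candle prices differently):

    import pandas as pd

    candles = pd.DataFrame({'close': [0.0005, 0.0005, 0.0005], 'volume': [10, 10, 10]})

    # Non-ftx: quote volume has to be derived, e.g. price * volume per candle.
    derived_quote_volume = (candles['close'] * candles['volume']).sum()   # 0.015 "BTC"

    # ftx: the volume field is assumed to already be quote-denominated, no conversion.
    ftx_quote_volume = candles['volume'].sum()                            # 30.0

    print(derived_quote_volume, ftx_quote_volume)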
@@ -424,7 +424,7 @@ def test_rpc_trade_statistics(default_conf, ticker, ticker_sell_up, fee,
     assert stats['trade_count'] == 2
     assert stats['first_trade_date'] == 'just now'
     assert stats['latest_trade_date'] == 'just now'
-    assert stats['avg_duration'] in ('0:00:00', '0:00:01')
+    assert stats['avg_duration'] in ('0:00:00', '0:00:01', '0:00:02')
     assert stats['best_pair'] == 'ETH/BTC'
     assert prec_satoshi(stats['best_rate'], 6.2)

@@ -435,7 +435,7 @@ def test_rpc_trade_statistics(default_conf, ticker, ticker_sell_up, fee,
     assert stats['trade_count'] == 2
     assert stats['first_trade_date'] == 'just now'
     assert stats['latest_trade_date'] == 'just now'
-    assert stats['avg_duration'] in ('0:00:00', '0:00:01')
+    assert stats['avg_duration'] in ('0:00:00', '0:00:01', '0:00:02')
     assert stats['best_pair'] == 'ETH/BTC'
     assert prec_satoshi(stats['best_rate'], 6.2)
     assert isnan(stats['profit_all_coin'])
@@ -584,7 +584,7 @@ def test_monthly_handle(default_conf, update, ticker, limit_buy_order, fee,
     assert 'Monthly Profit over the last 2 months</b>:' in msg_mock.call_args_list[0][0][0]
     assert 'Month ' in msg_mock.call_args_list[0][0][0]
     today = datetime.utcnow().date()
-    current_month = f"{today.year}-{today.month} "
+    current_month = f"{today.year}-{today.month:02} "
     assert current_month in msg_mock.call_args_list[0][0][0]
     assert str(' 0.00006217 BTC') in msg_mock.call_args_list[0][0][0]
     assert str(' 0.933 USD') in msg_mock.call_args_list[0][0][0]
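
The change to the expected month string above is only about zero-padding the month number; a quick standalone check of the two format specs (the date value is illustrative):

    from datetime import date

    today = date(2021, 9, 15)
    assert f"{today.year}-{today.month} " == "2021-9 "     # old expectation, unpadded
    assert f"{today.year}-{today.month:02} " == "2021-09 "  # new expectation, zero-padded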
@@ -1904,7 +1904,7 @@ def test_handle_trade_roi(default_conf_usdt, ticker_usdt, limit_buy_order_usdt_o
     # we might just want to check if we are in a sell condition without
     # executing
     # if ROI is reached we must sell
-    patch_get_signal(freqtrade, value=(False, True, None, None))
+    patch_get_signal(freqtrade, value=(False, False, None, None))
     assert freqtrade.handle_trade(trade)
     assert log_has("ETH/USDT - Required profit reached. sell_type=SellType.ROI",
                    caplog)
@@ -3241,7 +3241,7 @@ def test_ignore_roi_if_buy_signal(default_conf_usdt, limit_buy_order_usdt,
     assert freqtrade.handle_trade(trade) is False

     # Test if buy-signal is absent (should sell due to roi = true)
-    patch_get_signal(freqtrade, value=(False, True, None, None))
+    patch_get_signal(freqtrade, value=(False, False, None, None))
     assert freqtrade.handle_trade(trade) is True
     assert trade.sell_reason == SellType.ROI.value

@@ -3427,11 +3427,11 @@ def test_disable_ignore_roi_if_buy_signal(default_conf_usdt, limit_buy_order_usd
     trade = Trade.query.first()
     trade.update(limit_buy_order_usdt)
     # Sell due to min_roi_reached
-    patch_get_signal(freqtrade, value=(True, True, None, None))
+    patch_get_signal(freqtrade, value=(True, False, None, None))
     assert freqtrade.handle_trade(trade) is True

     # Test if buy-signal is absent
-    patch_get_signal(freqtrade, value=(False, True, None, None))
+    patch_get_signal(freqtrade, value=(False, False, None, None))
     assert freqtrade.handle_trade(trade) is True
     assert trade.sell_reason == SellType.ROI.value

@@ -45,7 +45,7 @@ def test_init_plotscript(default_conf, mocker, testdatadir):
     default_conf['trade_source'] = "file"
     default_conf['timeframe'] = "5m"
     default_conf["datadir"] = testdatadir
-    default_conf['exportfilename'] = testdatadir / "backtest-result_test.json"
+    default_conf['exportfilename'] = testdatadir / "backtest-result_new.json"
     supported_markets = ["TRX/BTC", "ADA/BTC"]
     ret = init_plotscript(default_conf, supported_markets)
     assert "ohlcv" in ret
@@ -157,7 +157,7 @@ def test_plot_trades(testdatadir, caplog):
     assert fig == fig1
     assert log_has("No trades found.", caplog)
     pair = "ADA/BTC"
-    filename = testdatadir / "backtest-result_test.json"
+    filename = testdatadir / "backtest-result_new.json"
     trades = load_backtest_data(filename)
     trades = trades.loc[trades['pair'] == pair]

@@ -294,7 +294,7 @@ def test_generate_plot_file(mocker, caplog):


 def test_add_profit(testdatadir):
-    filename = testdatadir / "backtest-result_test.json"
+    filename = testdatadir / "backtest-result_new.json"
     bt_data = load_backtest_data(filename)
     timerange = TimeRange.parse_timerange("20180110-20180112")

@@ -314,7 +314,7 @@ def test_add_profit(testdatadir):


 def test_generate_profit_graph(testdatadir):
-    filename = testdatadir / "backtest-result_test.json"
+    filename = testdatadir / "backtest-result_new.json"
     trades = load_backtest_data(filename)
     timerange = TimeRange.parse_timerange("20180110-20180112")
     pairs = ["TRX/BTC", "XLM/BTC"]
@@ -336,15 +336,20 @@ def test_generate_profit_graph(testdatadir):
     assert fig.layout.yaxis3.title.text == "Profit BTC"

     figure = fig.layout.figure
-    assert len(figure.data) == 5
+    assert len(figure.data) == 7

     avgclose = find_trace_in_fig_data(figure.data, "Avg close price")
     assert isinstance(avgclose, go.Scatter)

     profit = find_trace_in_fig_data(figure.data, "Profit")
     assert isinstance(profit, go.Scatter)
-    profit = find_trace_in_fig_data(figure.data, "Max drawdown 10.45%")
-    assert isinstance(profit, go.Scatter)
+    drawdown = find_trace_in_fig_data(figure.data, "Max drawdown 35.69%")
+    assert isinstance(drawdown, go.Scatter)
+    parallel = find_trace_in_fig_data(figure.data, "Parallel trades")
+    assert isinstance(parallel, go.Scatter)
+
+    underwater = find_trace_in_fig_data(figure.data, "Underwater Plot")
+    assert isinstance(underwater, go.Scatter)

     for pair in pairs:
         profit_pair = find_trace_in_fig_data(figure.data, f"Profit {pair}")
@@ -376,7 +381,7 @@ def test_load_and_plot_trades(default_conf, mocker, caplog, testdatadir):

     default_conf['trade_source'] = 'file'
     default_conf["datadir"] = testdatadir
-    default_conf['exportfilename'] = testdatadir / "backtest-result_test.json"
+    default_conf['exportfilename'] = testdatadir / "backtest-result_new.json"
     default_conf['indicators1'] = ["sma5", "ema10"]
     default_conf['indicators2'] = ["macd"]
     default_conf['pairs'] = ["ETH/BTC", "LTC/BTC"]
@@ -447,7 +452,7 @@ def test_plot_profit(default_conf, mocker, testdatadir):
                        match=r"No trades found, cannot generate Profit-plot.*"):
         plot_profit(default_conf)

-    default_conf['exportfilename'] = testdatadir / "backtest-result_test.json"
+    default_conf['exportfilename'] = testdatadir / "backtest-result_new.json"

     plot_profit(default_conf)

@@ -43,7 +43,7 @@ def test_worker_stopped(mocker, default_conf, caplog) -> None:
     worker.freqtrade.state = State.STOPPED
     state = worker._worker(old_state=State.RUNNING)
     assert state is State.STOPPED
-    assert log_has('Changing state to: STOPPED', caplog)
+    assert log_has('Changing state from RUNNING to: STOPPED', caplog)
     assert mock_throttle.call_count == 1

tests/testdata/backtest-result_new.json (vendored, 2 changed lines; diff suppressed because one or more lines are too long)
tests/testdata/backtest-result_test.json (vendored, 1 changed line; diff suppressed because one or more lines are too long)