Merge pull request #1 from freqtrade/develop

Up2Date
This commit is contained in:
baties 2020-05-18 20:03:57 +04:30 committed by GitHub
commit 6e5b3cd695
199 changed files with 13604 additions and 6104 deletions

@ -7,6 +7,8 @@ on:
- develop
- github_actions_tests
tags:
release:
types: [published]
pull_request:
schedule:
- cron: '0 5 * * 4'
@ -18,10 +20,10 @@ jobs:
strategy:
matrix:
os: [ ubuntu-18.04, macos-latest ]
python-version: [3.7]
python-version: [3.7, 3.8]
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v1
@ -64,19 +66,17 @@ jobs:
pip install -e .
- name: Tests
env:
COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }}
COVERALLS_SERVICE_NAME: travis-ci
TRAVIS: "true"
run: |
pytest --random-order --cov=freqtrade --cov-config=.coveragerc
- name: Coveralls
if: (startsWith(matrix.os, 'ubuntu') && matrix.python-version == '3.8')
env:
# Coveralls token. Not used as secret due to github not providing secrets to forked repositories
COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu
run: |
# Allow failure for coveralls
# Fake travis environment to get coveralls working correctly
export TRAVIS_PULL_REQUEST="https://github.com/${GITHUB_REPOSITORY}/pull/$(cat $GITHUB_EVENT_PATH | jq -r .number)"
export TRAVIS_BRANCH=${GITHUB_REF#"ref/heads"}
export CI_BRANCH=${GITHUB_REF#"ref/heads"}
echo "${TRAVIS_BRANCH}"
coveralls || true
coveralls -v || true
- name: Backtesting
run: |
@ -115,10 +115,10 @@ jobs:
strategy:
matrix:
os: [ windows-latest ]
python-version: [3.7]
python-version: [3.7, 3.8]
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v1
@ -130,8 +130,7 @@ jobs:
if: startsWith(runner.os, 'Windows')
with:
path: ~\AppData\Local\pip\Cache
key: ${{ runner.os }}-pip
restore-keys: ${{ runner.os }}-pip
key: ${{ matrix.os }}-${{ matrix.python-version }}-pip
- name: Installation
run: |
@ -175,7 +174,7 @@ jobs:
docs_check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v2
- name: Documentation syntax
run: |
@ -193,15 +192,40 @@ jobs:
deploy:
needs: [ build, build_windows, docs_check ]
runs-on: ubuntu-18.04
if: (github.event_name == 'push' || github.event_name == 'schedule') && github.repository == 'freqtrade/freqtrade'
if: (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'release') && github.repository == 'freqtrade/freqtrade'
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v1
with:
python-version: 3.8
- name: Extract branch name
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
- name: Build distribution
run: |
pip install -U setuptools wheel
python setup.py sdist bdist_wheel
- name: Publish to PyPI (Test)
uses: pypa/gh-action-pypi-publish@master
if: (steps.extract_branch.outputs.branch == 'master' || github.event_name == 'release')
with:
user: __token__
password: ${{ secrets.pypi_test_password }}
repository_url: https://test.pypi.org/legacy/
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@master
if: (steps.extract_branch.outputs.branch == 'master' || github.event_name == 'release')
with:
user: __token__
password: ${{ secrets.pypi_password }}
- name: Build and test and push docker image
env:
IMAGE_NAME: freqtradeorg/freqtrade

.gitignore

@ -6,7 +6,6 @@ user_data/*
!user_data/strategy/sample_strategy.py
!user_data/notebooks
user_data/notebooks/*
!user_data/notebooks/*example.ipynb
freqtrade-plot.html
freqtrade-profit-plot.html

@ -1,4 +1,3 @@
sudo: true
os:
- linux
dist: xenial
@ -11,10 +10,10 @@ env:
global:
- IMAGE_NAME=freqtradeorg/freqtrade
install:
- cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
- cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies; cd ..
- export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
- export TA_LIBRARY_PATH=${HOME}/dependencies/lib
- export TA_INCLUDE_PATH=${HOME}/dependencies/lib/include
- export TA_INCLUDE_PATH=${HOME}/dependencies/include
- pip install -r requirements-dev.txt
- pip install -e .
jobs:

@ -48,7 +48,7 @@ pytest tests/test_<file_name>.py::test_<method_name>
#### Run Flake8
```bash
flake8 freqtrade
flake8 freqtrade tests scripts
```
We receive a lot of code that fails the `flake8` checks.
@ -109,11 +109,11 @@ Exceptions:
Contributors may be given commit privileges. Preference will be given to those with:
1. Past contributions to FreqTrade and other related open-source projects. Contributions to FreqTrade include both code (both accepted and pending) and friendly participation in the issue tracker and Pull request reviews. Quantity and quality are considered.
1. Past contributions to Freqtrade and other related open-source projects. Contributions to Freqtrade include both code (both accepted and pending) and friendly participation in the issue tracker and Pull request reviews. Quantity and quality are considered.
1. A coding style that the other core committers find simple, minimal, and clean.
1. Access to resources for cross-platform development and testing.
1. Time to devote to the project regularly.
Being a Committer does not grant write permission on `develop` or `master` for security reasons (Users trust FreqTrade with their Exchange API keys).
Being a Committer does not grant write permission on `develop` or `master` for security reasons (Users trust Freqtrade with their Exchange API keys).
After being Committer for some time, a Committer may be named Core Committer and given full repository access.

@ -1,4 +1,4 @@
FROM python:3.7.5-slim-stretch
FROM python:3.8.3-slim-buster
RUN apt-get update \
&& apt-get -y install curl build-essential libssl-dev \

@ -2,3 +2,4 @@ include LICENSE
include README.md
include config.json.example
recursive-include freqtrade *.py
recursive-include freqtrade/templates/ *.j2 *.ipynb

@ -1,6 +1,6 @@
# Freqtrade
[![Build Status](https://travis-ci.org/freqtrade/freqtrade.svg?branch=develop)](https://travis-ci.org/freqtrade/freqtrade)
[![Freqtrade CI](https://github.com/freqtrade/freqtrade/workflows/Freqtrade%20CI/badge.svg)](https://github.com/freqtrade/freqtrade/actions/)
[![Coverage Status](https://coveralls.io/repos/github/freqtrade/freqtrade/badge.svg?branch=develop&service=github)](https://coveralls.io/github/freqtrade/freqtrade?branch=develop)
[![Documentation](https://readthedocs.org/projects/freqtrade/badge/)](https://www.freqtrade.io)
[![Maintainability](https://api.codeclimate.com/v1/badges/5737e6d668200b7518ff/maintainability)](https://codeclimate.com/github/freqtrade/freqtrade/maintainability)
@ -25,7 +25,8 @@ hesitate to read the source code and understand the mechanism of this bot.
## Exchange marketplaces supported
- [X] [Bittrex](https://bittrex.com/)
- [X] [Binance](https://www.binance.com/) ([*Note for binance users](#a-note-on-binance))
- [X] [Binance](https://www.binance.com/) ([*Note for binance users](docs/exchanges.md#blacklists))
- [X] [Kraken](https://kraken.com/)
- [ ] [113 others to tests](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_
## Documentation

@ -1,11 +0,0 @@
#!/usr/bin/env python3
import sys
import warnings
from freqtrade.main import main
warnings.warn(
"Deprecated - To continue to run the bot like this, please run `pip install -e .` again.",
DeprecationWarning)
main(sys.argv[1:])

@ -2,7 +2,16 @@
# Downloaded from https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib
# Invoke-WebRequest -Uri "https://download.lfd.uci.edu/pythonlibs/xxxxxxx/TA_Lib-0.4.17-cp37-cp37m-win_amd64.whl" -OutFile "TA_Lib-0.4.17-cp37-cp37m-win_amd64.whl"
pip install build_helpers\TA_Lib-0.4.17-cp37-cp37m-win_amd64.whl
python -m pip install --upgrade pip
$pyv = python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"
if ($pyv -eq '3.7') {
pip install build_helpers\TA_Lib-0.4.18-cp37-cp37m-win_amd64.whl
}
if ($pyv -eq '3.8') {
pip install build_helpers\TA_Lib-0.4.18-cp38-cp38-win_amd64.whl
}
pip install -r requirements-dev.txt
pip install -e .

@ -23,7 +23,7 @@ if [ $? -ne 0 ]; then
fi
# Run backtest
docker run --rm -v $(pwd)/config.json.example:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy DefaultStrategy
docker run --rm -v $(pwd)/config.json.example:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy DefaultStrategy
if [ $? -ne 0 ]; then
echo "failed running backtest"

@ -2,9 +2,11 @@
"max_open_trades": 3,
"stake_currency": "BTC",
"stake_amount": 0.05,
"tradable_balance_ratio": 0.99,
"fiat_display_currency": "USD",
"ticker_interval" : "5m",
"ticker_interval": "5m",
"dry_run": false,
"cancel_open_orders_on_exit": false,
"trailing_stop": false,
"unfilledtimeout": {
"buy": 10,
@ -22,7 +24,7 @@
"ask_strategy":{
"use_order_book": false,
"order_book_min": 1,
"order_book_max": 9,
"order_book_max": 1,
"use_sell_signal": true,
"sell_profit_only": false,
"ignore_roi_if_buy_signal": false
@ -43,7 +45,7 @@
"DASH/BTC",
"ZEC/BTC",
"XLM/BTC",
"NXT/BTC",
"XRP/BTC",
"TRX/BTC",
"ADA/BTC",
"XMR/BTC"
@ -59,7 +61,6 @@
"enabled": false,
"process_throttle_secs": 3600,
"calculate_since_number_of_days": 7,
"capital_available_percentage": 0.5,
"allowed_risk": 0.01,
"stoploss_range_min": -0.01,
"stoploss_range_max": -0.1,

@ -2,9 +2,11 @@
"max_open_trades": 3,
"stake_currency": "BTC",
"stake_amount": 0.05,
"tradable_balance_ratio": 0.99,
"fiat_display_currency": "USD",
"ticker_interval" : "5m",
"ticker_interval": "5m",
"dry_run": true,
"cancel_open_orders_on_exit": false,
"trailing_stop": false,
"unfilledtimeout": {
"buy": 10,
@ -22,7 +24,7 @@
"ask_strategy":{
"use_order_book": false,
"order_book_min": 1,
"order_book_max": 9,
"order_book_max": 1,
"use_sell_signal": true,
"sell_profit_only": false,
"ignore_roi_if_buy_signal": false
@ -64,7 +66,6 @@
"enabled": false,
"process_throttle_secs": 3600,
"calculate_since_number_of_days": 7,
"capital_available_percentage": 0.5,
"allowed_risk": 0.01,
"stoploss_range_min": -0.01,
"stoploss_range_max": -0.1,

@ -2,9 +2,13 @@
"max_open_trades": 3,
"stake_currency": "BTC",
"stake_amount": 0.05,
"tradable_balance_ratio": 0.99,
"fiat_display_currency": "USD",
"amount_reserve_percent" : 0.05,
"amount_reserve_percent": 0.05,
"amend_last_stake_amount": false,
"last_stake_amount_min_ratio": 0.5,
"dry_run": false,
"cancel_open_orders_on_exit": false,
"ticker_interval": "5m",
"trailing_stop": false,
"trailing_stop_positive": 0.005,
@ -22,6 +26,7 @@
"sell": 30
},
"bid_strategy": {
"price_side": "bid",
"use_order_book": false,
"ask_last_balance": 0.0,
"order_book_top": 1,
@ -31,9 +36,10 @@
}
},
"ask_strategy":{
"price_side": "ask",
"use_order_book": false,
"order_book_min": 1,
"order_book_max": 9,
"order_book_max": 1,
"use_sell_signal": true,
"sell_profit_only": false,
"ignore_roi_if_buy_signal": false
@ -59,8 +65,8 @@
"refresh_period": 1800
},
{"method": "PrecisionFilter"},
{"method": "PriceFilter", "low_price_ratio": 0.01
}
{"method": "PriceFilter", "low_price_ratio": 0.01},
{"method": "SpreadFilter", "max_spread_ratio": 0.005}
],
"exchange": {
"name": "bittrex",
@ -96,7 +102,6 @@
"enabled": false,
"process_throttle_secs": 3600,
"calculate_since_number_of_days": 7,
"capital_available_percentage": 0.5,
"allowed_risk": 0.01,
"stoploss_range_min": -0.01,
"stoploss_range_max": -0.1,
@ -116,6 +121,7 @@
"enabled": false,
"listen_ip_address": "127.0.0.1",
"listen_port": 8080,
"jwt_secret_key": "somethingrandom",
"username": "freqtrader",
"password": "SuperSecurePassword"
},
@ -127,5 +133,7 @@
"heartbeat_interval": 60
},
"strategy": "DefaultStrategy",
"strategy_path": "user_data/strategies/"
"strategy_path": "user_data/strategies/",
"dataformat_ohlcv": "json",
"dataformat_trades": "jsongz"
}

@ -2,9 +2,11 @@
"max_open_trades": 5,
"stake_currency": "EUR",
"stake_amount": 10,
"tradable_balance_ratio": 0.99,
"fiat_display_currency": "EUR",
"ticker_interval" : "5m",
"ticker_interval": "5m",
"dry_run": true,
"cancel_open_orders_on_exit": false,
"trailing_stop": false,
"unfilledtimeout": {
"buy": 10,
@ -22,7 +24,7 @@
"ask_strategy":{
"use_order_book": false,
"order_book_min": 1,
"order_book_max": 9,
"order_book_max": 1,
"use_sell_signal": true,
"sell_profit_only": false,
"ignore_roi_if_buy_signal": false
@ -70,7 +72,6 @@
"enabled": false,
"process_throttle_secs": 3600,
"calculate_since_number_of_days": 7,
"capital_available_percentage": 0.5,
"allowed_risk": 0.01,
"stoploss_range_min": -0.01,
"stoploss_range_max": -0.1,

@ -3,6 +3,19 @@ version: '3'
services:
freqtrade:
image: freqtradeorg/freqtrade:master
# image: freqtradeorg/freqtrade:develop
# Build step - only needed when additional dependencies are needed
# build:
# context: .
# dockerfile: "./Dockerfile.technical"
restart: unless-stopped
container_name: freqtrade
volumes:
- "./user_data:/freqtrade/user_data"
- "./config.json:/freqtrade/config.json"
# Default command used when running `docker compose up`
command: >
trade
--logfile /freqtrade/user_data/logs/freqtrade.log
--db-url sqlite:////freqtrade/user_data/tradesv3.sqlite
--config /freqtrade/user_data/config.json
--strategy SampleStrategy

@ -4,6 +4,34 @@ This page explains some advanced Hyperopt topics that may require higher
coding skills and Python knowledge than creation of an ordinal hyperoptimization
class.
## Derived hyperopt classes
Custom hyperopt classes can be derived in the same way [it can be done for strategies](strategy-customization.md#derived-strategies).
For hyperoptimization, you may, for example, override how dimensions are defined in your optimization hyperspace:
```python
class MyAwesomeHyperOpt(IHyperOpt):
...
# Uses default stoploss dimension
class MyAwesomeHyperOpt2(MyAwesomeHyperOpt):
@staticmethod
def stoploss_space() -> List[Dimension]:
# Override boundaries for stoploss
return [
Real(-0.33, -0.01, name='stoploss'),
]
```
and then quickly switch between hyperopt classes, running the optimization process with the hyperopt class you need in each particular case:
```
$ freqtrade hyperopt --hyperopt MyAwesomeHyperOpt ...
or
$ freqtrade hyperopt --hyperopt MyAwesomeHyperOpt2 ...
```
## Creating and using a custom loss function
To use a custom loss function class, make sure that the function `hyperopt_loss_function` is defined in your custom hyperopt loss class.
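For orientation, here is a minimal sketch of such a class, assuming the `IHyperOptLoss` interface and the results columns used by the built-in loss functions (the import path, class name and `profit_percent` column are illustrative assumptions, not requirements stated on this page):

```python
from datetime import datetime

from pandas import DataFrame

from freqtrade.optimize.hyperopt import IHyperOptLoss


class MyProfitHyperOptLoss(IHyperOptLoss):
    """Illustrative loss function: prefer higher total profit (smaller loss is better)."""

    @staticmethod
    def hyperopt_loss_function(results: DataFrame, trade_count: int,
                               min_date: datetime, max_date: datetime,
                               *args, **kwargs) -> float:
        # 'profit_percent' is assumed to be the per-trade profit column
        # of the backtest results DataFrame.
        total_profit = results['profit_percent'].sum()
        # Negate so that more profitable epochs produce a smaller loss value.
        return -total_profit
```

Such a class would then be selected with `--hyperopt-loss MyProfitHyperOptLoss`, analogous to switching hyperopt classes above.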

@ -37,30 +37,30 @@ as the watchdog.
## Advanced Logging
On many Linux systems the bot can be configured to send its log messages to `syslog` or `journald` system services. Logging to a remote `syslog` server is also available on Windows. The special values for the `--logfilename` command line option can be used for this.
On many Linux systems the bot can be configured to send its log messages to `syslog` or `journald` system services. Logging to a remote `syslog` server is also available on Windows. The special values for the `--logfile` command line option can be used for this.
### Logging to syslog
To send Freqtrade log messages to a local or remote `syslog` service use the `--logfilename` command line option with the value in the following format:
To send Freqtrade log messages to a local or remote `syslog` service use the `--logfile` command line option with the value in the following format:
* `--logfilename syslog:<syslog_address>` -- send log messages to `syslog` service using the `<syslog_address>` as the syslog address.
* `--logfile syslog:<syslog_address>` -- send log messages to `syslog` service using the `<syslog_address>` as the syslog address.
The syslog address can be either a Unix domain socket (socket filename) or a UDP socket specification, consisting of IP address and UDP port, separated by the `:` character.
So, the following are the examples of possible usages:
* `--logfilename syslog:/dev/log` -- log to syslog (rsyslog) using the `/dev/log` socket, suitable for most systems.
* `--logfilename syslog` -- same as above, the shortcut for `/dev/log`.
* `--logfilename syslog:/var/run/syslog` -- log to syslog (rsyslog) using the `/var/run/syslog` socket. Use this on MacOS.
* `--logfilename syslog:localhost:514` -- log to local syslog using UDP socket, if it listens on port 514.
* `--logfilename syslog:<ip>:514` -- log to remote syslog at IP address and port 514. This may be used on Windows for remote logging to an external syslog server.
* `--logfile syslog:/dev/log` -- log to syslog (rsyslog) using the `/dev/log` socket, suitable for most systems.
* `--logfile syslog` -- same as above, the shortcut for `/dev/log`.
* `--logfile syslog:/var/run/syslog` -- log to syslog (rsyslog) using the `/var/run/syslog` socket. Use this on MacOS.
* `--logfile syslog:localhost:514` -- log to local syslog using UDP socket, if it listens on port 514.
* `--logfile syslog:<ip>:514` -- log to remote syslog at IP address and port 514. This may be used on Windows for remote logging to an external syslog server.
Log messages are sent to `syslog` with the `user` facility. So you can see them with the following commands:
* `tail -f /var/log/user`, or
* install a comprehensive graphical viewer (for instance, 'Log File Viewer' for Ubuntu).
On many systems `syslog` (`rsyslog`) fetches data from `journald` (and vice versa), so both `--logfilename syslog` or `--logfilename journald` can be used and the messages be viewed with both `journalctl` and a syslog viewer utility. You can combine this in any way which suites you better.
On many systems `syslog` (`rsyslog`) fetches data from `journald` (and vice versa), so either `--logfile syslog` or `--logfile journald` can be used and the messages can be viewed with both `journalctl` and a syslog viewer utility. You can combine this in any way that suits you best.
For `rsyslog` the messages from the bot can be redirected into a separate dedicated log file. To achieve this, add
```
@ -78,9 +78,9 @@ $RepeatedMsgReduction on
This needs the `systemd` python package installed as the dependency, which is not available on Windows. Hence, the whole journald logging functionality is not available for a bot running on Windows.
To send Freqtrade log messages to `journald` system service use the `--logfilename` command line option with the value in the following format:
To send Freqtrade log messages to `journald` system service use the `--logfile` command line option with the value in the following format:
* `--logfilename journald` -- send log messages to `journald`.
* `--logfile journald` -- send log messages to `journald`.
Log messages are sent to `journald` with the `user` facility. So you can see them with the following commands:
@ -89,4 +89,4 @@ Log messages are send to `journald` with the `user` facility. So you can see the
There are many other options in the `journalctl` utility to filter the messages, see manual pages for this utility.
On many systems `syslog` (`rsyslog`) fetches data from `journald` (and vice versa), so both `--logfilename syslog` or `--logfilename journald` can be used and the messages be viewed with both `journalctl` and a syslog viewer utility. You can combine this in any way which suites you better.
On many systems `syslog` (`rsyslog`) fetches data from `journald` (and vice versa), so either `--logfile syslog` or `--logfile journald` can be used and the messages can be viewed with both `journalctl` and a syslog viewer utility. You can combine this in any way that suits you best.
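For readers wondering what this maps to under the hood, the following is a rough illustration (not Freqtrade's actual implementation) of how a `syslog:<syslog_address>` value can be turned into a standard-library handler that logs with the `user` facility:

```python
import logging
from logging.handlers import SysLogHandler


def make_syslog_handler(address: str) -> SysLogHandler:
    """Build a syslog handler from a '<host>:<port>' or socket-path address."""
    if ':' in address:
        host, port = address.rsplit(':', 1)
        target = (host, int(port))   # UDP socket, e.g. localhost:514
    else:
        target = address             # Unix domain socket, e.g. /dev/log
    return SysLogHandler(address=target, facility=SysLogHandler.LOG_USER)


# Example (assumes a local /dev/log socket, as on most Linux systems):
# logging.getLogger('freqtrade').addHandler(make_syslog_handler('/dev/log'))
```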

@ -11,30 +11,34 @@ Now you have good Buy and Sell strategies and some historic data, you want to te
real data. This is what we call
[backtesting](https://en.wikipedia.org/wiki/Backtesting).
Backtesting will use the crypto-currencies (pairs) from your config file and load ticker data from `user_data/data/<exchange>` by default.
If no data is available for the exchange / pair / ticker interval combination, backtesting will ask you to download them first using `freqtrade download-data`.
Backtesting will use the crypto-currencies (pairs) from your config file and load historical candle (OHLCV) data from `user_data/data/<exchange>` by default.
If no data is available for the exchange / pair / timeframe (ticker interval) combination, backtesting will ask you to download them first using `freqtrade download-data`.
For details on downloading, please refer to the [Data Downloading](data-download.md) section in the documentation.
The result of backtesting will confirm if your bot has better odds of making a profit than a loss.
!!! Tip "Using dynamic pairlists for backtesting"
While using dynamic pairlists during backtesting is not possible, a dynamic pairlist using current data can be generated via the [`test-pairlist`](utils.md#test-pairlist) command, and needs to be specified as `"pair_whitelist"` attribute in the configuration.
!!! Warning "Using dynamic pairlists for backtesting"
Using dynamic pairlists is possible, however it relies on the current market conditions - which will not reflect the historic status of the pairlist.
Also, when using pairlists other than StaticPairlist, reproducibility of backtesting results cannot be guaranteed.
Please read the [pairlists documentation](configuration.md#pairlists) for more information.
To achieve reproducible results, best generate a pairlist via the [`test-pairlist`](utils.md#test-pairlist) command and use that as static pairlist.
### Run a backtesting against the currencies listed in your config file
#### With 5 min tickers (Per default)
#### With 5 min candle (OHLCV) data (per default)
```bash
freqtrade backtesting
```
#### With 1 min tickers
#### With 1 min candle (OHLCV) data
```bash
freqtrade backtesting --ticker-interval 1m
```
#### Using a different on-disk ticker-data source
#### Using a different on-disk historical candle (OHLCV) data source
Assume you downloaded the history data from the Bittrex exchange and kept it in the `user_data/data/bittrex-20180101` directory.
You can then use this data for backtesting as follows:
@ -78,13 +82,17 @@ Please also read about the [strategy startup period](strategy-customization.md#s
#### Supplying custom fee value
Sometimes your account has certain fee rebates (fee reductions starting with a certain account size or monthly volume), which are not visible to ccxt.
To account for this in backtesting, you can use `--fee 0.001` to supply this value to backtesting.
This fee must be a percentage, and will be applied twice (once for trade entry, and once for trade exit).
To account for this in backtesting, you can use the `--fee` command line option to supply this value to backtesting.
This fee must be a ratio, and will be applied twice (once for trade entry, and once for trade exit).
For example, if the buying and selling commission fee is 0.1% (i.e., 0.001 written as ratio), then you would run backtesting as the following:
```bash
freqtrade backtesting --fee 0.001
```
!!! Note
Only supply this option (or the corresponding configuration parameter) if you want to experiment with different fee values. By default, Backtesting fetches the default fee from the exchange pair/market info.
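As a quick sanity check of the "applied twice" behaviour, the hypothetical calculation below shows how a 0.1% fee affects a single round trip (all numbers are illustrative and not taken from a real backtest):

```python
fee = 0.001        # 0.1 % expressed as a ratio, as passed via --fee
stake = 0.05       # stake amount in BTC (illustrative)
gross_gain = 0.02  # assumed 2 % favourable price move (illustrative)

cost = stake * (1 + fee)                         # fee charged on trade entry
proceeds = stake * (1 + gross_gain) * (1 - fee)  # fee charged again on trade exit
net_profit_ratio = proceeds / cost - 1
print(f"net profit: {net_profit_ratio:.4%}")     # ≈ 1.80 %, i.e. roughly 0.2 % lost to fees
```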
#### Running backtest with smaller testset by using timerange
@ -115,45 +123,46 @@ A backtesting result will look like that:
```
========================================================= BACKTESTING REPORT ========================================================
| pair | buy count | avg profit % | cum profit % | tot profit BTC | tot profit % | avg duration | profit | loss |
|:---------|------------:|---------------:|---------------:|-----------------:|---------------:|:---------------|---------:|-------:|
| ADA/BTC | 35 | -0.11 | -3.88 | -0.00019428 | -1.94 | 4:35:00 | 14 | 21 |
| ARK/BTC | 11 | -0.41 | -4.52 | -0.00022647 | -2.26 | 2:03:00 | 3 | 8 |
| BTS/BTC | 32 | 0.31 | 9.78 | 0.00048938 | 4.89 | 5:05:00 | 18 | 14 |
| DASH/BTC | 13 | -0.08 | -1.07 | -0.00005343 | -0.53 | 4:39:00 | 6 | 7 |
| ENG/BTC | 18 | 1.36 | 24.54 | 0.00122807 | 12.27 | 2:50:00 | 8 | 10 |
| EOS/BTC | 36 | 0.08 | 3.06 | 0.00015304 | 1.53 | 3:34:00 | 16 | 20 |
| ETC/BTC | 26 | 0.37 | 9.51 | 0.00047576 | 4.75 | 6:14:00 | 11 | 15 |
| ETH/BTC | 33 | 0.30 | 9.96 | 0.00049856 | 4.98 | 7:31:00 | 16 | 17 |
| IOTA/BTC | 32 | 0.03 | 1.09 | 0.00005444 | 0.54 | 3:12:00 | 14 | 18 |
| LSK/BTC | 15 | 1.75 | 26.26 | 0.00131413 | 13.13 | 2:58:00 | 6 | 9 |
| LTC/BTC | 32 | -0.04 | -1.38 | -0.00006886 | -0.69 | 4:49:00 | 11 | 21 |
| NANO/BTC | 17 | 1.26 | 21.39 | 0.00107058 | 10.70 | 1:55:00 | 10 | 7 |
| NEO/BTC | 23 | 0.82 | 18.97 | 0.00094936 | 9.48 | 2:59:00 | 10 | 13 |
| REQ/BTC | 9 | 1.17 | 10.54 | 0.00052734 | 5.27 | 3:47:00 | 4 | 5 |
| XLM/BTC | 16 | 1.22 | 19.54 | 0.00097800 | 9.77 | 3:15:00 | 7 | 9 |
| XMR/BTC | 23 | -0.18 | -4.13 | -0.00020696 | -2.07 | 5:30:00 | 12 | 11 |
| XRP/BTC | 35 | 0.66 | 22.96 | 0.00114897 | 11.48 | 3:49:00 | 12 | 23 |
| ZEC/BTC | 22 | -0.46 | -10.18 | -0.00050971 | -5.09 | 2:22:00 | 7 | 15 |
| TOTAL | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 | 243 |
| Pair | Buys | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins | Draws | Losses |
|:---------|-------:|---------------:|---------------:|-----------------:|---------------:|:---------------|------:|-------:|--------:|
| ADA/BTC | 35 | -0.11 | -3.88 | -0.00019428 | -1.94 | 4:35:00 | 14 | 0 | 21 |
| ARK/BTC | 11 | -0.41 | -4.52 | -0.00022647 | -2.26 | 2:03:00 | 3 | 0 | 8 |
| BTS/BTC | 32 | 0.31 | 9.78 | 0.00048938 | 4.89 | 5:05:00 | 18 | 0 | 14 |
| DASH/BTC | 13 | -0.08 | -1.07 | -0.00005343 | -0.53 | 4:39:00 | 6 | 0 | 7 |
| ENG/BTC | 18 | 1.36 | 24.54 | 0.00122807 | 12.27 | 2:50:00 | 8 | 0 | 10 |
| EOS/BTC | 36 | 0.08 | 3.06 | 0.00015304 | 1.53 | 3:34:00 | 16 | 0 | 20 |
| ETC/BTC | 26 | 0.37 | 9.51 | 0.00047576 | 4.75 | 6:14:00 | 11 | 0 | 15 |
| ETH/BTC | 33 | 0.30 | 9.96 | 0.00049856 | 4.98 | 7:31:00 | 16 | 0 | 17 |
| IOTA/BTC | 32 | 0.03 | 1.09 | 0.00005444 | 0.54 | 3:12:00 | 14 | 0 | 18 |
| LSK/BTC | 15 | 1.75 | 26.26 | 0.00131413 | 13.13 | 2:58:00 | 6 | 0 | 9 |
| LTC/BTC | 32 | -0.04 | -1.38 | -0.00006886 | -0.69 | 4:49:00 | 11 | 0 | 21 |
| NANO/BTC | 17 | 1.26 | 21.39 | 0.00107058 | 10.70 | 1:55:00 | 10 | 0 | 7 |
| NEO/BTC | 23 | 0.82 | 18.97 | 0.00094936 | 9.48 | 2:59:00 | 10 | 0 | 13 |
| REQ/BTC | 9 | 1.17 | 10.54 | 0.00052734 | 5.27 | 3:47:00 | 4 | 0 | 5 |
| XLM/BTC | 16 | 1.22 | 19.54 | 0.00097800 | 9.77 | 3:15:00 | 7 | 0 | 9 |
| XMR/BTC | 23 | -0.18 | -4.13 | -0.00020696 | -2.07 | 5:30:00 | 12 | 0 | 11 |
| XRP/BTC | 35 | 0.66 | 22.96 | 0.00114897 | 11.48 | 3:49:00 | 12 | 0 | 23 |
| ZEC/BTC | 22 | -0.46 | -10.18 | -0.00050971 | -5.09 | 2:22:00 | 7 | 0 | 15 |
| TOTAL | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 | 0 | 243 |
========================================================= SELL REASON STATS =========================================================
| Sell Reason | Count |
|:-------------------|--------:|
| trailing_stop_loss | 205 |
| stop_loss | 166 |
| sell_signal | 56 |
| force_sell | 2 |
| Sell Reason | Sells | Wins | Draws | Losses |
|:-------------------|--------:|------:|-------:|--------:|
| trailing_stop_loss | 205 | 150 | 0 | 55 |
| stop_loss | 166 | 0 | 0 | 166 |
| sell_signal | 56 | 36 | 0 | 20 |
| force_sell | 2 | 0 | 0 | 2 |
====================================================== LEFT OPEN TRADES REPORT ======================================================
| pair | buy count | avg profit % | cum profit % | tot profit BTC | tot profit % | avg duration | profit | loss |
|:---------|------------:|---------------:|---------------:|-----------------:|---------------:|:---------------|---------:|-------:|
| ADA/BTC | 1 | 0.89 | 0.89 | 0.00004434 | 0.44 | 6:00:00 | 1 | 0 |
| LTC/BTC | 1 | 0.68 | 0.68 | 0.00003421 | 0.34 | 2:00:00 | 1 | 0 |
| TOTAL | 2 | 0.78 | 1.57 | 0.00007855 | 0.78 | 4:00:00 | 2 | 0 |
| Pair | Buys | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins | Draws | Losses |
|:---------|-------:|---------------:|---------------:|-----------------:|---------------:|:---------------|------:|-------:|--------:|
| ADA/BTC | 1 | 0.89 | 0.89 | 0.00004434 | 0.44 | 6:00:00 | 1 | 0 | 0 |
| LTC/BTC | 1 | 0.68 | 0.68 | 0.00003421 | 0.34 | 2:00:00 | 1 | 0 | 0 |
| TOTAL | 2 | 0.78 | 1.57 | 0.00007855 | 0.78 | 4:00:00 | 2 | 0 | 0 |
```
The 1st table contains all trades the bot made, including "left open trades".
The 2nd table contains a recap of sell reasons.
This table can tell you which area needs some additional work (i.e. all `sell_signal` trades are losses, so we should disable the sell-signal or work on improving that).
The 3rd table contains all trades the bot had to `forcesell` at the end of the backtest period to present a full picture.
This is necessary to simulate realistic behaviour, since the backtest period has to end at some point, while realistically, you could leave the bot running forever.
@ -193,7 +202,7 @@ Since backtesting lacks some detailed information about what happens within a ca
- Buys happen at open-price
- Sell signal sells happen at open-price of the following candle
- Low happens before high for stoploss, protecting capital first.
- Low happens before high for stoploss, protecting capital first
- ROI
- sells are compared to high - but the ROI value is used (e.g. ROI = 2%, high=5% - so the sell will be at 2%)
- sells are never "below the candle", so a ROI of 2% may result in a sell at 2.4% if low was at 2.4% profit
@ -203,6 +212,7 @@ Since backtesting lacks some detailed information about what happens within a ca
- High happens first - adjusting stoploss
- Low uses the adjusted stoploss (so sells with large high-low difference are backtested correctly)
- Sell-reason does not explain if a trade was positive or negative, just what triggered the sell (this can look odd if negative ROI values are used)
- Stoploss (and trailing stoploss) is evaluated before ROI within one candle. So you can often see more trades with the `stoploss` and/or `trailing_stop` sell reason comparing to the results obtained with the same strategy in the Dry Run/Live Trade modes.
Taking these assumptions, backtesting tries to mirror real trading as closely as possible. However, backtesting will **never** replace running a strategy in dry-run mode.
Also, keep in mind that past results don't guarantee future success.
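To make the ordering above concrete, here is a simplified sketch of how a single candle could be resolved for a trade opened at `open_rate` (an illustration of the stated assumptions, not the bot's actual backtesting code; it ignores the "never below the candle" refinement):

```python
def resolve_candle(open_rate: float, high: float, low: float,
                   stoploss: float, roi: float):
    """Return (sell_reason, exit_rate) for one candle, or None if the trade stays open.

    Mirrors the documented assumptions: the low is assumed to happen before
    the high, and stoploss is evaluated before ROI within the candle.
    """
    stop_rate = open_rate * (1 + stoploss)  # stoploss is negative, e.g. -0.10
    roi_rate = open_rate * (1 + roi)        # e.g. roi = 0.02 for a 2 % target

    if low <= stop_rate:                    # low first: capital protection wins
        return 'stop_loss', stop_rate
    if high >= roi_rate:                    # compared to high, but the ROI rate is used
        return 'roi', roi_rate
    return None                             # neither threshold was touched


# Example: the 2 % ROI target is used even though the candle high reached 5 %.
print(resolve_candle(open_rate=100.0, high=105.0, low=99.0, stoploss=-0.10, roi=0.02))
# -> ('roi', 102.0)
```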
@ -218,7 +228,7 @@ You can then load the trades to perform further analysis as shown in our [data a
To compare multiple strategies, a list of Strategies can be provided to backtesting.
This is limited to 1 ticker-interval per run, however, data is only loaded once from disk so if you have multiple
This is limited to 1 timeframe (ticker interval) value per run. However, data is only loaded once from disk so if you have multiple
strategies you'd like to compare, this will give a nice runtime boost.
All listed Strategies need to be in the same directory.
@ -232,11 +242,11 @@ There will be an additional table comparing win/losses of the different strategi
Detailed output for all strategies one after the other will be available, so make sure to scroll up to see the details per strategy.
```
=========================================================== Strategy Summary ===========================================================
| Strategy | buy count | avg profit % | cum profit % | tot profit BTC | tot profit % | avg duration | profit | loss |
|:------------|------------:|---------------:|---------------:|-----------------:|---------------:|:---------------|---------:|-------:|
| Strategy1 | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 | 243 |
| Strategy2 | 1487 | -0.13 | -197.58 | -0.00988917 | -98.79 | 4:43:00 | 662 | 825 |
=========================================================== STRATEGY SUMMARY ===========================================================
| Strategy | Buys | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins | Draws | Losses |
|:------------|-------:|---------------:|---------------:|-----------------:|---------------:|:---------------|------:|-------:|-------:|
| Strategy1 | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 | 0 | 243 |
| Strategy2 | 1487 | -0.13 | -197.58 | -0.00988917 | -98.79 | 4:43:00 | 662 | 0 | 825 |
```
## Next step

@ -45,19 +45,23 @@ optional arguments:
-h, --help show this help message and exit
--db-url PATH Override trades database URL, this is useful in custom
deployments (default: `sqlite:///tradesv3.sqlite` for
Live Run mode, `sqlite://` for Dry Run).
Live Run mode, `sqlite:///tradesv3.dryrun.sqlite` for
Dry Run).
--sd-notify Notify systemd service manager.
--dry-run Enforce dry-run for trading (removes Exchange secrets
and simulates trades).
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified.
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
Specify configuration file (default:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
@ -68,6 +72,8 @@ Strategy arguments:
Specify strategy class name which will be used by the
bot.
--strategy-path PATH Specify additional strategy lookup path.
```
### How to specify which configuration file be used?
@ -138,10 +144,10 @@ It is recommended to use version control to keep track of changes to your strate
### How to use **--strategy**?
This parameter will allow you to load your custom strategy class.
Per default without `--strategy` or `-s` the bot will load the
`DefaultStrategy` included with the bot (`freqtrade/strategy/default_strategy.py`).
To test the bot installation, you can use the `SampleStrategy` installed by the `create-userdir` subcommand (usually `user_data/strategy/sample_strategy.py`).
The bot will search your strategy file within `user_data/strategies` and `freqtrade/strategy`.
The bot will search your strategy file within `user_data/strategies`.
To use other directories, please read the next section about `--strategy-path`.
To load a strategy, simply pass the class name (e.g.: `CustomStrategy`) in this parameter.
@ -192,8 +198,8 @@ Backtesting also uses the config specified via `-c/--config`.
usage: freqtrade backtesting [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH] [-s NAME]
[--strategy-path PATH] [-i TICKER_INTERVAL]
[--timerange TIMERANGE] [--max_open_trades INT]
[--stake_amount STAKE_AMOUNT] [--fee FLOAT]
[--timerange TIMERANGE] [--max-open-trades INT]
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
[--eps] [--dmmp]
[--strategy-list STRATEGY_LIST [STRATEGY_LIST ...]]
[--export EXPORT] [--export-filename PATH]
@ -205,10 +211,12 @@ optional arguments:
`1d`).
--timerange TIMERANGE
Specify what timerange of data to use.
--max_open_trades INT
Specify max_open_trades to use.
--stake_amount STAKE_AMOUNT
Specify stake_amount.
--max-open-trades INT
Override the value of the `max_open_trades`
configuration setting.
--stake-amount STAKE_AMOUNT
Override the value of the `stake_amount` configuration
setting.
--fee FLOAT Specify fee ratio. Will be applied twice (on trade
entry and exit).
--eps, --enable-position-stacking
@ -236,12 +244,15 @@ optional arguments:
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified.
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
Specify configuration file (default:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
@ -264,17 +275,17 @@ Check the corresponding [Data Downloading](data-download.md) section for more de
## Hyperopt commands
To optimize your strategy, you can use hyperopt parameter hyperoptimization
to find optimal parameter values for your stategy.
to find optimal parameter values for your strategy.
```
usage: freqtrade hyperopt [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
[--userdir PATH] [-s NAME] [--strategy-path PATH]
[-i TICKER_INTERVAL] [--timerange TIMERANGE]
[--max_open_trades INT]
[--stake_amount STAKE_AMOUNT] [--fee FLOAT]
[--max-open-trades INT]
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
[--hyperopt NAME] [--hyperopt-path PATH] [--eps]
[-e INT]
[--spaces {all,buy,sell,roi,stoploss} [{all,buy,sell,roi,stoploss} ...]]
[--spaces {all,buy,sell,roi,stoploss,trailing,default} [{all,buy,sell,roi,stoploss,trailing,default} ...]]
[--dmmp] [--print-all] [--no-color] [--print-json]
[-j JOBS] [--random-state INT] [--min-trades INT]
[--continue] [--hyperopt-loss NAME]
@ -286,10 +297,12 @@ optional arguments:
`1d`).
--timerange TIMERANGE
Specify what timerange of data to use.
--max_open_trades INT
Specify max_open_trades to use.
--stake_amount STAKE_AMOUNT
Specify stake_amount.
--max-open-trades INT
Override the value of the `max_open_trades`
configuration setting.
--stake-amount STAKE_AMOUNT
Override the value of the `stake_amount` configuration
setting.
--fee FLOAT Specify fee ratio. Will be applied twice (on trade
entry and exit).
--hyperopt NAME Specify hyperopt class name which will be used by the
@ -300,9 +313,9 @@ optional arguments:
Allow buying the same pair multiple times (position
stacking).
-e INT, --epochs INT Specify number of epochs (default: 100).
--spaces {all,buy,sell,roi,stoploss} [{all,buy,sell,roi,stoploss} ...]
--spaces {all,buy,sell,roi,stoploss,trailing,default} [{all,buy,sell,roi,stoploss,trailing,default} ...]
Specify which parameters to hyperopt. Space-separated
list. Default: `all`.
list.
--dmmp, --disable-max-market-positions
Disable applying `max_open_trades` during backtest
(same as setting `max_open_trades` to a very high
@ -310,7 +323,7 @@ optional arguments:
--print-all Print all results, not only the best ones.
--no-color Disable colorization of hyperopt results. May be
useful if you are redirecting output to a file.
--print-json Print best result detailization in JSON format.
--print-json Print best results in JSON format.
-j JOBS, --job-workers JOBS
The number of concurrently running jobs for
hyperoptimization (hyperopt worker processes). If -1
@ -328,18 +341,23 @@ optional arguments:
class (IHyperOptLoss). Different functions can
generate completely different results, since the
target for optimization is different. Built-in
Hyperopt-loss-functions are: DefaultHyperOptLoss,
OnlyProfitHyperOptLoss, SharpeHyperOptLoss (default:
`DefaultHyperOptLoss`).
Hyperopt-loss-functions are:
DefaultHyperOptLoss, OnlyProfitHyperOptLoss,
SharpeHyperOptLoss, SharpeHyperOptLossDaily,
SortinoHyperOptLoss, SortinoHyperOptLossDaily.
(default: `DefaultHyperOptLoss`).
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified.
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
Specify configuration file (default:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
@ -350,6 +368,7 @@ Strategy arguments:
Specify strategy class name which will be used by the
bot.
--strategy-path PATH Specify additional strategy lookup path.
```
## Edge commands
@ -360,7 +379,7 @@ To know your trade expectancy and winrate against historical data, you can use E
usage: freqtrade edge [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
[--userdir PATH] [-s NAME] [--strategy-path PATH]
[-i TICKER_INTERVAL] [--timerange TIMERANGE]
[--max_open_trades INT] [--stake_amount STAKE_AMOUNT]
[--max-open-trades INT] [--stake-amount STAKE_AMOUNT]
[--fee FLOAT] [--stoplosses STOPLOSS_RANGE]
optional arguments:
@ -370,10 +389,12 @@ optional arguments:
`1d`).
--timerange TIMERANGE
Specify what timerange of data to use.
--max_open_trades INT
Specify max_open_trades to use.
--stake_amount STAKE_AMOUNT
Specify stake_amount.
--max-open-trades INT
Override the value of the `max_open_trades`
configuration setting.
--stake-amount STAKE_AMOUNT
Override the value of the `stake_amount` configuration
setting.
--fee FLOAT Specify fee ratio. Will be applied twice (on trade
entry and exit).
--stoplosses STOPLOSS_RANGE
@ -384,12 +405,15 @@ optional arguments:
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified.
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
Specify configuration file (default:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
@ -400,6 +424,7 @@ Strategy arguments:
Specify strategy class name which will be used by the
bot.
--strategy-path PATH Specify additional strategy lookup path.
```
To understand edge and how to read the results, please read the [edge documentation](edge.md).

@ -34,78 +34,88 @@ The prevelance for all Options is as follows:
- CLI arguments override any other option
- Configuration files are used in sequence (last file wins), and override Strategy configurations.
- Strategy configurations are only used if they are not set via configuration or via command line arguments. These options are market with [Strategy Override](#parameters-in-the-strategy) in the below table.
- Strategy configurations are only used if they are not set via configuration or via command line arguments. These options are marked with [Strategy Override](#parameters-in-the-strategy) in the below table.
Mandatory parameters are marked as **Required**, which means that they are required to be set in one of the possible ways.
| Command | Description |
|----------|-------------|
| `max_open_trades` | **Required.** Number of trades open your bot will have. If -1 then it is ignored (i.e. potentially unlimited open trades).<br> ***Datatype:*** *Positive integer or -1.*
| `stake_currency` | **Required.** Crypto-currency used for trading. [Strategy Override](#parameters-in-the-strategy). <br> ***Datatype:*** *String*
| `stake_amount` | **Required.** Amount of crypto-currency your bot will use for each trade. Set it to `"unlimited"` to allow the bot to use all available balance. [More information below](#understand-stake_amount). [Strategy Override](#parameters-in-the-strategy). <br> ***Datatype:*** *Positive float or `"unlimited"`.*
| `amount_reserve_percent` | Reserve some amount in min pair stake amount. The bot will reserve `amount_reserve_percent` + stoploss value when calculating min pair stake amount in order to avoid possible trade refusals. <br>*Defaults to `0.05` (5%).* <br> ***Datatype:*** *Positive Float as ratio.*
| `ticker_interval` | The ticker interval to use (e.g `1m`, `5m`, `15m`, `30m`, `1h` ...). [Strategy Override](#parameters-in-the-strategy). <br> ***Datatype:*** *String*
| `fiat_display_currency` | Fiat currency used to show your profits. [More information below](#what-values-can-be-used-for-fiat_display_currency). <br> ***Datatype:*** *String*
| `dry_run` | **Required.** Define if the bot must be in Dry Run or production mode. <br>*Defaults to `true`.* <br> ***Datatype:*** *Boolean*
| `dry_run_wallet` | Define the starting amount in stake currency for the simulated wallet used by the bot running in the Dry Run mode.<br>*Defaults to `1000`.* <br> ***Datatype:*** *Float*
| `process_only_new_candles` | Enable processing of indicators only when new candles arrive. If false each loop populates the indicators, this will mean the same candle is processed many times creating system load but can be useful of your strategy depends on tick data not only candle. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> ***Datatype:*** *Boolean*
| `minimal_roi` | **Required.** Set the threshold in percent the bot will use to sell a trade. [More information below](#understand-minimal_roi). [Strategy Override](#parameters-in-the-strategy). <br> ***Datatype:*** *Dict*
| `stoploss` | **Required.** Value of the stoploss in percent used by the bot. More details in the [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br> ***Datatype:*** *Float (as ratio)*
| `trailing_stop` | Enables trailing stoploss (based on `stoploss` in either configuration or strategy file). More details in the [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br> ***Datatype:*** *Boolean*
| `trailing_stop_positive` | Changes stoploss once profit has been reached. More details in the [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br> ***Datatype:*** *Float*
| `trailing_stop_positive_offset` | Offset on when to apply `trailing_stop_positive`. Percentage value which should be positive. More details in the [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0` (no offset).* <br> ***Datatype:*** *Float*
| `trailing_only_offset_is_reached` | Only apply trailing stoploss when the offset is reached. [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> ***Datatype:*** *Boolean*
| `unfilledtimeout.buy` | **Required.** How long (in minutes) the bot will wait for an unfilled buy order to complete, after which the order will be cancelled. <br> ***Datatype:*** *Integer*
| `unfilledtimeout.sell` | **Required.** How long (in minutes) the bot will wait for an unfilled sell order to complete, after which the order will be cancelled. <br> ***Datatype:*** *Integer*
| `bid_strategy.ask_last_balance` | **Required.** Set the bidding price. More information [below](#understand-ask_last_balance).
| `bid_strategy.use_order_book` | Enable buying using the rates in Order Book Bids. <br> ***Datatype:*** *Boolean*
| `bid_strategy.order_book_top` | Bot will use the top N rate in Order Book Bids. I.e. a value of 2 will allow the bot to pick the 2nd bid rate in Order Book Bids. *Defaults to `1`.* <br> ***Datatype:*** *Positive Integer*
| `bid_strategy. check_depth_of_market.enabled` | Do not buy if the difference of buy orders and sell orders is met in Order Book. <br>*Defaults to `false`.* <br> ***Datatype:*** *Boolean*
| `bid_strategy. check_depth_of_market.bids_to_ask_delta` | The % difference of buy orders and sell orders found in Order Book. A value lesser than 1 means sell orders is greater, while value greater than 1 means buy orders is higher. *Defaults to `0`.* <br> ***Datatype:*** *Float (as ratio)*
| `ask_strategy.use_order_book` | Enable selling of open trades using Order Book Asks. <br> ***Datatype:*** *Boolean*
| `ask_strategy.order_book_min` | Bot will scan from the top min to max Order Book Asks searching for a profitable rate. <br>*Defaults to `1`.* <br> ***Datatype:*** *Positive Integer*
| `ask_strategy.order_book_max` | Bot will scan from the top min to max Order Book Asks searching for a profitable rate. <br>*Defaults to `1`.* <br> ***Datatype:*** *Positive Integer*
| `ask_strategy.use_sell_signal` | Use sell signals produced by the strategy in addition to the `minimal_roi`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `true`.* <br> ***Datatype:*** *Boolean*
| `ask_strategy.sell_profit_only` | Wait until the bot makes a positive profit before taking a sell decision. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> ***Datatype:*** *Boolean*
| `ask_strategy.ignore_roi_if_buy_signal` | Do not sell if the buy signal is still active. This setting takes preference over `minimal_roi` and `use_sell_signal`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> ***Datatype:*** *Boolean*
| `order_types` | Configure order-types depending on the action (`"buy"`, `"sell"`, `"stoploss"`, `"stoploss_on_exchange"`). [More information below](#understand-order_types). [Strategy Override](#parameters-in-the-strategy).<br> ***Datatype:*** *Dict*
| `order_time_in_force` | Configure time in force for buy and sell orders. [More information below](#understand-order_time_in_force). [Strategy Override](#parameters-in-the-strategy). <br> ***Datatype:*** *Dict*
| `exchange.name` | **Required.** Name of the exchange class to use. [List below](#user-content-what-values-for-exchangename). <br> ***Datatype:*** *String*
| `exchange.sandbox` | Use the 'sandbox' version of the exchange, where the exchange provides a sandbox for risk-free integration. See [here](sandbox-testing.md) in more details.<br> ***Datatype:*** *Boolean*
| `exchange.key` | API key to use for the exchange. Only required when you are in production mode. **Keep it in secret, do not disclose publicly.** <br> ***Datatype:*** *String*
| `exchange.secret` | API secret to use for the exchange. Only required when you are in production mode. **Keep it in secret, do not disclose publicly.** <br> ***Datatype:*** *String*
| `exchange.password` | API password to use for the exchange. Only required when you are in production mode and for exchanges that use password for API requests. **Keep it in secret, do not disclose publicly.** <br> ***Datatype:*** *String*
| `exchange.pair_whitelist` | List of pairs to use by the bot for trading and to check for potential trades during backtesting. Not used by VolumePairList (see [below](#dynamic-pairlists)). <br> ***Datatype:*** *List*
| `exchange.pair_blacklist` | List of pairs the bot must absolutely avoid for trading and backtesting (see [below](#dynamic-pairlists)). <br> ***Datatype:*** *List*
| `exchange.ccxt_config` | Additional CCXT parameters passed to the regular ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://ccxt.readthedocs.io/en/latest/manual.html#instantiation) <br> ***Datatype:*** *Dict*
| `exchange.ccxt_async_config` | Additional CCXT parameters passed to the async ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://ccxt.readthedocs.io/en/latest/manual.html#instantiation) <br> ***Datatype:*** *Dict*
| `exchange.markets_refresh_interval` | The interval in minutes in which markets are reloaded. <br>*Defaults to `60` minutes.* <br> ***Datatype:*** *Positive Integer*
| Parameter | Description |
|------------|-------------|
| `max_open_trades` | **Required.** Number of open trades your bot is allowed to have. Only one open trade per pair is possible, so the length of your pairlist is another limitation which can apply. If -1 then it is ignored (i.e. potentially unlimited open trades, limited by the pairlist). [More information below](#configuring-amount-per-trade).<br> **Datatype:** Positive integer or -1.
| `stake_currency` | **Required.** Crypto-currency used for trading. [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** String
| `stake_amount` | **Required.** Amount of crypto-currency your bot will use for each trade. Set it to `"unlimited"` to allow the bot to use all available balance. [More information below](#configuring-amount-per-trade). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Positive float or `"unlimited"`.
| `tradable_balance_ratio` | Ratio of the total account balance the bot is allowed to trade. [More information below](#configuring-amount-per-trade). <br>*Defaults to `0.99` (99%).*<br> **Datatype:** Positive float between `0.1` and `1.0`.
| `amend_last_stake_amount` | Use reduced last stake amount if necessary. [More information below](#configuring-amount-per-trade). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `last_stake_amount_min_ratio` | Defines minimum stake amount that has to be left and executed. Applies only to the last stake amount when it's amended to a reduced value (i.e. if `amend_last_stake_amount` is set to `true`). [More information below](#configuring-amount-per-trade). <br>*Defaults to `0.5`.* <br> **Datatype:** Float (as ratio)
| `amount_reserve_percent` | Reserve some amount in min pair stake amount. The bot will reserve `amount_reserve_percent` + stoploss value when calculating min pair stake amount in order to avoid possible trade refusals. <br>*Defaults to `0.05` (5%).* <br> **Datatype:** Positive Float as ratio.
| `ticker_interval` | The timeframe (ticker interval) to use (e.g `1m`, `5m`, `15m`, `30m`, `1h` ...). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** String
| `fiat_display_currency` | Fiat currency used to show your profits. [More information below](#what-values-can-be-used-for-fiat_display_currency). <br> **Datatype:** String
| `dry_run` | **Required.** Define if the bot must be in Dry Run or production mode. <br>*Defaults to `true`.* <br> **Datatype:** Boolean
| `dry_run_wallet` | Define the starting amount in stake currency for the simulated wallet used by the bot running in the Dry Run mode.<br>*Defaults to `1000`.* <br> **Datatype:** Float
| `cancel_open_orders_on_exit` | Cancel open orders when the `/stop` RPC command is issued, `Ctrl+C` is pressed or the bot dies unexpectedly. When set to `true`, this allows you to use `/stop` to cancel unfilled and partially filled orders in the event of a market crash. It does not impact open positions. <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `process_only_new_candles` | Enable processing of indicators only when new candles arrive. If false, each loop populates the indicators; this means the same candle is processed many times, creating system load, but can be useful if your strategy depends on tick data and not only on candles. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `minimal_roi` | **Required.** Set the threshold in percent the bot will use to sell a trade. [More information below](#understand-minimal_roi). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Dict
| `stoploss` | **Required.** Value of the stoploss in percent used by the bot. More details in the [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Float (as ratio)
| `trailing_stop` | Enables trailing stoploss (based on `stoploss` in either configuration or strategy file). More details in the [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Boolean
| `trailing_stop_positive` | Changes stoploss once profit has been reached. More details in the [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Float
| `trailing_stop_positive_offset` | Offset on when to apply `trailing_stop_positive`. Percentage value which should be positive. More details in the [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0` (no offset).* <br> **Datatype:** Float
| `trailing_only_offset_is_reached` | Only apply trailing stoploss when the offset is reached. [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `unfilledtimeout.buy` | **Required.** How long (in minutes) the bot will wait for an unfilled buy order to complete, after which the order will be cancelled. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `unfilledtimeout.sell` | **Required.** How long (in minutes) the bot will wait for an unfilled sell order to complete, after which the order will be cancelled. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `bid_strategy.price_side` | Select the side of the spread the bot should look at to get the buy rate. [More information below](#buy-price-side).<br> *Defaults to `bid`.* <br> **Datatype:** String (either `ask` or `bid`).
| `bid_strategy.ask_last_balance` | **Required.** Set the bidding price. More information [below](#buy-price-without-orderbook-enabled). <br> **Datatype:** Float (between `0.0` and `1.0`)
| `bid_strategy.use_order_book` | Enable buying using the rates in [Order Book Bids](#buy-price-with-orderbook-enabled). <br> **Datatype:** Boolean
| `bid_strategy.order_book_top` | Bot will use the top N rate in Order Book Bids to buy. I.e. a value of 2 will allow the bot to pick the 2nd bid rate in [Order Book Bids](#buy-price-with-orderbook-enabled). <br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
| `bid_strategy. check_depth_of_market.enabled` | Filter buy signals based on Order Book depth: buy orders are only executed if the bids-to-asks delta meets the configured value. [Check market depth](#check-depth-of-market). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `bid_strategy. check_depth_of_market.bids_to_ask_delta` | The ratio of buy orders to sell orders found in the Order Book. A value below 1 means the sell order volume is greater, while a value greater than 1 means the buy order volume is higher. [Check market depth](#check-depth-of-market) <br> *Defaults to `0`.* <br> **Datatype:** Float (as ratio)
| `ask_strategy.price_side` | Select the side of the spread the bot should look at to get the sell rate. [More information below](#sell-price-side).<br> *Defaults to `ask`.* <br> **Datatype:** String (either `ask` or `bid`).
| `ask_strategy.use_order_book` | Enable selling of open trades using [Order Book Asks](#sell-price-with-orderbook-enabled). <br> **Datatype:** Boolean
| `ask_strategy.order_book_min` | First entry in Order Book Asks the bot will check when scanning for a profitable sell rate. <br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
| `ask_strategy.order_book_max` | Last entry in Order Book Asks the bot will check when scanning for a profitable sell rate. <br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
| `ask_strategy.use_sell_signal` | Use sell signals produced by the strategy in addition to the `minimal_roi`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `true`.* <br> **Datatype:** Boolean
| `ask_strategy.sell_profit_only` | Wait until the bot makes a positive profit before taking a sell decision. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `ask_strategy.ignore_roi_if_buy_signal` | Do not sell if the buy signal is still active. This setting takes precedence over `minimal_roi` and `use_sell_signal`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `order_types` | Configure order-types depending on the action (`"buy"`, `"sell"`, `"stoploss"`, `"stoploss_on_exchange"`). [More information below](#understand-order_types). [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Dict
| `order_time_in_force` | Configure time in force for buy and sell orders. [More information below](#understand-order_time_in_force). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Dict
| `exchange.name` | **Required.** Name of the exchange class to use. [List below](#user-content-what-values-for-exchangename). <br> **Datatype:** String
| `exchange.sandbox` | Use the 'sandbox' version of the exchange, where the exchange provides a sandbox for risk-free integration. See [here](sandbox-testing.md) for more details.<br> **Datatype:** Boolean
| `exchange.key` | API key to use for the exchange. Only required when you are in production mode.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
| `exchange.secret` | API secret to use for the exchange. Only required when you are in production mode.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
| `exchange.password` | API password to use for the exchange. Only required when you are in production mode and for exchanges that use password for API requests.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
| `exchange.pair_whitelist` | List of pairs to use by the bot for trading and to check for potential trades during backtesting. Not used by VolumePairList (see [below](#dynamic-pairlists)). <br> **Datatype:** List
| `exchange.pair_blacklist` | List of pairs the bot must absolutely avoid for trading and backtesting (see [below](#dynamic-pairlists)). <br> **Datatype:** List
| `exchange.ccxt_config` | Additional CCXT parameters passed to the regular ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://ccxt.readthedocs.io/en/latest/manual.html#instantiation) <br> **Datatype:** Dict
| `exchange.ccxt_async_config` | Additional CCXT parameters passed to the async ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://ccxt.readthedocs.io/en/latest/manual.html#instantiation) <br> **Datatype:** Dict
| `exchange.markets_refresh_interval` | The interval in minutes in which markets are reloaded. <br>*Defaults to `60` minutes.* <br> **Datatype:** Positive Integer
| `edge.*` | Please refer to [edge configuration document](edge.md) for detailed explanation.
| `experimental.block_bad_exchanges` | Block exchanges known to not work with freqtrade. Leave on default unless you want to test if that exchange works now. <br>*Defaults to `true`.* <br> **Datatype:** Boolean
| `pairlists` | Define one or more pairlists to be used. [More information below](#dynamic-pairlists). <br>*Defaults to `StaticPairList`.* <br> **Datatype:** List of Dicts
| `telegram.enabled` | Enable the usage of Telegram. <br> **Datatype:** Boolean
| `telegram.token` | Your Telegram bot token. Only required if `telegram.enabled` is `true`. <br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
| `telegram.chat_id` | Your personal Telegram account id. Only required if `telegram.enabled` is `true`. <br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
| `webhook.enabled` | Enable usage of Webhook notifications <br> **Datatype:** Boolean
| `webhook.url` | URL for the webhook. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
| `webhook.webhookbuy` | Payload to send on buy. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
| `webhook.webhookbuycancel` | Payload to send on buy order cancel. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
| `webhook.webhooksell` | Payload to send on sell. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
| `webhook.webhooksellcancel` | Payload to send on sell order cancel. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
| `webhook.webhookstatus` | Payload to send on status calls. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
| `api_server.enabled` | Enable usage of API Server. See the [API Server documentation](rest-api.md) for more details. <br> **Datatype:** Boolean
| `api_server.listen_ip_address` | Bind IP address. See the [API Server documentation](rest-api.md) for more details. <br> **Datatype:** IPv4
| `api_server.listen_port` | Bind Port. See the [API Server documentation](rest-api.md) for more details. <br>**Datatype:** Integer between 1024 and 65535
| `api_server.username` | Username for API server. See the [API Server documentation](rest-api.md) for more details. <br>**Keep it in secret, do not disclose publicly.**<br> **Datatype:** String
| `api_server.password` | Password for API server. See the [API Server documentation](rest-api.md) for more details. <br>**Keep it in secret, do not disclose publicly.**<br> **Datatype:** String
| `db_url` | Declares database URL to use. NOTE: This defaults to `sqlite:///tradesv3.dryrun.sqlite` if `dry_run` is `true`, and to `sqlite:///tradesv3.sqlite` for production instances. <br> **Datatype:** String, SQLAlchemy connect string
| `initial_state` | Defines the initial application state. More information below. <br>*Defaults to `stopped`.* <br> **Datatype:** Enum, either `stopped` or `running`
| `forcebuy_enable` | Enables the RPC Commands to force a buy. More information below. <br> **Datatype:** Boolean
| `strategy` | **Required** Defines Strategy class to use. Recommended to be set via `--strategy NAME`. <br> **Datatype:** ClassName
| `strategy_path` | Adds an additional strategy lookup path (must be a directory). <br> **Datatype:** String
| `internals.process_throttle_secs` | Set the process throttle. Value in second. <br>*Defaults to `5` seconds.* <br> **Datatype:** Positive Integer
| `internals.heartbeat_interval` | Print heartbeat message every N seconds. Set to 0 to disable heartbeat messages. <br>*Defaults to `60` seconds.* <br> **Datatype:** Positive Integer or 0
| `internals.sd_notify` | Enables use of the sd_notify protocol to tell systemd service manager about changes in the bot state and issue keep-alive pings. See [here](installation.md#7-optional-configure-freqtrade-as-a-systemd-service) for more details. <br> **Datatype:** Boolean
| `logfile` | Specifies the logfile name. Uses a rolling strategy for log file rotation, keeping 10 files with a limit of 1MB per file. <br> **Datatype:** String
| `user_data_dir` | Directory containing user data. <br> *Defaults to `./user_data/`*. <br> **Datatype:** String
| `dataformat_ohlcv` | Data format to use to store historical candle (OHLCV) data. <br> *Defaults to `json`*. <br> **Datatype:** String
| `dataformat_trades` | Data format to use to store historical trades data. <br> *Defaults to `jsongz`*. <br> **Datatype:** String
### Parameters in the strategy
@ -124,24 +134,63 @@ Values set in the configuration file always overwrite values set in the strategy
* `order_time_in_force`
* `stake_currency`
* `stake_amount`
* `unfilledtimeout`
* `use_sell_signal` (ask_strategy)
* `sell_profit_only` (ask_strategy)
* `ignore_roi_if_buy_signal` (ask_strategy)
### Configuring amount per trade
There are several methods to configure how much of the stake currency the bot will use to enter a trade. All methods respect the [available balance configuration](#available-balance) as explained below.
#### Available balance
By default, the bot assumes that the `complete amount - 1%` is at its disposal, and when using [dynamic stake amount](#dynamic-stake-amount), it will split the complete balance into `max_open_trades` buckets (one per trade).
Freqtrade will reserve 1% for eventual fees when entering a trade and will therefore not touch that by default.
You can configure the "untouched" amount by using the `tradable_balance_ratio` setting.
For example, if you have 10 ETH available in your wallet on the exchange and `tradable_balance_ratio=0.5` (which is 50%), then the bot will use a maximum amount of 5 ETH for trading and consider this as the available balance. The rest of the wallet is untouched by the trades.
!!! Warning
The `tradable_balance_ratio` setting applies to the current balance (free balance + tied up in trades). Therefore, assuming the starting balance of 1000, a configuration with `tradable_balance_ratio=0.99` will not guarantee that 10 currency units will always remain available on the exchange. For example, the free amount may reduce to 5 units if the total balance is reduced to 500 (either by a losing streak, or by withdrawing balance).
#### Amend last stake amount
Assuming we have a tradable balance of 1000 USDT, `stake_amount=400`, and `max_open_trades=3`.
The bot would open 2 trades and be unable to fill the last trading slot, since the requested 400 USDT are no longer available: 800 USDT are already tied up in other trades.
To overcome this, the option `amend_last_stake_amount` can be set to `True`, which will enable the bot to reduce stake_amount to the available balance in order to fill the last trade slot.
In the example above this would mean:
- Trade1: 400 USDT
- Trade2: 400 USDT
- Trade3: 200 USDT
!!! Note
This option only applies with [Static stake amount](#static-stake-amount) - since [Dynamic stake amount](#dynamic-stake-amount) divides the balances evenly.
!!! Note
    The minimum last stake amount can be configured using `last_stake_amount_min_ratio` - which defaults to 0.5 (50%). This means that the minimum stake amount that's ever used is `stake_amount * 0.5`. This avoids very low stake amounts that are close to the minimum tradable amount for the pair and can be refused by the exchange.
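A minimal configuration sketch for this behaviour could look as follows (the values are purely illustrative and match the example above, not recommended defaults):
```json
{
    "max_open_trades": 3,
    "stake_currency": "USDT",
    "stake_amount": 400,
    "amend_last_stake_amount": true
}
```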
#### Static stake amount
The `stake_amount` configuration statically configures the amount of stake-currency your bot will use for each trade.
The minimal configuration value is 0.0001; however, please check your exchange's trading minimums for the stake currency you're using to avoid problems.
This setting works in combination with `max_open_trades`. The maximum capital engaged in trades is `stake_amount * max_open_trades`.
For example, the bot will at most use (0.05 BTC x 3) = 0.15 BTC, assuming a configuration of `max_open_trades=3` and `stake_amount=0.05`.
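As an illustration, the example above would correspond to a configuration snippet like the following (values are examples only):
```json
{
    "max_open_trades": 3,
    "stake_currency": "BTC",
    "stake_amount": 0.05
}
```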
!!! Note
    This setting respects the [available balance configuration](#available-balance).
#### Dynamic stake amount
Alternatively, you can use a dynamic stake amount, which will use the available balance on the exchange and divide that equally by the number of allowed trades (`max_open_trades`).
To configure this, set `stake_amount="unlimited"`. We also recommend setting `tradable_balance_ratio=0.99` (99%) - to keep a minimum balance for eventual fees.
In this case a trade amount is calculated as:
@ -149,6 +198,16 @@ In this case a trade amount is calculated as:
currency_balance / (max_open_trades - current_open_trades)
```
To allow the bot to trade all the available `stake_currency` in your account (minus `tradable_balance_ratio`) set
```json
"stake_amount" : "unlimited",
"tradable_balance_ratio": 0.99,
```
!!! Note
    This configuration will allow increasing / decreasing stakes depending on the performance of the bot (lower stakes if the bot is losing, higher stakes if the bot has a winning record, since higher balances are available).
!!! Note "When using Dry-Run Mode"
    When using `"stake_amount" : "unlimited",` in combination with Dry-Run, the balance will be simulated starting with a stake of `dry_run_wallet` which will evolve over time. It is therefore important to set `dry_run_wallet` to a sensible value (like 0.05 or 0.01 for BTC and 1000 or 100 for USDT, for example), otherwise it may simulate trades with 100 BTC (or more) or 0.05 USDT (or less) at once - which may not correspond to your real available balance, or may be less than the exchange's minimal limit for the order amount in the stake currency.
@ -207,13 +266,6 @@ before asking the strategy if we should buy or a sell an asset. After each wait
every open trade whether or not we should sell, and for all the remaining pairs (either the dynamic list of pairs or
the static list of pairs) if we should buy.
### Understand order_types
The `order_types` configuration parameter maps actions (`buy`, `sell`, `stoploss`) to order-types (`market`, `limit`, ...), configures whether the stoploss should be placed on the exchange, and defines the stoploss-on-exchange update interval in seconds.
@ -233,7 +285,7 @@ If this is configured, the following 4 values (`buy`, `sell`, `stoploss` and
The below is the default which is used if this is not configured in either strategy or configuration file.
Since `stoploss_on_exchange` uses limit orders, the exchange needs 2 prices, the stoploss_price and the Limit price.
`stoploss` defines the stop-price - and limit should be slightly below this. This defaults to 0.99 / 1% (configurable via `stoploss_on_exchange_limit_ratio`).
Calculation example: we bought the asset at 100$.
Stop-price is 95$, then limit would be `95 * 0.99 = 94.05$` - so the stoploss will happen between 95$ and 94.05$.
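For reference, a complete `order_types` block covering the values discussed here might be sketched as follows. The order-types chosen per action are examples only, and the actual defaults shipped with the bot may differ - check the sample configuration of your version:
```json
"order_types": {
    "buy": "limit",
    "sell": "limit",
    "stoploss": "market",
    "stoploss_on_exchange": false,
    "stoploss_on_exchange_interval": 60,
    "stoploss_on_exchange_limit_ratio": 0.99
},
```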
@ -291,7 +343,7 @@ This is most of the time the default time in force. It means the order will rema
on exchange till it is cancelled by the user. It can be fully or partially fulfilled.
If partially fulfilled, the remaining amount will stay on the exchange till cancelled.
**FOK (Fill Or Kill):**
It means if the order is not executed immediately AND fully then it is canceled by the exchange.
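As a sketch, a corresponding `order_time_in_force` configuration (using the default `gtc` for both sides) could look like this - assuming your exchange supports this setting:
```json
"order_time_in_force": {
    "buy": "gtc",
    "sell": "gtc"
},
```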
@ -321,16 +373,18 @@ The possible values are: `gtc` (default), `fok` or `ioc`.
Freqtrade is based on [CCXT library](https://github.com/ccxt/ccxt) that supports over 100 cryptocurrency
exchange markets and trading APIs. The complete up-to-date list can be found in the
[CCXT repo homepage](https://github.com/ccxt/ccxt/tree/master/python).
However, the bot was tested by the development team with only Bittrex, Binance and Kraken,
so these are the only officially supported exchanges:
- [Bittrex](https://bittrex.com/): "bittrex"
- [Binance](https://www.binance.com/): "binance"
- [Kraken](https://kraken.com/): "kraken"
Feel free to test other exchanges and submit your PR to improve the bot.
Some exchanges require special configuration, which can be found on the [Exchange-specific Notes](exchanges.md) documentation page.
#### Sample exchange configuration
An exchange configuration for "binance" would look as follows:
@ -360,7 +414,7 @@ Advanced options can be configured using the `_ft_has_params` setting, which wil
Available options are listed in the exchange-class as `_ft_has_default`.
For example, to test the order type `FOK` with Kraken, and modify candle limit to 200 (so you only get 200 candles per API call):
```json
"exchange": {
@ -393,6 +447,108 @@ The valid values are:
"BTC", "ETH", "XRP", "LTC", "BCH", "USDT"
```
## Prices used for orders
Prices for regular orders can be controlled via the parameter structures `bid_strategy` for buying and `ask_strategy` for selling.
Prices are always retrieved right before an order is placed, either by querying the exchange tickers or by using the orderbook data.
!!! Note
    Orderbook data used by Freqtrade are the data retrieved from the exchange by ccxt's `fetch_order_book()` function, i.e. usually data from the L2-aggregated orderbook, while the ticker data are the structures returned by ccxt's `fetch_ticker()`/`fetch_tickers()` functions. Refer to the ccxt library [documentation](https://github.com/ccxt/ccxt/wiki/Manual#market-data) for more details.
### Buy price
#### Check depth of market
When check depth of market is enabled (`bid_strategy.check_depth_of_market.enabled=True`), the buy signals are filtered based on the orderbook depth (sum of all amounts) for each orderbook side.
Orderbook `bid` (buy) side depth is then divided by the orderbook `ask` (sell) side depth and the resulting delta is compared to the value of the `bid_strategy.check_depth_of_market.bids_to_ask_delta` parameter. The buy order is only executed if the orderbook delta is greater than or equal to the configured delta value.
!!! Note
    A delta value below 1 means that the `ask` (sell) orderbook side depth is greater than the depth of the `bid` (buy) orderbook side, while a value greater than 1 means the opposite (the depth of the buy side is higher than the depth of the sell side).
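A configuration sketch enabling this check could look as follows (the delta value of `1` is only an example):
```json
"bid_strategy": {
    "check_depth_of_market": {
        "enabled": true,
        "bids_to_ask_delta": 1
    }
},
```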
#### Buy price side
The configuration setting `bid_strategy.price_side` defines the side of the spread the bot looks for when buying.
The following displays an orderbook.
``` explanation
...
103
102
101 # ask
-------------Current spread
99 # bid
98
97
...
```
If `bid_strategy.price_side` is set to `"bid"`, then the bot will use 99 as buying price.
In line with that, if `bid_strategy.price_side` is set to `"ask"`, then the bot will use 101 as buying price.
Using the `ask` price will often lead to quicker order fills, but the bot can also end up paying more than would have been necessary.
Taker fees instead of maker fees will most likely apply, even when using limit buy orders.
Also, prices at the "ask" side of the spread are higher than prices at the "bid" side in the orderbook, so the order behaves similarly to a market order (however with a maximum price).
#### Buy price with Orderbook enabled
When buying with the orderbook enabled (`bid_strategy.use_order_book=True`), Freqtrade fetches the `bid_strategy.order_book_top` entries from the orderbook and then uses the entry specified as `bid_strategy.order_book_top` on the configured side (`bid_strategy.price_side`) of the orderbook. 1 specifies the topmost entry in the orderbook, while 2 would use the 2nd entry in the orderbook, and so on.
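As a sketch, buying from the top of the configured orderbook side could be set up like this (values are examples only):
```json
"bid_strategy": {
    "price_side": "bid",
    "use_order_book": true,
    "order_book_top": 1
},
```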
#### Buy price without Orderbook enabled
The following section uses `side` as the configured `bid_strategy.price_side`.
When not using orderbook (`bid_strategy.use_order_book=False`), Freqtrade uses the best `side` price from the ticker if it's below the `last` traded price from the ticker. Otherwise (when the `side` price is above the `last` price), it calculates a rate between `side` and `last` price.
The `bid_strategy.ask_last_balance` configuration parameter controls this. A value of `0.0` will use the `side` price, while `1.0` will use the `last` price, and values between those interpolate between the `side` and `last` price.
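A corresponding configuration sketch (using the ticker instead of the orderbook, and always taking the `side` price) could look like this:
```json
"bid_strategy": {
    "price_side": "bid",
    "use_order_book": false,
    "ask_last_balance": 0.0
},
```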
### Sell price
#### Sell price side
The configuration setting `ask_strategy.price_side` defines the side of the spread the bot looks for when selling.
The following displays an orderbook:
``` explanation
...
103
102
101 # ask
-------------Current spread
99 # bid
98
97
...
```
If `ask_strategy.price_side` is set to `"ask"`, then the bot will use 101 as selling price.
In line with that, if `ask_strategy.price_side` is set to `"bid"`, then the bot will use 99 as selling price.
#### Sell price with Orderbook enabled
When selling with the orderbook enabled (`ask_strategy.use_order_book=True`), Freqtrade fetches the `ask_strategy.order_book_max` entries in the orderbook. Then each of the orderbook steps between `ask_strategy.order_book_min` and `ask_strategy.order_book_max` on the configured orderbook side are validated for a profitable sell-possibility based on the strategy configuration (`minimal_roi` conditions) and the sell order is placed at the first profitable spot.
!!! Note
Using `order_book_max` higher than `order_book_min` only makes sense when ask_strategy.price_side is set to `"ask"`.
The idea here is to place the sell order early, to be ahead in the queue.
A fixed slot (mirroring `bid_strategy.order_book_top`) can be defined by setting `ask_strategy.order_book_min` and `ask_strategy.order_book_max` to the same number.
!!! Warning "Order_book_max > 1 - increased risks for stoplosses!"
    Using `ask_strategy.order_book_max` higher than 1 will increase the risk that the stoploss on exchange is cancelled too early, since an eventual [stoploss on exchange](#understand-order_types) will be cancelled as soon as the order is placed.
Also, the sell order will remain on the exchange for `unfilledtimeout.sell` (or until it's filled) - which can lead to missed stoplosses (with or without using stoploss on exchange).
!!! Warning "Order_book_max > 1 in dry-run"
Using `ask_strategy.order_book_max` higher than 1 will result in improper dry-run results (significantly better than real orders executed on exchange), since dry-run assumes orders to be filled almost instantly.
It is therefore advised to not use this setting for dry-runs.
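As a sketch, a fixed-slot sell configuration (mirroring `bid_strategy.order_book_top`, as described in the note above) could look like this:
```json
"ask_strategy": {
    "price_side": "ask",
    "use_order_book": true,
    "order_book_min": 1,
    "order_book_max": 1
},
```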
#### Sell price without Orderbook enabled
When not using orderbook (`ask_strategy.use_order_book=False`), the price at the `ask_strategy.price_side` side (defaults to `"ask"`) from the ticker will be used as the sell price.
## Pairlists
Pairlists define the list of pairs that the bot should trade.
@ -410,6 +566,7 @@ Inactive markets and blacklisted pairs are always removed from the resulting `pa
* [`VolumePairList`](#volume-pair-list)
* [`PrecisionFilter`](#precision-filter)
* [`PriceFilter`](#price-pair-filter)
* [`SpreadFilter`](#spread-filter)
!!! Tip "Testing pairlists"
Pairlist configurations can be quite tricky to get right. Best use the [`test-pairlist`](utils.md#test-pairlist) subcommand to test your configuration quickly.
@ -428,12 +585,16 @@ It uses configuration from `exchange.pair_whitelist` and `exchange.pair_blacklis
#### Volume Pair List
`VolumePairList` selects `number_assets` top pairs based on `sort_key`, which can only be `quoteVolume`.
`VolumePairList` considers the output of previous pairlists unless it's the first configured pairlist. In that case, it does not consider `pair_whitelist`, but selects the top assets from all available markets (with matching stake-currency) on the exchange.
`refresh_period` allows setting the period (in seconds), at which the pairlist will be refreshed. Defaults to 1800s (30 minutes).
`VolumePairList` is based on the ticker data, as reported by the ccxt library:
* The `quoteVolume` is the amount of quote (stake) currency traded (bought or sold) in the last 24 hours.
```json
"pairlists": [{
"method": "VolumePairList",
@ -458,6 +619,12 @@ Min price precision is 8 decimals. If price is 0.00000011 - one step would be 0.
These pairs are dangerous since it may be impossible to place the desired stoploss, which often results in high losses.
#### Spread Filter
Removes pairs that have a difference between asks and bids above the specified ratio (default `0.005`).
Example:
If `DOGE/BTC` maximum bid is 0.00000026 and minimum ask is 0.00000027 the ratio is calculated as: `1 - bid/ask ~= 0.037` which is `> 0.005`
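A pairlist entry for this filter might be sketched as follows - note that the name of the ratio parameter (`max_spread_ratio`) is an assumption here and should be verified against the pairlist reference of your version:
```json
"pairlists": [
    {"method": "StaticPairList"},
    {
        "method": "SpreadFilter",
        "max_spread_ratio": 0.005
    }
],
```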
### Full Pairlist example
The below example blacklists `BNB/BTC`, uses `VolumePairList` with `20` assets, sorting by `quoteVolume`, and applies both [`PrecisionFilter`](#precision-filter) and [`PriceFilter`](#price-pair-filter), filtering all assets where 1 price unit is > 1%.
@ -509,12 +676,25 @@ Once you will be happy with your bot performance running in the Dry-run mode, yo
!!! Note
A simulated wallet is available during dry-run mode, and will assume a starting capital of `dry_run_wallet` (defaults to 1000).
### Considerations for dry-run
* API-keys may or may not be provided. Only Read-Only operations (i.e. operations that do not alter account state) on the exchange are performed in the dry-run mode.
* Wallets (`/balance`) are simulated.
* Orders are simulated, and will not be posted to the exchange.
* In combination with `stoploss_on_exchange`, the stop_loss price is assumed to be filled.
* Open orders (not trades, which are stored in the database) are reset on bot restart.
## Switch to production mode
In production mode, the bot will engage your money. Be careful, since a wrong
strategy can lose all your money. Be aware of what you are doing when
you run it in production mode.
### Setup your exchange account
You will need to create API Keys (usually you get `key` and `secret`, some exchanges require an additional `password`) from the Exchange website and you'll need to insert this into the appropriate fields in the configuration or when asked by the `freqtrade new-config` command.
API Keys are usually only required for live trading (trading for real money, bot running in "production mode", executing real orders on the exchange) and are not required for the bot running in dry-run (trade simulation) mode. When you set up the bot in dry-run mode, you may fill these fields with empty values.
### To switch your bot in production mode
**Edit your `config.json` file.**
@ -536,9 +716,6 @@ you run it in production mode.
}
```
!!! Note
    If you don't have an exchange API key yet, [see our tutorial](installation.md#setup-your-exchange-account).
You should also make sure to read the [Exchanges](exchanges.md) section of the documentation to be aware of potential configuration details specific to your exchange.
### Using proxy with Freqtrade
@ -563,7 +740,7 @@ freqtrade
## Embedding Strategies
Freqtrade provides you with an easy way to embed the strategy into your configuration file.
This is done by utilizing BASE64 encoding and providing this string at the strategy configuration field,
in your chosen config file.
View File
@ -8,6 +8,27 @@ You can analyze the results of backtests and trading history easily using Jupyte
* Don't forget to start a Jupyter notebook server from within your conda or venv environment or use [nb_conda_kernels](https://github.com/Anaconda-Platform/nb_conda_kernels)*
* Copy the example notebook before use so your changes don't get clobbered with the next freqtrade update.
### Using virtual environment with system-wide Jupyter installation
Sometimes it can be desired to use a system-wide installation of Jupyter notebook, and use a jupyter kernel from the virtual environment.
This prevents you from installing the full jupyter suite multiple times per system, and provides an easy way to switch between tasks (freqtrade / other analytics tasks).
For this to work, first activate your virtual environment and run the following commands:
``` bash
# Activate virtual environment
source .env/bin/activate
pip install ipykernel
ipython kernel install --user --name=freqtrade
# Restart jupyter (lab / notebook)
# select kernel "freqtrade" in the notebook
```
!!! Note
This section is provided for completeness, the Freqtrade Team won't provide full support for problems with this setup and will recommend to install Jupyter in the virtual environment directly, as that is the easiest way to get jupyter notebooks up and running. For help with this setup please refer to the [Project Jupyter](https://jupyter.org/) [documentation](https://jupyter.org/documentation) or [help channels](https://jupyter.org/community).
## Fine print
Some tasks don't work especially well in notebooks. For example, anything using asynchronous execution is a problem for Jupyter. Also, freqtrade's primary entry point is the shell cli, so using pure python in a notebook bypasses arguments that provide required objects and parameters to helper functions. You may need to set those values or create expected objects manually.
View File
@ -12,6 +12,152 @@ Otherwise `--exchange` becomes mandatory.
If you already have backtesting data available in your data-directory and would like to refresh this data up to today, use `--days xx` with a number slightly higher than the missing number of days. Freqtrade will keep the available data and only download the missing data.
Be careful though: if the number is too small (which would result in a few missing days), the whole dataset will be removed and only xx days will be downloaded.
### Usage
```
usage: freqtrade download-data [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH] [--userdir PATH] [-p PAIRS [PAIRS ...]]
[--pairs-file FILE] [--days INT] [--dl-trades] [--exchange EXCHANGE]
[-t {1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} [{1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} ...]]
[--erase] [--data-format-ohlcv {json,jsongz}] [--data-format-trades {json,jsongz}]
optional arguments:
-h, --help show this help message and exit
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
Show profits for only these pairs. Pairs are space-separated.
--pairs-file FILE File containing a list of pairs to download.
--days INT Download data for given number of days.
--dl-trades Download trades instead of OHLCV data. The bot will resample trades to the desired timeframe as specified as
--timeframes/-t.
--exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no config is provided.
-t {1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} [{1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} ...], --timeframes {1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} [{1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} ...]
Specify which tickers to download. Space-separated list. Default: `1m 5m`.
--erase Clean all existing data for the selected exchange/pairs/timeframes.
--data-format-ohlcv {json,jsongz}
Storage format for downloaded candle (OHLCV) data. (default: `json`).
--data-format-trades {json,jsongz}
Storage format for downloaded trades data. (default: `jsongz`).
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are: 'syslog', 'journald'. See the documentation for more details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`). Multiple --config options may be used. Can be set to `-`
to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
### Data format
Freqtrade currently supports 2 dataformats, `json` (plain "text" json files) and `jsongz` (a gzipped version of json files).
By default, OHLCV data is stored as `json` data, while trades data is stored as `jsongz` data.
This can be changed via the `--data-format-ohlcv` and `--data-format-trades` parameters respectively.
If the default dataformat has been changed during download, then the keys `dataformat_ohlcv` and `dataformat_trades` in the configuration file need to be adjusted to the selected dataformat as well.
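For example, if data was downloaded as `jsongz`, the configuration would need the following entries (key names taken from the configuration table above):
```json
"dataformat_ohlcv": "jsongz",
"dataformat_trades": "jsongz",
```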
!!! Note
You can convert between data-formats using the [convert-data](#subcommand-convert-data) and [convert-trade-data](#subcommand-convert-trade-data) methods.
#### Subcommand convert data
```
usage: freqtrade convert-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH]
[-p PAIRS [PAIRS ...]] --format-from
{json,jsongz} --format-to {json,jsongz}
[--erase]
[-t {1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} [{1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} ...]]
optional arguments:
-h, --help show this help message and exit
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
Show profits for only these pairs. Pairs are space-
separated.
--format-from {json,jsongz}
Source format for data conversion.
--format-to {json,jsongz}
Destination format for data conversion.
--erase Clean all existing data for the selected
exchange/pairs/timeframes.
-t {1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} [{1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} ...], --timeframes {1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} [{1m,3m,5m,15m,30m,1h,2h,4h,6h,8h,12h,1d,3d,1w} ...]
Specify which tickers to download. Space-separated
list. Default: `1m 5m`.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
##### Example converting data
The following command will convert all candle (OHLCV) data available in `~/.freqtrade/data/binance` from json to jsongz, saving diskspace in the process.
It'll also remove original json data files (`--erase` parameter).
``` bash
freqtrade convert-data --format-from json --format-to jsongz --datadir ~/.freqtrade/data/binance -t 5m 15m --erase
```
#### Subcommand convert trade data
```
usage: freqtrade convert-trade-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH]
[-p PAIRS [PAIRS ...]] --format-from
{json,jsongz} --format-to {json,jsongz}
[--erase]
optional arguments:
-h, --help show this help message and exit
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
Show profits for only these pairs. Pairs are space-
separated.
--format-from {json,jsongz}
Source format for data conversion.
--format-to {json,jsongz}
Destination format for data conversion.
--erase Clean all existing data for the selected
exchange/pairs/timeframes.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
##### Example converting trades
The following command will convert all available trade-data in `~/.freqtrade/data/kraken` from jsongz to json.
It'll also remove original jsongz data files (`--erase` parameter).
``` bash
freqtrade convert-trade-data --format-from jsongz --format-to json --datadir ~/.freqtrade/data/kraken --erase
```
### Pairs file
As an alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
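Such a file is simply a JSON list of pair names, for example (the pairs shown here are placeholders):
```json
[
    "ETH/BTC",
    "XRP/BTC",
    "LTC/BTC"
]
```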
@ -46,15 +192,15 @@ Then run:
freqtrade download-data --exchange binance
```
This will download historical candle (OHLCV) data for all the currency pairs you defined in `pairs.json`.
### Other Notes
- To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
- To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust ratelimits etc.)
- To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
- To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
- Use `--timeframes` to specify which timeframes to download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m`, which will download 1-minute and 5-minute data.
- To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
### Trades (tick) data
View File
@ -24,3 +24,13 @@ and in freqtrade 2019.7 (master branch).
`--live` in the context of backtesting allowed downloading the latest tick data for backtesting.
It only downloaded the latest 500 candles, so it was ineffective in getting good backtest data.
Removed in 2019-7-dev (develop branch) and in freqtrade 2019-8 (master branch).
### Allow running multiple pairlists in sequence
The former `"pairlist"` section in the configuration has been removed, and is replaced by `"pairlists"` - being a list to specify a sequence of pairlists.
The old section of configuration parameters (`"pairlist"`) has been deprecated in 2019.11 and has been removed in 2020.4.
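As a sketch, the new format is a list of dicts, with the default `StaticPairList` expressed as follows:
```json
"pairlists": [
    {"method": "StaticPairList"}
],
```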
### Deprecation of `bidVolume` and `askVolume` from `VolumePairList`
Since only quoteVolume can be compared between assets, the other options (bidVolume, askVolume) have been deprecated in 2020.4.
View File
@ -1,6 +1,6 @@
# Development Help
This page is intended for developers of Freqtrade, people who want to contribute to the Freqtrade codebase or documentation, or people who want to understand the source code of the application they're running.
All contributions, bug reports, bug fixes, documentation improvements, enhancements and ideas are welcome. We [track issues](https://github.com/freqtrade/freqtrade/issues) on [GitHub](https://github.com) and also have a dev channel in [slack](https://join.slack.com/t/highfrequencybot/shared_invite/enQtNjU5ODcwNjI1MDU3LTU1MTgxMjkzNmYxNWE1MDEzYzQ3YmU4N2MwZjUyNjJjODRkMDVkNjg4YTAyZGYzYzlhOTZiMTE4ZjQ4YzM0OGE) where you can ask questions.
@ -153,7 +153,7 @@ In VolumePairList, this implements different methods of sorting, does early vali
## Implement a new Exchange (WIP)
!!! Note
    This section is a Work in Progress and is not a complete guide on how to test a new exchange with Freqtrade.
Most exchanges supported by CCXT should work out of the box.
@ -165,7 +165,7 @@ Since CCXT does not provide unification for Stoploss On Exchange yet, we'll need
### Incomplete candles
While fetching candle (OHLCV) data, we may end up getting incomplete candles (depending on the exchange).
To demonstrate this, we'll use daily candles (`"1d"`) to keep things simple.
We query the api (`ct.fetch_ohlcv()`) for the timeframe and look at the date of the last entry. If this entry changes or shows the date of an "incomplete" candle, then we should drop it, since having incomplete candles is problematic: indicators assume that only complete candles are passed to them and will generate a lot of false buy signals. By default, we therefore remove the last candle, assuming it's incomplete.
@ -174,14 +174,14 @@ To check how the new exchange behaves, you can use the following snippet:
``` python
import ccxt
from datetime import datetime
from freqtrade.data.converter import ohlcv_to_dataframe
ct = ccxt.binance()
timeframe = "1d"
pair = "XLM/BTC" # Make sure to use a pair that exists on that exchange!
raw = ct.fetch_ohlcv(pair, timeframe=timeframe)
# convert to dataframe
df1 = ohlcv_to_dataframe(raw, timeframe, pair=pair, drop_incomplete=False)
print(df1.tail(1))
print(datetime.utcnow())
@ -234,7 +234,7 @@ git checkout -b new_release <commitid>
Determine if crucial bugfixes have been made between this commit and the current state, and eventually cherry-pick these.
* Edit `freqtrade/__init__.py` and add the version matching the current date (for example `2019.7` for July 2019). Minor versions can be `2019.7.1` should we need to do a second release that month. Version numbers must follow allowed versions from PEP0440 to avoid failures pushing to pypi.
* Commit this part
* push that branch to the remote and create a PR against the master branch
@ -266,4 +266,24 @@ Once the PR against master is merged (best right after merging):
* Use the button "Draft a new release" in the Github UI (subsection releases).
* Use the version-number specified as tag.
* Use "master" as reference (this step comes after the above PR is merged).
* Use the above changelog as release comment (as codeblock).
## Releases
### pypi
Additional requirements: `wheel`, `twine` (for uploading), account on pypi with proper permissions.
To create a pypi release, please run the following commands:
``` bash
python setup.py sdist bdist_wheel
# For pypi test (to check if some change to the installation did work)
twine upload --repository-url https://test.pypi.org/legacy/ dist/*
# For production:
twine upload dist/*
```
Please don't push non-releases to the productive / real pypi instance.
View File
@ -1,4 +1,4 @@
# Using Freqtrade with Docker
## Install Docker
@ -8,13 +8,144 @@ Start by downloading and installing Docker CE for your platform:
* [Windows](https://docs.docker.com/docker-for-windows/install/)
* [Linux](https://docs.docker.com/install/)
Optionally, [docker-compose](https://docs.docker.com/compose/install/) should be installed and available to follow the [docker quick start guide](#docker-quick-start).
Once you have Docker installed, simply prepare the config file (e.g. `config.json`) and run the image for `freqtrade` as explained below.
## Freqtrade with docker-compose
Freqtrade provides an official Docker image on [Dockerhub](https://hub.docker.com/r/freqtradeorg/freqtrade/), as well as a [docker-compose file](https://github.com/freqtrade/freqtrade/blob/develop/docker-compose.yml) ready for usage.
!!! Note
    The following section assumes that docker and docker-compose are installed and available to the logged in user.
!!! Note
    All below commands use relative directories and will have to be executed from the directory containing the `docker-compose.yml` file.
!!! Note "Docker on Raspberry"
If you're running freqtrade on a Raspberry PI, you must change the image from `freqtradeorg/freqtrade:master` to `freqtradeorg/freqtrade:master_pi` or `freqtradeorg/freqtrade:develop_pi`, otherwise the image will not work.
### Docker quick start
Create a new directory and place the [docker-compose file](https://github.com/freqtrade/freqtrade/blob/develop/docker-compose.yml) in this directory.
``` bash
mkdir ft_userdata
cd ft_userdata/
# Download the docker-compose file from the repository
curl https://raw.githubusercontent.com/freqtrade/freqtrade/develop/docker-compose.yml -o docker-compose.yml
# Pull the freqtrade image
docker-compose pull
# Create user directory structure
docker-compose run --rm freqtrade create-userdir --userdir user_data
# Create configuration - Requires answering interactive questions
docker-compose run --rm freqtrade new-config --config user_data/config.json
```
The above snippet creates a new directory called "ft_userdata", downloads the latest compose file and pulls the freqtrade image.
The last 2 steps in the snippet create the directory with user-data, as well as (interactively) the default configuration based on your selections.
!!! Note
You can edit the configuration at any time, which is available as `user_data/config.json` (within the directory `ft_userdata`) when using the above configuration.
#### Adding your strategy
The configuration is now available as `user_data/config.json`.
You should now copy your strategy to `user_data/strategies/` - and add the Strategy class name to the `docker-compose.yml` file, replacing `SampleStrategy`. If you wish to run the bot with the SampleStrategy, just leave it as it is.
!!! Warning
    The `SampleStrategy` is there for your reference and to give you ideas for your own strategy.
Please always backtest the strategy and use dry-run for some time before risking real money!
Once this is done, you're ready to launch the bot in trading mode (Dry-run or Live-trading, depending on the answer you gave to the corresponding question above).
``` bash
docker-compose up -d
```
#### Docker-compose logs
Logs will be written to `user_data/logs/freqtrade.log`.
Alternatively, you can check the latest logs using `docker-compose logs -f`.
#### Database
The database will be in the user_data directory as well, and will be called `user_data/tradesv3.sqlite`.
#### Updating freqtrade with docker-compose
Updating freqtrade when using docker-compose is as simple as running the following 2 commands:
``` bash
# Download the latest image
docker-compose pull
# Restart the image
docker-compose up -d
```
This will first pull the latest image, and will then restart the container with the just pulled version.
!!! Note
You should always check the changelog for breaking changes / manual interventions required and make sure the bot starts correctly after the update.
#### Going from here
Advanced users may edit the docker-compose file further to include all possible options or arguments.
All possible freqtrade arguments will be available by running `docker-compose run --rm freqtrade <command> <optional arguments>`.
!!! Note "`docker-compose run --rm`"
Including `--rm` will clean up the container after completion, and is highly recommended for all modes except trading mode (running with `freqtrade trade` command).
##### Example: Download data with docker-compose
Download backtesting data for 5 days for the pair ETH/BTC and 1h timeframe from Binance. The data will be stored in the directory `user_data/data/` on the host.
``` bash
docker-compose run --rm freqtrade download-data --pairs ETH/BTC --exchange binance --days 5 -t 1h
```
Head over to the [Data Downloading Documentation](data-download.md) for more details on downloading data.
##### Example: Backtest with docker-compose
Run backtesting in docker-containers for SampleStrategy and specified timerange of historical data, on 5m timeframe:
``` bash
docker-compose run --rm freqtrade backtesting --config user_data/config.json --strategy SampleStrategy --timerange 20190801-20191001 -i 5m
```
Head over to the [Backtesting Documentation](backtesting.md) to learn more.
#### Additional dependencies with docker-compose
If your strategy requires dependencies not included in the default image (like [technical](https://github.com/freqtrade/technical)) - it will be necessary to build the image on your host.
For this, please create a Dockerfile containing installation steps for the additional dependencies (have a look at [Dockerfile.technical](https://github.com/freqtrade/freqtrade/blob/develop/Dockerfile.technical) for an example).
You'll then also need to modify the `docker-compose.yml` file and uncomment the build step, as well as rename the image to avoid naming collisions.
``` yaml
image: freqtrade_custom
build:
context: .
dockerfile: "./Dockerfile.<yourextension>"
```
You can then run `docker-compose build` to build the docker image, and run it using the commands described above.
## Freqtrade with docker without docker-compose
!!! Warning
The below documentation is provided for completeness and assumes that you are somewhat familiar with running docker containers. If you're just starting out with docker, we recommend to follow the [Freqtrade with docker-compose](#freqtrade-with-docker-compose) instructions.
### Download the official Freqtrade docker image
Pull the image from docker hub.
Branches / tags available can be checked out on the [Dockerhub tags page](https://hub.docker.com/r/freqtradeorg/freqtrade/tags/).
```bash
docker pull freqtradeorg/freqtrade:develop
@ -164,8 +295,7 @@ docker run -d \
```
!!! Note
db-url defaults to `sqlite:///tradesv3.sqlite` but it defaults to `sqlite://` if `dry_run=True` is being used.
To override this behaviour use a custom db-url value: i.e.: `--db-url sqlite:///tradesv3.dryrun.sqlite`
When using docker, it's best to specify `--db-url` explicitly to ensure that the database URL and the mounted database file match.
!!! Note
All available bot command line parameters can be added to the end of the `docker run` command.
View File
@ -1,4 +1,4 @@
# Edge positioning
This page explains how to use Edge Positioning module in your bot in order to enter into a trade only if the trade has a reasonable win rate and risk reward ratio, and consequently adjust your position size and stoploss.
@ -9,6 +9,7 @@ This page explains how to use Edge Positioning module in your bot in order to en
Edge does not consider anything else than buy/sell/stoploss signals. So trailing stoploss, ROI, and everything else are ignored in its calculation.
## Introduction
Trading is all about probability. No one can claim that he has a strategy working all the time. You have to assume that sometimes you lose.
But it doesn't mean there is no rule, it only means rules should work "most of the time". Let's play a game: we toss a coin, heads: I give you 10$, tails: you give me 10$. Is it an interesting game? No, it's quite boring, isn't it?
@ -22,45 +23,63 @@ Let's complicate it more: you win 80% of the time but only 2$, I win 20% of the
The question is: How do you calculate that? How do you know if you wanna play?
The answer comes to two factors:
- Win Rate
- Risk Reward Ratio
### Win Rate
Win Rate (*W*) is, over some amount of trades (*N*), the percentage of winning trades relative to the total number of trades (note that we don't consider how much you gained, only whether you won or not).
W = (Number of winning trades) / (Total number of trades) = (Number of winning trades) / N
```
W = (Number of winning trades) / (Total number of trades) = (Number of winning trades) / N
```
Complementary Loss Rate (*L*) is defined as
L = (Number of losing trades) / (Total number of trades) = (Number of losing trades) / N
```
L = (Number of losing trades) / (Total number of trades) = (Number of losing trades) / N
```
or, which is the same, as
L = 1 - W
```
L = 1 - W
```
### Risk Reward Ratio
Risk Reward Ratio (*R*) is a formula used to measure the expected gains of a given investment against the risk of loss. It is basically what you potentially win divided by what you potentially lose:
R = Profit / Loss
```
R = Profit / Loss
```
Over time, on many trades, you can calculate your risk reward by dividing your average profit on winning trades by your average loss on losing trades:
Average profit = (Sum of profits) / (Number of winning trades)
```
Average profit = (Sum of profits) / (Number of winning trades)
Average loss = (Sum of losses) / (Number of losing trades)
Average loss = (Sum of losses) / (Number of losing trades)
R = (Average profit) / (Average loss)
R = (Average profit) / (Average loss)
```
### Expectancy
At this point we can combine *W* and *R* to create an expectancy ratio. This is a simple process of multiplying the risk reward ratio by the percentage of winning trades and subtracting the percentage of losing trades, which is calculated as follows:
Expectancy Ratio = (Risk Reward Ratio X Win Rate) - Loss Rate = (R X W) - L
```
Expectancy Ratio = (Risk Reward Ratio X Win Rate) - Loss Rate = (R X W) - L
```
So let's say your Win Rate is 28% and your Risk Reward Ratio is 5:
Expectancy = (5 X 0.28) - 0.72 = 0.68
```
Expectancy = (5 X 0.28) - 0.72 = 0.68
```
Superficially, this means that on average you expect this strategy's trades to return .68 times the size of your losses. This is important for two reasons: First, it may seem obvious, but you know right away that you have a positive return. Second, you now have a number you can compare to other candidate systems to make decisions about which ones you employ.
Superficially, this means that on average you expect this strategy's trades to return 1.68 times the size of your losses. Said another way, you can expect to win $1.68 for every $1 you lose. This is important for two reasons: First, it may seem obvious, but you know right away that you have a positive return. Second, you now have a number you can compare to other candidate systems to make decisions about which ones you employ.
It is important to remember that any system with an expectancy greater than 0 is profitable using past data. The key is finding one that will be profitable in the future.
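To make the arithmetic above concrete, here is a small Python sketch that computes *W*, *L*, *R* and the expectancy from a list of trade results. The trade list is made up purely for illustration.
``` python
# Illustrative trade results in stake currency (made-up numbers).
trades = [2, 2, -5, 2, 2, -5, 2, 2, 2, -5]

wins = [t for t in trades if t > 0]
losses = [t for t in trades if t <= 0]

win_rate = len(wins) / len(trades)                  # W
loss_rate = 1 - win_rate                            # L = 1 - W
avg_profit = sum(wins) / len(wins)
avg_loss = abs(sum(losses) / len(losses))
risk_reward = avg_profit / avg_loss                 # R
expectancy = risk_reward * win_rate - loss_rate     # (R X W) - L

print(f"W={win_rate:.2f} R={risk_reward:.2f} Expectancy={expectancy:.2f}")
```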
@ -69,6 +88,7 @@ You can also use this value to evaluate the effectiveness of modifications to th
**NOTICE:** It's important to keep in mind that Edge is testing your expectancy using historical data, there's no guarantee that you will have a similar edge in the future. It's still vital to do this testing in order to build confidence in your methodology, but be wary of "curve-fitting" your approach to the historical data as things are unlikely to play out the exact same way for future trades.
## How does it work?
If enabled in config, Edge will go through historical data with a range of stoplosses in order to find buy and sell/stoploss signals. It then calculates win rate and expectancy over *N* trades for each stoploss. Here is an example:
| Pair | Stoploss | Win Rate | Risk Reward Ratio | Expectancy |
@ -83,6 +103,7 @@ The goal here is to find the best stoploss for the strategy in order to have the
The Edge module then dynamically enforces the stoploss value it evaluated on your strategy.
### Position size
Edge also dictates the stake amount for each trade to the bot according to the following factors:
- Allowed capital at risk
@ -90,13 +111,17 @@ Edge also dictates the stake amount for each trade to the bot according to the f
Allowed capital at risk is calculated as follows:
Allowed capital at risk = (Capital available_percentage) X (Allowed risk per trade)
```
Allowed capital at risk = (Capital available_percentage) X (Allowed risk per trade)
```
Stoploss is calculated as described above against historical data.
Your position size then will be:
Position size = (Allowed capital at risk) / Stoploss
```
Position size = (Allowed capital at risk) / Stoploss
```
Example:
@ -115,100 +140,30 @@ Available capital doesnt change before a position is sold. Lets assume tha
So if the bot receives another buy signal for trade 4 with a stoploss at 2%, then your position size would be **0.055 / 0.02 = 2.75 ETH**.
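As a quick sanity check, the position sizing arithmetic described above can be written out in a few lines of Python. The numbers below are purely illustrative and do not track the running example's balance changes.
``` python
# Illustrative values - not defaults.
available_capital = 5.0      # e.g. 10 ETH on the exchange with 50% considered tradable
allowed_risk = 0.01          # risk 1% of the available capital per trade
stoploss = 0.02              # stoploss evaluated by Edge for this pair (2%)

allowed_capital_at_risk = available_capital * allowed_risk
position_size = allowed_capital_at_risk / stoploss

print(position_size)  # 2.5 ETH with these numbers
```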
## Configurations
Edge module has following configuration options:
#### enabled
If true, then Edge will run periodically.
(defaults to false)
#### process_throttle_secs
How often should Edge run in seconds?
(defaults to 3600 so one hour)
#### calculate_since_number_of_days
Number of days of data against which Edge calculates Win Rate, Risk Reward and Expectancy
Note that it downloads historical data so increasing this number would lead to slowing down the bot.
(defaults to 7)
#### capital_available_percentage
This is the percentage of the total capital on exchange in stake currency.
As an example if you have 10 ETH available in your wallet on the exchange and this value is 0.5 (which is 50%), then the bot will use a maximum amount of 5 ETH for trading and considers it as available capital.
(defaults to 0.5)
#### allowed_risk
Percentage of allowed risk per trade.
(defaults to 0.01 so 1%)
#### stoploss_range_min
Minimum stoploss.
(defaults to -0.01)
#### stoploss_range_max
Maximum stoploss.
(defaults to -0.10)
#### stoploss_range_step
As an example, if this is set to -0.01 then Edge will test the strategy for the \[-0.01, -0.02, -0.03 ..., -0.09, -0.10\] range.
Note that having a smaller step means having a bigger range, which could lead to slow calculation.
If you set this parameter to -0.001, you then slow down the Edge calculation by a factor of 10.
(defaults to -0.01)
#### minimum_winrate
It filters out pairs which don't have at least minimum_winrate.
This comes in handy if you want to be conservative and not compromise win rate in favour of risk reward ratio.
(defaults to 0.60)
#### minimum_expectancy
It filters out pairs which have the expectancy lower than this number.
Having an expectancy of 0.20 means if you put 10$ on a trade you expect a 12$ return.
(defaults to 0.20)
#### min_trade_number
When calculating *W*, *R* and *E* (expectancy) against historical data, you always want to have a minimum number of trades. The higher this number is, the more reliable Edge is.
Having a win rate of 100% on a single trade doesn't mean anything at all. But having a win rate of 70% over the past 100 trades clearly means something.
(defaults to 10, it is highly recommended not to decrease this number)
#### max_trade_duration_minute
Edge will filter out trades with long duration. If a trade is profitable after 1 month, it is hard to evaluate the strategy based on it. But if most of the trades are profitable and they have a maximum duration of 30 minutes, then it is clearly a good sign.
**NOTICE:** While configuring this value, you should take into consideration your ticker interval. As an example, filtering out trades having a duration of less than one day for a strategy which has a 4h interval does not make sense. Default value is set assuming your strategy interval is relatively small (1m or 5m, etc.).
(defaults to 1 day, i.e. to 60 * 24 = 1440 minutes)
#### remove_pumps
Edge will remove sudden pumps in a given market while going through historical data. However, given that pumps happen very often in crypto markets, we recommend you keep this off.
(defaults to false)
| Parameter | Description |
|------------|-------------|
| `enabled` | If true, then Edge will run periodically. <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `process_throttle_secs` | How often should Edge run in seconds. <br>*Defaults to `3600` (once per hour).* <br> **Datatype:** Integer
| `calculate_since_number_of_days` | Number of days of data against which Edge calculates Win Rate, Risk Reward and Expectancy. <br> **Note** that it downloads historical data so increasing this number would lead to slowing down the bot. <br>*Defaults to `7`.* <br> **Datatype:** Integer
| `capital_available_percentage` | **DEPRECATED - [replaced with `tradable_balance_ratio`](configuration.md#Available balance)** This is the percentage of the total capital on exchange in stake currency. <br>As an example if you have 10 ETH available in your wallet on the exchange and this value is 0.5 (which is 50%), then the bot will use a maximum amount of 5 ETH for trading and considers it as available capital. <br>*Defaults to `0.5`.* <br> **Datatype:** Float
| `allowed_risk` | Ratio of allowed risk per trade. <br>*Defaults to `0.01` (1%)).* <br> **Datatype:** Float
| `stoploss_range_min` | Minimum stoploss. <br>*Defaults to `-0.01`.* <br> **Datatype:** Float
| `stoploss_range_max` | Maximum stoploss. <br>*Defaults to `-0.10`.* <br> **Datatype:** Float
| `stoploss_range_step` | As an example, if this is set to -0.01 then Edge will test the strategy for the `[-0.01, -0.02, -0.03 ..., -0.09, -0.10]` range. <br> **Note** that having a smaller step means having a bigger range, which could lead to slow calculation. <br> If you set this parameter to -0.001, you then slow down the Edge calculation by a factor of 10. <br>*Defaults to `-0.001`.* <br> **Datatype:** Float
| `minimum_winrate` | It filters out pairs which don't have at least minimum_winrate. <br>This comes in handy if you want to be conservative and not compromise win rate in favour of risk reward ratio. <br>*Defaults to `0.60`.* <br> **Datatype:** Float
| `minimum_expectancy` | It filters out pairs which have the expectancy lower than this number. <br>Having an expectancy of 0.20 means if you put 10$ on a trade you expect a 12$ return. <br>*Defaults to `0.20`.* <br> **Datatype:** Float
| `min_trade_number` | When calculating *W*, *R* and *E* (expectancy) against historical data, you always want to have a minimum number of trades. The higher this number is, the more reliable Edge is. <br>Having a win rate of 100% on a single trade doesn't mean anything at all. But having a win rate of 70% over the past 100 trades clearly means something. <br>*Defaults to `10` (it is highly recommended not to decrease this number).* <br> **Datatype:** Integer
| `max_trade_duration_minute` | Edge will filter out trades with long duration. If a trade is profitable after 1 month, it is hard to evaluate the strategy based on it. But if most of the trades are profitable and they have a maximum duration of 30 minutes, then it is clearly a good sign.<br>**NOTICE:** While configuring this value, you should take into consideration your timeframe (ticker interval). As an example, filtering out trades having a duration of less than one day for a strategy which has a 4h interval does not make sense. Default value is set assuming your strategy interval is relatively small (1m or 5m, etc.).<br>*Defaults to `1440` (one day).* <br> **Datatype:** Integer
| `remove_pumps` | Edge will remove sudden pumps in a given market while going through historical data. However, given that pumps happen very often in crypto markets, we recommend you keep this off.<br>*Defaults to `false`.* <br> **Datatype:** Boolean
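For reference, a minimal `edge` section of the configuration using (mostly) the defaults from the table above could look like the sketch below. The values are illustrative, not a recommendation.
``` json
"edge": {
    "enabled": true,
    "process_throttle_secs": 3600,
    "calculate_since_number_of_days": 7,
    "allowed_risk": 0.01,
    "stoploss_range_min": -0.01,
    "stoploss_range_max": -0.10,
    "stoploss_range_step": -0.01,
    "minimum_winrate": 0.60,
    "minimum_expectancy": 0.20,
    "min_trade_number": 10,
    "max_trade_duration_minute": 1440,
    "remove_pumps": false
}
```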
## Running Edge independently
You can run Edge independently in order to see the results in detail. Here is an example:
```bash
``` bash
freqtrade edge
```
View File
@ -5,7 +5,7 @@ This page combines common gotchas and informations which are exchange-specific a
## Binance
!!! Tip "Stoploss on Exchange"
Binance is currently the only exchange supporting `stoploss_on_exchange`. It provides great advantages, so we recommend to benefit from it.
Binance supports `stoploss_on_exchange` and uses stop-loss-limit orders. It provides great advantages, so we recommend to benefit from it.
### Blacklists
@ -22,6 +22,9 @@ Binance has been split into 3, and users must use the correct ccxt exchange ID f
## Kraken
!!! Tip "Stoploss on Exchange"
Kraken supports `stoploss_on_exchange` and uses stop-loss-market orders. It provides great advantages, so we recommend to benefit from it. However, since the resulting order is a stoploss-market order, sell-rates are not guaranteed, which makes this feature less secure than on other exchanges. This limitation is based on Kraken's policy ([source](https://blog.kraken.com/post/1234/announcement-delisting-pairs-and-temporary-suspension-of-advanced-order-types/) and [source2](https://blog.kraken.com/post/1494/kraken-enables-advanced-orders-and-adds-10-currency-pairs/)), which has stoploss-limit orders disabled.
### Historic Kraken data
The Kraken API only provides 720 historic candles, which is sufficient for Freqtrade dry-run and live trade modes, but is a problem for backtesting.
@ -29,6 +32,10 @@ To download data for the Kraken exchange, using `--dl-trades` is mandatory, othe
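A hedged example of such a download command is shown below; the pairs and the number of days are illustrative only.
``` bash
freqtrade download-data --exchange kraken --pairs BTC/USD ETH/USD --days 5 --dl-trades
```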
## Bittrex
### Order types
Bittrex does not support market orders. If you have a message at the bot startup about this, you should change order type values set in your configuration and/or in the strategy from `"market"` to `"limit"`. See some more details on this [here in the FAQ](faq.md#im-getting-the-exchange-bittrex-does-not-support-market-orders-message-and-cannot-run-my-strategy).
### Restricted markets
Bittrex split its exchange into US and International versions.
@ -55,6 +62,11 @@ res = [ f"{x['MarketCurrency']}/{x['BaseCurrency']}" for x in ct.publicGetMarket
print(res)
```
## All exchanges
Should you experience constant errors with Nonce (like `InvalidNonce`), it is best to regenerate the API keys. Resetting Nonce is difficult and it's usually easier to regenerate the API keys.
## Random notes for other exchanges
* The Ocean (exchange id: `theocean`) exchange uses Web3 functionality and requires `web3` python package to be installed:
@ -62,23 +74,13 @@ print(res)
$ pip3 install web3
```
### Send incomplete candles to the strategy
### Getting latest price / Incomplete candles
Most exchanges return incomplete candles via their ohlcv / klines interface.
By default, Freqtrade assumes that incomplete candles are returned and removes the last candle assuming it's an incomplete candle.
Most exchanges return current incomplete candle via their OHLCV/klines API interface.
By default, Freqtrade assumes that incomplete candle is fetched from the exchange and removes the last candle assuming it's the incomplete candle.
Whether your exchange returns incomplete candles or not can be checked using [the helper script](developer.md#Incomplete-candles) from the Contributor documentation.
If the exchange does return incomplete candles and you would like to have incomplete candles in your strategy, you can set the following parameter in the configuration file.
Due to the danger of repainting, Freqtrade does not allow you to use this incomplete candle.
``` json
{
"exchange": {
"_ft_has_params": {"ohlcv_partial_candle": false}
}
}
```
!!! Warning "Danger of repainting"
Changing this parameter makes the strategy responsible to avoid repainting and handle this accordingly. Doing this is therefore not recommended, and should only be performed by experienced users who are fully aware of the impact this setting has.
However, if your strategy needs the latest price, it can be obtained using the [data provider](strategy-customization.md#possible-options-for-dataprovider) from within the strategy.
View File
@ -45,12 +45,28 @@ the tutorial [here|Testing-new-strategies-with-Hyperopt](bot-usage.md#hyperopt-c
You can use the `/forcesell all` command from Telegram.
### I get the message "RESTRICTED_MARKET"
### I'm getting the "RESTRICTED_MARKET" message in the log
Currently known to happen for US Bittrex users.
Read [the Bittrex section about restricted markets](exchanges.md#restricted-markets) for more information.
### I'm getting the "Exchange Bittrex does not support market orders." message and cannot run my strategy
As the message says, Bittrex does not support market orders and you have one of the [order types](configuration.md/#understand-order_types) set to "market". Probably your strategy was written with other exchanges in mind and sets "market" orders for "stoploss" orders, which is correct and preferable for most of the exchanges supporting market orders (but not for Bittrex).
To fix it for Bittrex, redefine order types in the strategy to use "limit" instead of "market":
```
order_types = {
...
'stoploss': 'limit',
...
}
```
The same fix should be applied in the configuration file, if order types are defined in your custom config rather than in the strategy.
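If your order types live in the configuration file, the equivalent (partial) section would look roughly like the sketch below; the non-stoploss values are only examples.
``` json
"order_types": {
    "buy": "limit",
    "sell": "limit",
    "stoploss": "limit",
    "stoploss_on_exchange": false
}
```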
### How do I search the bot logs for something?
By default, the bot writes its log into stderr stream. This is implemented this way so that you can easily separate the bot's diagnostics messages from Backtesting, Edge and Hyperopt results, output from other various Freqtrade utility subcommands, as well as from the output of your custom `print()`'s you may have inserted into your strategy. So if you need to search the log messages with the grep utility, you need to redirect stderr to stdout and disregard stdout.
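For example, a sketch of such a search (adjust the subcommand, options and search string to your setup):
``` bash
freqtrade trade -c config.json 2>&1 >/dev/null | grep 'something'
```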
@ -84,7 +100,7 @@ $ tail -f /path/to/mylogfile.log | grep 'something'
```
from a separate terminal window.
On Windows, the `--logfilename` option is also supported by Freqtrade and you can use the `findstr` command to search the log for the string of interest:
On Windows, the `--logfile` option is also supported by Freqtrade and you can use the `findstr` command to search the log for the string of interest:
```
> type \path\to\mylogfile.log | findstr "something"
```
View File
@ -6,9 +6,7 @@ algorithms included in the `scikit-optimize` package to accomplish this. The
search will burn all your CPU cores, make your laptop sound like a fighter jet
and still take a long time.
In general, the search for best parameters starts with a few random combinations and then uses Bayesian search with a
ML regressor algorithm (currently ExtraTreesRegressor) to quickly find a combination of parameters in the search hyperspace
that minimizes the value of the [loss function](#loss-functions).
In general, the search for best parameters starts with a few random combinations (see [below](#reproducible-results) for more details) and then uses Bayesian search with a ML regressor algorithm (currently ExtraTreesRegressor) to quickly find a combination of parameters in the search hyperspace that minimizes the value of the [loss function](#loss-functions).
Hyperopt requires historic data to be available, just as backtesting does.
To learn how to get data for the pairs and exchange you're interested in, head over to the [Data Downloading](data-download.md) section of the documentation.
@ -16,6 +14,24 @@ To learn how to get data for the pairs and exchange you're interested in, head o
!!! Bug
Hyperopt can crash when used with only 1 CPU Core as found out in [Issue #1133](https://github.com/freqtrade/freqtrade/issues/1133)
## Install hyperopt dependencies
Since Hyperopt dependencies are not needed to run the bot itself, are heavy, and cannot be easily built on some platforms (like Raspberry PI), they are not installed by default. Before you run Hyperopt, you need to install the corresponding dependencies, as described in this section below.
!!! Note
Since Hyperopt is a resource intensive process, running it on a Raspberry Pi is not recommended nor supported.
### Docker
The docker-image includes hyperopt dependencies, no further action needed.
### Easy installation script (setup.sh) / Manual installation
```bash
source .env/bin/activate
pip install -r requirements-hyperopt.txt
```
## Prepare Hyperopting
Before we start digging into Hyperopt, we recommend you to take a look at
@ -31,9 +47,9 @@ This will create a new hyperopt file from a template, which will be located unde
Depending on the space you want to optimize, only some of the below are required:
* fill `buy_strategy_generator` - for buy signal optimization
* fill `indicator_space` - for buy signal optimzation
* fill `indicator_space` - for buy signal optimization
* fill `sell_strategy_generator` - for sell signal optimization
* fill `sell_indicator_space` - for sell signal optimzation
* fill `sell_indicator_space` - for sell signal optimization
!!! Note
`populate_indicators` needs to create all indicators any of these spaces may use, otherwise hyperopt will not work.
@ -47,6 +63,9 @@ Optional - can also be loaded from a strategy:
!!! Note
Assuming the optional methods are not in your hyperopt file, please use `--strategy AweSomeStrategy` which contains these methods so hyperopt can use these methods instead.
!!! Note
You always have to provide a strategy to Hyperopt, even if your custom Hyperopt class contains all methods.
Rarely you may also need to override:
* `roi_space` - for custom ROI optimization (if you need the ranges for the ROI parameters in the optimization hyperspace that differ from default)
@ -57,12 +76,12 @@ Rarely you may also need to override:
!!! Tip "Quickly optimize ROI, stoploss and trailing stoploss"
You can quickly optimize the spaces `roi`, `stoploss` and `trailing` without changing anything (i.e. without creation of a "complete" Hyperopt class with dimensions, parameters, triggers and guards, as described in this document) from the default hyperopt template by relying on your strategy to do most of the calculations.
``` python
```python
# Have a working strategy at hand.
freqtrade new-hyperopt --hyperopt EmptyHyperopt
freqtrade hyperopt --hyperopt EmptyHyperopt --spaces roi stoploss trailing --strategy MyWorkingStrategy --config config.json -e 100
```
```
### 1. Install a Custom Hyperopt File
@ -75,17 +94,17 @@ Copy the file `user_data/hyperopts/sample_hyperopt.py` into `user_data/hyperopts
There are two places you need to change in your hyperopt file to add a new buy hyperopt for testing:
- Inside `indicator_space()` - the parameters hyperopt shall be optimizing.
- Inside `populate_buy_trend()` - applying the parameters.
* Inside `indicator_space()` - the parameters hyperopt shall be optimizing.
* Inside `populate_buy_trend()` - applying the parameters.
There you have two different types of indicators: 1. `guards` and 2. `triggers`.
1. Guards are conditions like "never buy if ADX < 10", or never buy if current price is over EMA10.
2. Triggers are ones that actually trigger buy in specific moment, like "buy when EMA5 crosses over EMA10" or "buy when close price touches lower bollinger band".
2. Triggers are ones that actually trigger buy in specific moment, like "buy when EMA5 crosses over EMA10" or "buy when close price touches lower Bollinger band".
Hyperoptimization will, for each eval round, pick one trigger and possibly
multiple guards. The constructed strategy will be something like
"*buy exactly when close price touches lower bollinger band, BUT only if
"*buy exactly when close price touches lower Bollinger band, BUT only if
ADX > 10*".
If you have updated the buy strategy, i.e. changed the contents of
@ -103,9 +122,10 @@ Place the corresponding settings into the following methods
The configuration and rules are the same as for buy signals.
To avoid naming collisions in the search-space, please prefix all sell-spaces with `sell-`.
#### Using ticker-interval as part of the Strategy
#### Using timeframe as a part of the Strategy
The Strategy exposes the ticker-interval as `self.ticker_interval`. The same value is available as class-attribute `HyperoptName.ticker_interval`.
The Strategy class exposes the timeframe (ticker interval) value as the `self.ticker_interval` attribute.
The same value is available as class-attribute `HyperoptName.ticker_interval`.
In the case of the linked sample-value this would be `SampleHyperOpt.ticker_interval`.
## Solving a Mystery
@ -141,7 +161,7 @@ one we call `trigger` and use it to decide which buy trigger we want to use.
So let's write the buy strategy using these values:
``` python
```python
def populate_buy_trend(dataframe: DataFrame) -> DataFrame:
conditions = []
# GUARDS AND TRENDS
@ -159,6 +179,9 @@ So let's write the buy strategy using these values:
dataframe['macd'], dataframe['macdsignal']
))
# Check that volume is not 0
conditions.append(dataframe['volume'] > 0)
if conditions:
dataframe.loc[
reduce(lambda x, y: x & y, conditions),
@ -172,7 +195,7 @@ So let's write the buy strategy using these values:
Hyperopting will now call this `populate_buy_trend` as many times as you ask it (`epochs`)
with different value combinations. It will then use the given historical data and make
buys based on the buy signals generated with the above function and based on the results
it will end with telling you which paramter combination produced the best profits.
it will end with telling you which parameter combination produced the best profits.
The above setup expects to find ADX, RSI and Bollinger Bands in the populated indicators.
When you want to test an indicator that isn't used by the bot currently, remember to
@ -182,7 +205,7 @@ add it to the `populate_indicators()` method in your custom hyperopt file.
Each hyperparameter tuning requires a target. This is usually defined as a loss function (sometimes also called objective function), which should decrease for more desirable results, and increase for bad results.
By default, FreqTrade uses a loss function, which has been with freqtrade since the beginning and optimizes mostly for short trade duration and avoiding losses.
By default, Freqtrade uses a loss function, which has been with freqtrade since the beginning and optimizes mostly for short trade duration and avoiding losses.
A different loss function can be specified by using the `--hyperopt-loss <Class-name>` argument.
This class should be in its own file within the `user_data/hyperopts/` directory.
@ -191,7 +214,10 @@ Currently, the following loss functions are builtin:
* `DefaultHyperOptLoss` (default legacy Freqtrade hyperoptimization loss function)
* `OnlyProfitHyperOptLoss` (which takes only amount of profit into consideration)
* `SharpeHyperOptLoss` (optimizes Sharpe Ratio calculated on the trade returns)
* `SharpeHyperOptLoss` (optimizes Sharpe Ratio calculated on trade returns relative to standard deviation)
* `SharpeHyperOptLossDaily` (optimizes Sharpe Ratio calculated on **daily** trade returns relative to standard deviation)
* `SortinoHyperOptLoss` (optimizes Sortino Ratio calculated on trade returns relative to **downside** standard deviation)
* `SortinoHyperOptLossDaily` (optimizes Sortino Ratio calculated on **daily** trade returns relative to **downside** standard deviation)
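For example, to optimize for the Sharpe ratio instead of the default loss function, an invocation could look like the sketch below (the epoch count and placeholder names are illustrative):
``` bash
freqtrade hyperopt --hyperopt <hyperoptname> --strategy <strategyname> --hyperopt-loss SharpeHyperOptLoss -e 500
```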
Creation of a custom loss function is covered in the [Advanced Hyperopt](advanced-hyperopt.md) part of the documentation.
@ -206,7 +232,7 @@ We strongly recommend to use `screen` or `tmux` to prevent any connection loss.
freqtrade hyperopt --config config.json --hyperopt <hyperoptname> -e 5000 --spaces all
```
Use `<hyperoptname>` as the name of the custom hyperopt used.
The `-e` option will set how many evaluations hyperopt will do. We recommend
running at least several thousand evaluations.
@ -219,11 +245,11 @@ The `--spaces all` option determines that all possible parameters should be opti
!!! Warning
When switching parameters or changing configuration options, make sure to not use the argument `--continue` so temporary results can be removed.
### Execute Hyperopt with Different Ticker-Data Source
### Execute Hyperopt with different historical data source
If you would like to hyperopt parameters using an alternate ticker data that
you have on-disk, use the `--datadir PATH` option. Default hyperopt will
use data from directory `user_data/data`.
If you would like to hyperopt parameters using an alternate historical data set that
you have on-disk, use the `--datadir PATH` option. By default, hyperopt
uses data from directory `user_data/data`.
### Running Hyperopt with Smaller Testset
@ -265,28 +291,28 @@ The default Hyperopt Search Space, used when no `--space` command line option is
### Position stacking and disabling max market positions
In some situations, you may need to run Hyperopt (and Backtesting) with the
`--eps`/`--enable-position-staking` and `--dmmp`/`--disable-max-market-positions` arguments.
By default, hyperopt emulates the behavior of the Freqtrade Live Run/Dry Run, where only one
open trade is allowed for every traded pair. The total number of trades open for all pairs
is also limited by the `max_open_trades` setting. During Hyperopt/Backtesting this may lead to
some potential trades to be hidden (or masked) by previosly open trades.
some potential trades to be hidden (or masked) by previously open trades.
The `--eps`/`--enable-position-stacking` argument allows emulation of buying the same pair multiple times,
while `--dmmp`/`--disable-max-market-positions` disables applying `max_open_trades`
during Hyperopt/Backtesting (which is equal to setting `max_open_trades` to a very high
number).
!!! Note
Dry/live runs will **NOT** use position stacking - therefore it does make sense to also validate the strategy without this as it's closer to reality.
You can also enable position stacking in the configuration file by explicitly setting
`"position_stacking"=true`.
### Reproducible results
The search for optimal parameters starts with a few (currently 30) random combinations in the hyperspace of parameters, random Hyperopt epochs. These random epochs are marked with a leading asterisk sign at the Hyperopt output.
The search for optimal parameters starts with a few (currently 30) random combinations in the hyperspace of parameters, random Hyperopt epochs. These random epochs are marked with an asterisk character (`*`) in the first column in the Hyperopt output.
The initial state for generation of these random values (random state) is controlled by the value of the `--random-state` command line option. You can set it to some arbitrary value of your choice to obtain reproducible results.
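For example, two runs started with the same random state will begin from the same random epochs (all values besides `--random-state` are illustrative):
``` bash
freqtrade hyperopt --hyperopt <hyperoptname> --strategy <strategyname> -e 100 --random-state 42
```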
@ -323,7 +349,7 @@ method, what those values match to.
So for example you had `rsi-value: 29.0` so we would look at `rsi`-block, that translates to the following code block:
``` python
```python
(dataframe['rsi'] < 29.0)
```
@ -372,20 +398,21 @@ In order to use this best ROI table found by Hyperopt in backtesting and for liv
118: 0
}
```
As stated in the comment, you can also use it as the value of the `minimal_roi` setting in the configuration file.
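As a sketch, such a table placed in the configuration file would look like the block below. Only the last step (`"118": 0`) is taken from the truncated example above; the other steps are placeholders, and note that JSON requires the minute keys to be quoted.
``` json
"minimal_roi": {
    "0": 0.10,
    "20": 0.05,
    "60": 0.02,
    "118": 0
}
```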
#### Default ROI Search Space
If you are optimizing ROI, Freqtrade creates the 'roi' optimization hyperspace for you -- it's the hyperspace of components for the ROI tables. By default, each ROI table generated by the Freqtrade consists of 4 rows (steps). Hyperopt implements adaptive ranges for ROI tables with ranges for values in the ROI steps that depend on the ticker_interval used. By default the values vary in the following ranges (for some of the most used ticker intervals, values are rounded to 5 digits after the decimal point):
If you are optimizing ROI, Freqtrade creates the 'roi' optimization hyperspace for you -- it's the hyperspace of components for the ROI tables. By default, each ROI table generated by the Freqtrade consists of 4 rows (steps). Hyperopt implements adaptive ranges for ROI tables with ranges for values in the ROI steps that depend on the ticker_interval used. By default the values vary in the following ranges (for some of the most used timeframes, values are rounded to 5 digits after the decimal point):
| # step | 1m | | 5m | | 1h | | 1d | |
| ------ | ------ | ----------------- | -------- | ----------- | ---------- | ----------------- | ------------ | ----------------- |
| 1 | 0 | 0.01161...0.11992 | 0 | 0.03...0.31 | 0 | 0.06883...0.71124 | 0 | 0.12178...1.25835 |
| 2 | 2...8 | 0.00774...0.04255 | 10...40 | 0.02...0.11 | 120...480 | 0.04589...0.25238 | 2880...11520 | 0.08118...0.44651 |
| 3 | 4...20 | 0.00387...0.01547 | 20...100 | 0.01...0.04 | 240...1200 | 0.02294...0.09177 | 5760...28800 | 0.04059...0.16237 |
| 4 | 6...44 | 0.0 | 30...220 | 0.0 | 360...2640 | 0.0 | 8640...63360 | 0.0 |
These ranges should be sufficient in most cases. The minutes in the steps (ROI dict keys) are scaled linearly depending on the ticker interval used. The ROI values in the steps (ROI dict values) are scaled logarithmically depending on the ticker interval used.
These ranges should be sufficient in most cases. The minutes in the steps (ROI dict keys) are scaled linearly depending on the timeframe (ticker interval) used. The ROI values in the steps (ROI dict values) are scaled logarithmically depending on the timeframe used.
If you have the `generate_roi_table()` and `roi_space()` methods in your custom hyperopt file, remove them in order to utilize these adaptive ROI tables and the ROI hyperoptimization space generated by Freqtrade by default.
@ -416,6 +443,7 @@ In order to use this best stoploss value found by Hyperopt in backtesting and fo
# This attribute will be overridden if the config file contains "stoploss"
stoploss = -0.27996
```
As stated in the comment, you can also use it as the value of the `stoploss` setting in the configuration file.
#### Default Stoploss Search Space
@ -452,6 +480,7 @@ In order to use these best trailing stop parameters found by Hyperopt in backtes
trailing_stop_positive_offset = 0.06038
trailing_only_offset_is_reached = True
```
As stated in the comment, you can also use it as the values of the corresponding settings in the configuration file.
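A corresponding (partial) configuration sketch, using the values shown above where available (`trailing_stop_positive` is a placeholder since its value is not shown here):
``` json
"trailing_stop": true,
"trailing_stop_positive": 0.01,
"trailing_stop_positive_offset": 0.06038,
"trailing_only_offset_is_reached": true
```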
#### Default Trailing Stop Search Space
View File
@ -1,5 +1,5 @@
# Freqtrade
[![Build Status](https://travis-ci.org/freqtrade/freqtrade.svg?branch=develop)](https://travis-ci.org/freqtrade/freqtrade)
[![Freqtrade CI](https://github.com/freqtrade/freqtrade/workflows/Freqtrade%20CI/badge.svg)](https://github.com/freqtrade/freqtrade/actions/)
[![Coverage Status](https://coveralls.io/repos/github/freqtrade/freqtrade/badge.svg?branch=develop&service=github)](https://coveralls.io/github/freqtrade/freqtrade?branch=develop)
[![Maintainability](https://api.codeclimate.com/v1/badges/5737e6d668200b7518ff/maintainability)](https://codeclimate.com/github/freqtrade/freqtrade/maintainability)
@ -11,8 +11,10 @@
<a class="github-button" href="https://github.com/freqtrade/freqtrade/archive/master.zip" data-icon="octicon-cloud-download" data-size="large" aria-label="Download freqtrade/freqtrade on GitHub">Download</a>
<!-- Place this tag where you want the button to render. -->
<a class="github-button" href="https://github.com/freqtrade" data-size="large" aria-label="Follow @freqtrade on GitHub">Follow @freqtrade</a>
## Introduction
Freqtrade is a cryptocurrency trading bot written in Python.
Freqtrade is a crypto-currency algorithmic trading software developed in python (3.6+) and supported on Windows, macOS and Linux.
!!! Danger "DISCLAIMER"
This software is for educational purposes only. Do not risk money which you are afraid to lose. USE THE SOFTWARE AT YOUR OWN RISK. THE AUTHORS AND ALL AFFILIATES ASSUME NO RESPONSIBILITY FOR YOUR TRADING RESULTS.
@ -23,18 +25,15 @@ Freqtrade is a cryptocurrency trading bot written in Python.
## Features
- Based on Python 3.6+: For botting on any operating system — Windows, macOS and Linux.
- Persistence: Persistence is achieved through sqlite database.
- Dry-run mode: Run the bot without playing money.
- Backtesting: Run a simulation of your buy/sell strategy with historical data.
- Strategy Optimization by machine learning: Use machine learning to optimize your buy/sell strategy parameters with real exchange data.
- Edge position sizing: Calculate your win rate, risk reward ratio, the best stoploss and adjust your position size before taking a position for each specific market.
- Whitelist crypto-currencies: Select which crypto-currency you want to trade or use dynamic whitelists based on market (pair) trade volume.
- Blacklist crypto-currencies: Select which crypto-currency you want to avoid.
- Manageable via Telegram or REST APi: Manage the bot with Telegram or via the builtin REST API.
- Display profit/loss in fiat: Display your profit/loss in any of 33 fiat currencies supported.
- Daily summary of profit/loss: Receive the daily summary of your profit/loss.
- Performance status report: Receive the performance status of your current trades.
- Develop your Strategy: Write your strategy in python, using [pandas](https://pandas.pydata.org/). Example strategies to inspire you are available in the [strategy repository](https://github.com/freqtrade/freqtrade-strategies).
- Download market data: Download historical data of the exchange and the markets you may want to trade with.
- Backtest: Test your strategy on downloaded historical data.
- Optimize: Find the best parameters for your strategy using hyperoptimization which employs machine learning methods. You can optimize buy, sell, take profit (ROI), stop-loss and trailing stop-loss parameters for your strategy.
- Select markets: Create your static list or use an automatic one based on top traded volumes and/or prices (not available during backtesting). You can also explicitly blacklist markets you don't want to trade.
- Run: Test your strategy with simulated money (Dry-Run mode) or deploy it with real money (Live-Trade mode).
- Run using Edge (optional module): The concept is to find the best historical [trade expectancy](edge.md#expectancy) by markets based on variation of the stop-loss and then allow/reject markets to trade. The sizing of the trade is based on a risk of a percentage of your capital.
- Control/Monitor: Use Telegram or a REST API (start/stop the bot, show profit/loss, daily summary, current open trades results, etc.).
- Analyse: Further analysis can be performed on either Backtesting data or Freqtrade trading history (SQL database), including automated standard plots, and methods to load the data into [interactive environments](data-analysis.md).
## Requirements
@ -52,20 +51,23 @@ To run this bot we recommend you a cloud instance with a minimum of:
### Software requirements
- Docker (Recommended)
Alternatively
- Python 3.6.x
- pip (pip3)
- git
- TA-Lib
- virtualenv (Recommended)
- Docker (Recommended)
## Support
Help / Slack
For any questions not covered by the documentation or for further information about the bot, we encourage you to join our Slack channel.
### Help / Slack
For any questions not covered by the documentation or for further information about the bot, we encourage you to join our passionate Slack community.
Click [here](https://join.slack.com/t/highfrequencybot/shared_invite/enQtNjU5ODcwNjI1MDU3LTU1MTgxMjkzNmYxNWE1MDEzYzQ3YmU4N2MwZjUyNjJjODRkMDVkNjg4YTAyZGYzYzlhOTZiMTE4ZjQ4YzM0OGE) to join Slack channel.
Click [here](https://join.slack.com/t/highfrequencybot/shared_invite/enQtNjU5ODcwNjI1MDU3LTU1MTgxMjkzNmYxNWE1MDEzYzQ3YmU4N2MwZjUyNjJjODRkMDVkNjg4YTAyZGYzYzlhOTZiMTE4ZjQ4YzM0OGE) to join the Freqtrade Slack channel.
## Ready to try?
Begin by reading our installation guide [here](installation).
Begin by reading our installation guide [for docker](docker.md), or for [installation without docker](installation.md).
View File
@ -2,6 +2,8 @@
This page explains how to prepare your environment for running the bot.
Please consider using the prebuilt [docker images](docker.md) to get started quickly while trying out freqtrade and evaluating how it operates.
## Prerequisite
### Requirements
@ -14,15 +16,7 @@ Click each one for install guide:
* [virtualenv](https://virtualenv.pypa.io/en/stable/installation/) (Recommended)
* [TA-Lib](https://mrjbq7.github.io/ta-lib/install.html) (install instructions below)
### API keys
Before running your bot in production you will need to setup few
external API. In production mode, the bot will require valid Exchange API
credentials. We also recommend a [Telegram bot](telegram-usage.md#setup-your-telegram-bot) (optional but recommended).
### Setup your exchange account
You will need to create API Keys (Usually you get `key` and `secret`) from the Exchange website and insert this into the appropriate fields in the configuration or when asked by the installation script.
We also recommend a [Telegram bot](telegram-usage.md#setup-your-telegram-bot), which is optional but recommended.
## Quick start
@ -31,7 +25,7 @@ Freqtrade provides the Linux/MacOS Easy Installation script to install all depen
!!! Note
Windows installation is explained [here](#windows).
The easiest way to install and run Freqtrade is to clone the bot GitHub repository and then run the Easy Installation script, if it's available for your platform.
The easiest way to install and run Freqtrade is to clone the bot Github repository and then run the Easy Installation script, if it's available for your platform.
!!! Note "Version considerations"
When cloning the repository the default working branch has the name `develop`. This branch contains all last features (can be considered as relatively stable, thanks to automated tests). The `master` branch contains the code of the last release (done usually once per month on an approximately one week old snapshot of the `develop` branch to prevent packaging bugs, so potentially it's more stable).
@ -42,11 +36,12 @@ The easiest way to install and run Freqtrade is to clone the bot GitHub reposito
This can be achieved with the following commands:
```bash
git clone git@github.com:freqtrade/freqtrade.git
git clone https://github.com/freqtrade/freqtrade.git
cd freqtrade
git checkout master # Optional, see (1)
./setup.sh --install
```
(1) This command switches the cloned repository to the use of the `master` branch. It's not needed if you wish to stay on the `develop` branch. You may later switch between branches at any time with the `git checkout master`/`git checkout develop` commands.
## Easy Installation Script (Linux/MacOS)
@ -64,11 +59,11 @@ usage:
** --install **
With this option, the script will install everything you need to run the bot:
With this option, the script will install the bot and most dependencies:
You will need to have git and python3.6+ installed beforehand for this to work.
* Mandatory software as: `ta-lib`
* Setup your virtualenv
* Configure your `config.json` file
* Setup your virtualenv under `.env/`
This option is a combination of installation tasks, `--reset` and `--config`.
@ -82,7 +77,7 @@ This option will hard reset your branch (only if you are on either `master` or `
** --config **
Use this option to configure the `config.json` configuration file. The script will interactively ask you questions to setup your bot and create your `config.json`.
DEPRECATED - use `freqtrade new-config -c config.json` instead.
------
@ -129,6 +124,17 @@ bash setup.sh -i
#### 1. Install TA-Lib
Use the provided ta-lib installation script
```bash
sudo ./build_helpers/install_ta-lib.sh
```
!!! Note
This will use the ta-lib tar.gz included in this repository.
##### TA-Lib manual installation
Official webpage: https://mrjbq7.github.io/ta-lib/install.html
```bash
@ -184,7 +190,8 @@ python3 -m pip install -e .
# Initialize the user_directory
freqtrade create-userdir --userdir user_data/
cp config.json.example config.json
# Create a new configuration file
freqtrade new-config --config config.json
```
> *To edit the config please refer to [Bot Configuration](configuration.md).*
@ -241,14 +248,14 @@ git clone https://github.com/freqtrade/freqtrade.git
Install ta-lib according to the [ta-lib documentation](https://github.com/mrjbq7/ta-lib#windows).
As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), there is also a repository of unofficial precompiled windows Wheels [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib), which needs to be downloaded and installed using `pip install TA_Lib-0.4.17-cp36-cp36m-win32.whl` (make sure to use the version matching your python version)
As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), there is also a repository of unofficial precompiled windows Wheels [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib), which needs to be downloaded and installed using `pip install TA_Lib-0.4.18-cp38-cp38-win_amd64.whl` (make sure to use the version matching your python version)
```cmd
>cd \path\freqtrade-develop
>python -m venv .env
>.env\Scripts\activate.bat
REM optionally install ta-lib from wheel
REM >pip install TA_Lib-0.4.17-cp36-cp36m-win32.whl
REM >pip install TA_Lib-0.4.18-cp38-cp38-win_amd64.whl
>pip install -r requirements.txt
>pip install -e .
>freqtrade
@ -270,3 +277,18 @@ The easiest way is to download install Microsoft Visual Studio Community [here](
Now you have an environment ready, the next step is
[Bot Configuration](configuration.md).
## Troubleshooting
### MacOS installation error
On newer versions of MacOS, the installation may fail with errors like `error: command 'g++' failed with exit status 1`.
This error will require explicit installation of the SDK Headers, which are not installed by default in this version of MacOS.
For MacOS 10.14, this can be accomplished with the below command.
``` bash
open /Library/Developer/CommandLineTools/Packages/macOS_SDK_headers_for_macOS_10.14.pkg
```
If this file doesn't exist, then you're probably on a different version of MacOS, so you may need to consult the internet for specific resolution details.
View File
@ -32,6 +32,7 @@ usage: freqtrade plot-dataframe [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[--trade-source {DB,file}] [--export EXPORT]
[--export-filename PATH]
[--timerange TIMERANGE] [-i TICKER_INTERVAL]
[--no-trades]
optional arguments:
-h, --help show this help message and exit
@ -50,32 +51,36 @@ optional arguments:
values cause huge files. Default: 750.
--db-url PATH Override trades database URL, this is useful in custom
deployments (default: `sqlite:///tradesv3.sqlite` for
Live Run mode, `sqlite://` for Dry Run).
Live Run mode, `sqlite:///tradesv3.dryrun.sqlite` for
Dry Run).
--trade-source {DB,file}
Specify the source for trades (Can be DB or file
(backtest file)) Default: file
--export EXPORT Export backtest results, argument are: trades.
Example: `--export=trades`
--export-filename PATH
Save backtest results to the file with this filename
(default: `user_data/backtest_results/backtest-
result.json`). Requires `--export` to be set as well.
Example: `--export-filename=user_data/backtest_results
/backtest_today.json`
Save backtest results to the file with this filename.
Requires `--export` to be set as well. Example:
`--export-filename=user_data/backtest_results/backtest
_today.json`
--timerange TIMERANGE
Specify what timerange of data to use.
-i TICKER_INTERVAL, --ticker-interval TICKER_INTERVAL
Specify ticker interval (`1m`, `5m`, `30m`, `1h`,
`1d`).
--no-trades Skip using trades from backtesting file and DB.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified.
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
Specify configuration file (default:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
@ -83,10 +88,9 @@ Common arguments:
Strategy arguments:
-s NAME, --strategy NAME
Specify strategy class name (default:
`DefaultStrategy`).
Specify strategy class name which will be used by the
bot.
--strategy-path PATH Specify additional strategy lookup path.
```
Example:
@ -136,21 +140,83 @@ To plot trades from a backtesting result, use `--export-filename <filename>`
freqtrade plot-dataframe --strategy AwesomeStrategy --export-filename user_data/backtest_results/backtest-result.json -p BTC/ETH
```
### Plot dataframe basics
![plot-dataframe2](assets/plot-dataframe2.png)
The `plot-dataframe` subcommand requires backtesting data, a strategy and either a backtesting-results file or a database, containing trades corresponding to the strategy.
The resulting plot will have the following elements:
* Green triangles: Buy signals from the strategy. (Note: not every buy signal generates a trade, compare to cyan circles.)
* Red triangles: Sell signals from the strategy. (Also, not every sell signal terminates a trade, compare to red and green squares.)
* Cyan circles: Trade entry points.
* Red squares: Trade exit points for trades with loss or 0% profit.
* Green squares: Trade exit points for profitable trades.
* Indicators with values corresponding to the candle scale (e.g. SMA/EMA), as specified with `--indicators1`.
* Volume (bar chart at the bottom of the main chart).
* Indicators with values in different scales (e.g. MACD, RSI) below the volume bars, as specified with `--indicators2`.
!!! Note "Bollinger Bands"
Bollinger bands are automatically added to the plot if the columns `bb_lowerband` and `bb_upperband` exist, and are painted as a light blue area spanning from the lower band to the upper band.
#### Advanced plot configuration
An advanced plot configuration can be specified in the strategy in the `plot_config` parameter.
Additional features when using plot_config include:
* Specify colors per indicator
* Specify additional subplots
The sample plot configuration below specifies fixed colors for the indicators. Otherwise consecutive plots may produce different colorschemes each time, making comparisons difficult.
It also allows multiple subplots to display both MACD and RSI at the same time.
Sample configuration with inline comments explaining the process:
``` python
plot_config = {
'main_plot': {
# Configuration for main plot indicators.
# Specifies `ema10` to be red, and `ema50` to be a shade of gray
'ema10': {'color': 'red'},
'ema50': {'color': '#CCCCCC'},
# By omitting color, a random color is selected.
'sar': {},
},
'subplots': {
# Create subplot MACD
"MACD": {
'macd': {'color': 'blue'},
'macdsignal': {'color': 'orange'},
},
# Additional subplot RSI
"RSI": {
'rsi': {'color': 'red'},
}
}
}
```
!!! Note
The above configuration assumes that `ema10`, `ema50`, `macd`, `macdsignal` and `rsi` are columns in the DataFrame created by the strategy.
## Plot profit
![plot-profit](assets/plot-profit.png)
The `freqtrade plot-profit` subcommand shows an interactive graph with three plots:
The `plot-profit` subcommand shows an interactive graph with three plots:
1) Average closing price for all pairs
2) The summarized profit made by backtesting.
Note that this is not the real-world profit, but more of an estimate.
3) Profit for each individual pair
* Average closing price for all pairs.
* The summarized profit made by backtesting.
Note that this is not the real-world profit, but more of an estimate.
* Profit for each individual pair.
The first graph is good to get a grip of how the overall market progresses.
The second graph will show if your algorithm works or doesn't.
Perhaps you want an algorithm that steadily makes small profits, or one that acts less often, but makes big swings.
This graph will also highlight the start (and end) of the Max drawdown period.
The third graph can be useful to spot outliers, events in pairs that cause profit spikes.
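A typical invocation therefore looks something like the sketch below; the pair, timerange and timeframe are illustrative.
``` bash
freqtrade plot-profit -p BTC/ETH --timerange=20190801-20191001 -i 5m
```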
@ -173,14 +239,14 @@ optional arguments:
--export EXPORT Export backtest results, argument are: trades.
Example: `--export=trades`
--export-filename PATH
Save backtest results to the file with this filename
(default: `user_data/backtest_results/backtest-
result.json`). Requires `--export` to be set as well.
Example: `--export-filename=user_data/backtest_results
/backtest_today.json`
Save backtest results to the file with this filename.
Requires `--export` to be set as well. Example:
`--export-filename=user_data/backtest_results/backtest
_today.json`
--db-url PATH Override trades database URL, this is useful in custom
deployments (default: `sqlite:///tradesv3.sqlite` for
Live Run mode, `sqlite://` for Dry Run).
Live Run mode, `sqlite:///tradesv3.dryrun.sqlite` for
Dry Run).
--trade-source {DB,file}
Specify the source for trades (Can be DB or file
(backtest file)) Default: file
@ -190,7 +256,9 @@ optional arguments:
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified.
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
View File
@ -1,2 +1,2 @@
mkdocs-material==4.6.0
mkdocs-material==5.1.7
mdx_truly_sane_lists==1.2
View File
@ -11,6 +11,7 @@ Sample configuration:
"enabled": true,
"listen_ip_address": "127.0.0.1",
"listen_port": 8080,
"jwt_secret_key": "somethingrandom",
"username": "Freqtrader",
"password": "SuperSecret1!"
},
@ -29,7 +30,7 @@ This should return the response:
{"status":"pong"}
```
All other endpoints return sensitive info and require authentication, so are not available through a web browser.
All other endpoints return sensitive info and require authentication and are therefore not available through a web browser.
To generate a secure password, either use a password manager, or use the below code snippet.
@ -38,6 +39,9 @@ import secrets
secrets.token_hex()
```
!!! Hint
Use the same method to also generate a JWT secret key (`jwt_secret_key`).
### Configuration with docker
If you run your bot using docker, you'll need to have the bot listen to incoming connections. The security is then handled by docker.
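In practice this usually means binding the API server to all interfaces inside the container, roughly as in the sketch below (port and credentials reuse the sample values from above; treat them as placeholders).
``` json
"api_server": {
    "enabled": true,
    "listen_ip_address": "0.0.0.0",
    "listen_port": 8080,
    "jwt_secret_key": "somethingrandom",
    "username": "Freqtrader",
    "password": "SuperSecret1!"
}
```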
@ -74,7 +78,7 @@ docker run -d \
## Consuming the API
You can consume the API by using the script `scripts/rest_client.py`.
The client script only requires the `requests` module, so FreqTrade does not need to be installed on the system.
The client script only requires the `requests` module, so Freqtrade does not need to be installed on the system.
``` bash
python3 scripts/rest_client.py <command> [optional parameters]
@ -202,3 +206,28 @@ whitelist
Show the current whitelist
:returns: json object
```
## Advanced API usage using JWT tokens
!!! Note
The below should be done in an application (a Freqtrade REST API client, which fetches info via API), and is not intended to be used on a regular basis.
Freqtrade's REST API also offers JWT (JSON Web Tokens).
You can login using the following command, and subsequently use the resulting access_token.
``` bash
> curl -X POST --user Freqtrader http://localhost:8080/api/v1/token/login
{"access_token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1ODkxMTk2ODEsIm5iZiI6MTU4OTExOTY4MSwianRpIjoiMmEwYmY0NWUtMjhmOS00YTUzLTlmNzItMmM5ZWVlYThkNzc2IiwiZXhwIjoxNTg5MTIwNTgxLCJpZGVudGl0eSI6eyJ1IjoiRnJlcXRyYWRlciJ9LCJmcmVzaCI6ZmFsc2UsInR5cGUiOiJhY2Nlc3MifQ.qt6MAXYIa-l556OM7arBvYJ0SDI9J8bIk3_glDujF5g","refresh_token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1ODkxMTk2ODEsIm5iZiI6MTU4OTExOTY4MSwianRpIjoiZWQ1ZWI3YjAtYjMwMy00YzAyLTg2N2MtNWViMjIxNWQ2YTMxIiwiZXhwIjoxNTkxNzExNjgxLCJpZGVudGl0eSI6eyJ1IjoiRnJlcXRyYWRlciJ9LCJ0eXBlIjoicmVmcmVzaCJ9.d1AT_jYICyTAjD0fiQAr52rkRqtxCjUGEMwlNuuzgNQ"}
> access_token="eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1ODkxMTk2ODEsIm5iZiI6MTU4OTExOTY4MSwianRpIjoiMmEwYmY0NWUtMjhmOS00YTUzLTlmNzItMmM5ZWVlYThkNzc2IiwiZXhwIjoxNTg5MTIwNTgxLCJpZGVudGl0eSI6eyJ1IjoiRnJlcXRyYWRlciJ9LCJmcmVzaCI6ZmFsc2UsInR5cGUiOiJhY2Nlc3MifQ.qt6MAXYIa-l556OM7arBvYJ0SDI9J8bIk3_glDujF5g"
# Use access_token for authentication
> curl -X GET --header "Authorization: Bearer ${access_token}" http://localhost:8080/api/v1/count
```
Since the access token has a short lifetime (15 minutes), the `token/refresh` request should be used periodically to get a fresh access token:
``` bash
> curl -X POST --header "Authorization: Bearer ${refresh_token}" http://localhost:8080/api/v1/token/refresh
{"access_token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1ODkxMTk5NzQsIm5iZiI6MTU4OTExOTk3NCwianRpIjoiMDBjNTlhMWUtMjBmYS00ZTk0LTliZjAtNWQwNTg2MTdiZDIyIiwiZXhwIjoxNTg5MTIwODc0LCJpZGVudGl0eSI6eyJ1IjoiRnJlcXRyYWRlciJ9LCJmcmVzaCI6ZmFsc2UsInR5cGUiOiJhY2Nlc3MifQ.1seHlII3WprjjclY6DpRhen0rqdF4j6jbvxIhUFaSbs"}
```
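The same flow can also be scripted. Below is a minimal sketch using the `requests` package; the server address and credentials are placeholders taken from the sample configuration above:

``` python
import requests

SERVER = "http://localhost:8080/api/v1"      # assumes the default listen address and port
AUTH = ("Freqtrader", "SuperSecret1!")       # username / password from the api_server section

# Login once to obtain both tokens
tokens = requests.post(f"{SERVER}/token/login", auth=AUTH).json()
access_token = tokens["access_token"]
refresh_token = tokens["refresh_token"]

# Use the short-lived access token for regular requests
count = requests.get(f"{SERVER}/count",
                     headers={"Authorization": f"Bearer {access_token}"}).json()
print(count)

# Once the access token expires (~15 minutes), fetch a new one with the refresh token
refreshed = requests.post(f"{SERVER}/token/refresh",
                          headers={"Authorization": f"Bearer {refresh_token}"}).json()
access_token = refreshed["access_token"]
```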

View File

@ -1,13 +1,20 @@
# SQL Helper
This page contains some help if you want to edit your sqlite db.
## Install sqlite3
**Ubuntu/Debian installation**
Sqlite3 is a terminal-based sqlite application.
Feel free to use a visual database editor like SqliteBrowser if you feel more comfortable with that.
### Ubuntu/Debian installation
```bash
sudo apt-get install sqlite3
```
## Open the DB
```bash
sqlite3
.open <filepath>
@ -16,45 +23,61 @@ sqlite3
## Table structure
### List tables
```bash
.tables
```
### Display table structure
```bash
.schema <table_name>
```
### Trade table structure
```sql
CREATE TABLE trades (
id INTEGER NOT NULL,
exchange VARCHAR NOT NULL,
pair VARCHAR NOT NULL,
is_open BOOLEAN NOT NULL,
fee_open FLOAT NOT NULL,
fee_close FLOAT NOT NULL,
open_rate FLOAT,
open_rate_requested FLOAT,
close_rate FLOAT,
close_rate_requested FLOAT,
close_profit FLOAT,
stake_amount FLOAT NOT NULL,
amount FLOAT,
open_date DATETIME NOT NULL,
close_date DATETIME,
open_order_id VARCHAR,
stop_loss FLOAT,
initial_stop_loss FLOAT,
stoploss_order_id VARCHAR,
stoploss_last_update DATETIME,
max_rate FLOAT,
sell_reason VARCHAR,
strategy VARCHAR,
ticker_interval INTEGER,
PRIMARY KEY (id),
CHECK (is_open IN (0, 1))
CREATE TABLE trades (
id INTEGER NOT NULL,
exchange VARCHAR NOT NULL,
pair VARCHAR NOT NULL,
is_open BOOLEAN NOT NULL,
fee_open FLOAT NOT NULL,
fee_open_cost FLOAT,
fee_open_currency VARCHAR,
fee_close FLOAT NOT NULL,
fee_close_cost FLOAT,
fee_close_currency VARCHAR,
open_rate FLOAT,
open_rate_requested FLOAT,
open_trade_price FLOAT,
close_rate FLOAT,
close_rate_requested FLOAT,
close_profit FLOAT,
close_profit_abs FLOAT,
stake_amount FLOAT NOT NULL,
amount FLOAT,
open_date DATETIME NOT NULL,
close_date DATETIME,
open_order_id VARCHAR,
stop_loss FLOAT,
stop_loss_pct FLOAT,
initial_stop_loss FLOAT,
initial_stop_loss_pct FLOAT,
stoploss_order_id VARCHAR,
stoploss_last_update DATETIME,
max_rate FLOAT,
min_rate FLOAT,
sell_reason VARCHAR,
strategy VARCHAR,
ticker_interval INTEGER,
PRIMARY KEY (id),
CHECK (is_open IN (0, 1))
);
CREATE INDEX ix_trades_stoploss_order_id ON trades (stoploss_order_id);
CREATE INDEX ix_trades_pair ON trades (pair);
CREATE INDEX ix_trades_is_open ON trades (is_open);
```
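If you prefer to inspect the database from Python instead of the `sqlite3` shell, here is a minimal sketch using the standard-library `sqlite3` module (adjust the database path to your setup):

``` python
import sqlite3

# Path is an example - point it at your live or dry-run database
conn = sqlite3.connect("tradesv3.dryrun.sqlite")

# List all tables
print(conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall())

# Show the structure of the trades table
for column in conn.execute("PRAGMA table_info(trades)"):
    print(column)

conn.close()
```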
## Get all trades in the table
@ -67,22 +90,32 @@ SELECT * FROM trades;
!!! Warning
Manually selling a pair on the exchange will not be detected by the bot and it will try to sell anyway. Whenever possible, `forcesell <tradeid>` should be used to accomplish the same thing.
It is strongly advised to backup your database file before making any manual changes.
It is strongly advised to backup your database file before making any manual changes.
!!! Note
This should not be necessary after /forcesell, as forcesell orders are closed automatically by the bot on the next iteration.
```sql
UPDATE trades
SET is_open=0, close_date=<close_date>, close_rate=<close_rate>, close_profit=close_rate/open_rate-1, sell_reason=<sell_reason>
SET is_open=0,
close_date=<close_date>,
close_rate=<close_rate>,
close_profit=close_rate/open_rate-1,
close_profit_abs = (amount * <close_rate> * (1 - fee_close)) - (amount * open_rate * (1 + fee_open)),
sell_reason=<sell_reason>
WHERE id=<trade_ID_to_update>;
```
##### Example
### Example
```sql
UPDATE trades
SET is_open=0, close_date='2017-12-20 03:08:45.103418', close_rate=0.19638016, close_profit=0.0496, sell_reason='force_sell'
SET is_open=0,
close_date='2017-12-20 03:08:45.103418',
close_rate=0.19638016,
close_profit=0.0496,
close_profit_abs = (amount * 0.19638016 * (1 - fee_close)) - (amount * open_rate * (1 + fee_open)),
sell_reason='force_sell'
WHERE id=31;
```
@ -99,10 +132,3 @@ VALUES ('bittrex', 'ETH/BTC', 1, 0.0025, 0.0025, <open_rate>, <stake_amount>, <a
INSERT INTO trades (exchange, pair, is_open, fee_open, fee_close, open_rate, stake_amount, amount, open_date)
VALUES ('bittrex', 'ETH/BTC', 1, 0.0025, 0.0025, 0.00258580, 0.002, 0.7715262081, '2017-11-28 12:44:24.000000')
```
## Fix wrong fees in the table
If your DB was created before [PR#200](https://github.com/freqtrade/freqtrade/pull/200) was merged (before 12/23/17).
```sql
UPDATE trades SET fee=0.0025 WHERE fee=0.005;
```

View File

@ -27,7 +27,7 @@ So this parameter will tell the bot how often it should update the stoploss orde
This same logic will reapply a stoploss order on the exchange should you cancel it accidentally.
!!! Note
Stoploss on exchange is only supported for Binance as of now.
Stoploss on exchange is only supported for Binance (stop-loss-limit) and Kraken (stop-loss-market) as of now.
## Static Stop Loss

91
docs/strategy-advanced.md Normal file
View File

@ -0,0 +1,91 @@
# Advanced Strategies
This page explains some advanced concepts available for strategies.
If you're just getting started, please familiarize yourself with the methods described in the [Strategy Customization](strategy-customization.md) documentation first.
## Custom order timeout rules
Simple, time-based order timeouts can be configured either via the strategy or in the `unfilledtimeout` section of the configuration.
However, freqtrade also offers a custom callback for both order types, which allows you to decide based on custom criteria whether an order has timed out.
!!! Note
Unfilled order timeouts are not relevant during backtesting or hyperopt, and are only relevant during real (live) trading. Therefore these methods are only called in these circumstances.
### Custom order timeout example
A simple example, which applies different unfilled timeouts depending on the price of the asset, can be seen below.
It applies a tight timeout for higher priced assets, while allowing more time to fill on cheap coins.
The function must return either `True` (cancel order) or `False` (keep order alive).
``` python
from datetime import datetime, timedelta
from freqtrade.persistence import Trade
from freqtrade.strategy.interface import IStrategy  # needed for the class definition below
class Awesomestrategy(IStrategy):
# ... populate_* methods
# Set unfilledtimeout to 25 hours, since our maximum timeout from below is 24 hours.
unfilledtimeout = {
'buy': 60 * 25,
'sell': 60 * 25
}
def check_buy_timeout(self, pair: str, trade: 'Trade', order: dict, **kwargs) -> bool:
if trade.open_rate > 100 and trade.open_date < datetime.utcnow() - timedelta(minutes=5):
return True
elif trade.open_rate > 10 and trade.open_date < datetime.utcnow() - timedelta(minutes=3):
return True
elif trade.open_rate < 1 and trade.open_date < datetime.utcnow() - timedelta(hours=24):
return True
return False
def check_sell_timeout(self, pair: str, trade: 'Trade', order: dict, **kwargs) -> bool:
if trade.open_rate > 100 and trade.open_date < datetime.utcnow() - timedelta(minutes=5):
return True
elif trade.open_rate > 10 and trade.open_date < datetime.utcnow() - timedelta(minutes=3):
return True
elif trade.open_rate < 1 and trade.open_date < datetime.utcnow() - timedelta(hours=24):
return True
return False
```
!!! Note
For the above example, `unfilledtimeout` must be set to something bigger than 24h, otherwise that type of timeout will apply first.
### Custom order timeout example (using additional data)
``` python
from datetime import datetime
from freqtrade.persistence import Trade
from freqtrade.strategy.interface import IStrategy  # needed for the class definition below
class Awesomestrategy(IStrategy):
# ... populate_* methods
# Set unfilledtimeout to 25 hours, since our maximum timeout from below is 24 hours.
unfilledtimeout = {
'buy': 60 * 25,
'sell': 60 * 25
}
def check_buy_timeout(self, pair: str, trade: Trade, order: dict, **kwargs) -> bool:
ob = self.dp.orderbook(pair, 1)
current_price = ob['bids'][0][0]
# Cancel buy order if price is more than 2% above the order.
if current_price > order['price'] * 1.02:
return True
return False
def check_sell_timeout(self, pair: str, trade: Trade, order: dict, **kwargs) -> bool:
ob = self.dp.orderbook(pair, 1)
current_price = ob['asks'][0][0]
# Cancel sell order if price is more than 2% below the order.
if current_price < order['price'] * 0.98:
return True
return False
```

View File

@ -1,7 +1,6 @@
# Strategy Customization
This page explains where to customize your strategies, and add new
indicators.
This page explains where to customize your strategies, and add new indicators.
## Install a custom strategy file
@ -84,7 +83,7 @@ def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame
Performance Note: For the best performance be frugal on the number of indicators
you are using. Only uncomment the indicators you are using in your strategies
or your hyperopt configuration, otherwise you will waste memory and CPU usage.
:param dataframe: Raw data from the exchange and parsed by parse_ticker_dataframe()
:param dataframe: Dataframe with data from the exchange
:param metadata: Additional information, like the currently traded pair
:return: a Dataframe with all mandatory indicators for the strategies
"""
@ -249,6 +248,23 @@ minimal_roi = {
While technically not completely disabled, this would sell once the trade reaches 10000% Profit.
To use times based on candle duration (ticker_interval or timeframe), the following snippet can be handy.
This will allow you to change the ticker_interval for the strategy, and ROI times will still be set as candles (e.g. after 3 candles ...)
``` python
from freqtrade.exchange import timeframe_to_minutes
from freqtrade.strategy.interface import IStrategy  # needed for the class definition below
class AwesomeStrategy(IStrategy):
ticker_interval = "1d"
ticker_interval_mins = timeframe_to_minutes(ticker_interval)
minimal_roi = {
"0": 0.05, # 5% for the first 3 candles
str(ticker_interval_mins * 3): 0.02,  # 2% after 3 candles
str(ticker_interval_mins * 6): 0.01,  # 1% after 6 candles
}
```
### Stoploss
Setting a stoploss is highly recommended to protect your capital from strong moves against you.
@ -267,13 +283,14 @@ If your exchange supports it, it's recommended to also set `"stoploss_on_exchang
For more information on order_types please look [here](configuration.md#understand-order_types).
### Ticker interval
### Timeframe (ticker interval)
This is the set of candles the bot should download and use for the analysis.
Common values are `"1m"`, `"5m"`, `"15m"`, `"1h"`, however all values supported by your exchange should work.
Please note that the same buy/sell signals may work with one interval, but not the other.
This setting is accessible within the strategy by using `self.ticker_interval`.
Please note that the same buy/sell signals may work well with one timeframe, but not with the others.
This setting is accessible within the strategy methods as the `self.ticker_interval` attribute.
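As a minimal, illustrative sketch (indicator and lookback values are arbitrary), the attribute can be used to adapt calculations to the configured timeframe:

``` python
def populate_indicators(self, dataframe, metadata):
    # self.ticker_interval holds the configured timeframe, e.g. "5m" or "1h"
    if self.ticker_interval == "5m":
        # use a longer lookback on short timeframes (arbitrary example values)
        dataframe['sma'] = dataframe['close'].rolling(50).mean()
    else:
        dataframe['sma'] = dataframe['close'].rolling(20).mean()
    return dataframe
```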
### Metadata dict
@ -307,67 +324,14 @@ class Awesomestrategy(IStrategy):
!!! Note
If the data is pair-specific, make sure to use pair as one of the keys in the dictionary.
### Additional data (DataProvider)
***
The strategy provides access to the `DataProvider`. This allows you to get additional data to use in your strategy.
All methods return `None` in case of failure (do not raise an exception).
Please always check the mode of operation to select the correct method to get data (samples see below).
#### Possible options for DataProvider
- `available_pairs` - Property with tuples listing cached pairs with their intervals (pair, interval).
- `ohlcv(pair, timeframe)` - Currently cached ticker data for the pair, returns DataFrame or empty DataFrame.
- `historic_ohlcv(pair, timeframe)` - Returns historical data stored on disk.
- `get_pair_dataframe(pair, timeframe)` - This is a universal method, which returns either historical data (for backtesting) or cached live data (for the Dry-Run and Live-Run modes).
- `orderbook(pair, maximum)` - Returns latest orderbook data for the pair, a dict with bids/asks with a total of `maximum` entries.
- `market(pair)` - Returns market data for the pair: fees, limits, precisions, activity flag, etc. See [ccxt documentation](https://github.com/ccxt/ccxt/wiki/Manual#markets) for more details on Market data structure.
- `runmode` - Property containing the current runmode.
#### Example: fetch live ohlcv / historic data for the first informative pair
``` python
if self.dp:
inf_pair, inf_timeframe = self.informative_pairs()[0]
informative = self.dp.get_pair_dataframe(pair=inf_pair,
timeframe=inf_timeframe)
```
!!! Warning "Warning about backtesting"
Be carefull when using dataprovider in backtesting. `historic_ohlcv()` (and `get_pair_dataframe()`
for the backtesting runmode) provides the full time-range in one go,
so please be aware of it and make sure to not "look into the future" to avoid surprises when running in dry/live mode).
!!! Warning "Warning in hyperopt"
This option cannot currently be used during hyperopt.
#### Orderbook
``` python
if self.dp:
if self.dp.runmode in ('live', 'dry_run'):
ob = self.dp.orderbook(metadata['pair'], 1)
dataframe['best_bid'] = ob['bids'][0][0]
dataframe['best_ask'] = ob['asks'][0][0]
```
!!! Warning
The order book is not part of the historic data which means backtesting and hyperopt will not work if this
method is used.
#### Available Pairs
``` python
if self.dp:
for pair, ticker in self.dp.available_pairs:
print(f"available {pair}, {ticker}")
```
### Additional data (informative_pairs)
#### Get data for non-tradeable pairs
Data for additional, informative pairs (reference pairs) can be beneficial for some strategies.
Ohlcv data for these pairs will be downloaded as part of the regular whitelist refresh process and is available via `DataProvider` just as other pairs (see above).
Ohlcv data for these pairs will be downloaded as part of the regular whitelist refresh process and is available via `DataProvider` just as other pairs (see below).
These pairs will **not** be traded unless they are also specified in the pair whitelist, or have been selected by Dynamic Whitelisting.
The pairs need to be specified as tuples in the format `("pair", "interval")`, with pair as the first and time interval as the second argument.
@ -387,6 +351,125 @@ def informative_pairs(self):
It is however better to use resampling to longer time-intervals when possible
to avoid hammering the exchange with too many requests and risk being blocked.
***
### Additional data (DataProvider)
The strategy provides access to the `DataProvider`. This allows you to get additional data to use in your strategy.
All methods return `None` in case of failure (do not raise an exception).
Please always check the mode of operation to select the correct method to get data (see the samples below).
#### Possible options for DataProvider
- [`available_pairs`](#available_pairs) - Property with tuples listing cached pairs with their intervals (pair, interval).
- [`current_whitelist()`](#current_whitelist) - Returns a current list of whitelisted pairs. Useful for accessing dynamic whitelists (i.e. VolumePairList)
- [`get_pair_dataframe(pair, timeframe)`](#get_pair_dataframepair-timeframe) - This is a universal method, which returns either historical data (for backtesting) or cached live data (for the Dry-Run and Live-Run modes).
- `historic_ohlcv(pair, timeframe)` - Returns historical data stored on disk.
- `market(pair)` - Returns market data for the pair: fees, limits, precisions, activity flag, etc. See [ccxt documentation](https://github.com/ccxt/ccxt/wiki/Manual#markets) for more details on the Market data structure.
- `ohlcv(pair, timeframe)` - Currently cached candle (OHLCV) data for the pair, returns DataFrame or empty DataFrame.
- [`orderbook(pair, maximum)`](#orderbookpair-maximum) - Returns latest orderbook data for the pair, a dict with bids/asks with a total of `maximum` entries.
- [`ticker(pair)`](#tickerpair) - Returns current ticker data for the pair. See [ccxt documentation](https://github.com/ccxt/ccxt/wiki/Manual#price-tickers) for more details on the Ticker data structure.
- `runmode` - Property containing the current runmode.
#### Example Usages:
#### *available_pairs*
``` python
if self.dp:
for pair, timeframe in self.dp.available_pairs:
print(f"available {pair}, {timeframe}")
```
#### *current_whitelist()*
Imagine you've developed a strategy that trades the `5m` timeframe using signals generated from a `1d` timeframe on the top 10 pairs by volume.
The strategy might look something like this:
*Scan through the top 10 pairs by volume using the `VolumePairList` every 5 minutes and use a 14 day ATR to buy and sell.*
Due to the limited available data, it's very difficult to resample our `5m` candles into daily candles for use in a 14 day ATR. Most exchanges limit us to just 500 candles which effectively gives us around 1.74 daily candles. We need 14 days at least!
Since we can't resample our data, we will have to use an informative pair; and since our whitelist will be dynamic, we don't know which pair(s) to use.
This is where calling `self.dp.current_whitelist()` comes in handy.
```python
class SampleStrategy(IStrategy):
# strategy init stuff...
ticker_interval = '5m'
# more strategy init stuff..
def informative_pairs(self):
# get access to all pairs available in whitelist.
pairs = self.dp.current_whitelist()
# Assign tf to each pair so they can be downloaded and cached for strategy.
informative_pairs = [(pair, '1d') for pair in pairs]
return informative_pairs
def populate_indicators(self, dataframe, metadata):
# Get the informative pair
informative = self.dp.get_pair_dataframe(pair=metadata['pair'], timeframe='1d')
# Get the 14 day ATR.
atr = ta.ATR(informative, timeperiod=14)
# Do other stuff
```
#### *get_pair_dataframe(pair, timeframe)*
``` python
# fetch live / historical candle (OHLCV) data for the first informative pair
if self.dp:
inf_pair, inf_timeframe = self.informative_pairs()[0]
informative = self.dp.get_pair_dataframe(pair=inf_pair,
timeframe=inf_timeframe)
```
!!! Warning "Warning about backtesting"
Be careful when using the dataprovider in backtesting. `historic_ohlcv()` (and `get_pair_dataframe()`
for the backtesting runmode) provides the full time-range in one go,
so please be aware of it and make sure not to "look into the future" to avoid surprises when running in dry/live mode.
!!! Warning "Warning in hyperopt"
This option cannot currently be used during hyperopt.
#### *orderbook(pair, maximum)*
``` python
if self.dp:
if self.dp.runmode.value in ('live', 'dry_run'):
ob = self.dp.orderbook(metadata['pair'], 1)
dataframe['best_bid'] = ob['bids'][0][0]
dataframe['best_ask'] = ob['asks'][0][0]
```
!!! Warning
The order book is not part of the historic data which means backtesting and hyperopt will not work if this
method is used.
#### *ticker(pair)*
``` python
if self.dp:
if self.dp.runmode.value in ('live', 'dry_run'):
ticker = self.dp.ticker(metadata['pair'])
dataframe['last_price'] = ticker['last']
dataframe['volume24h'] = ticker['quoteVolume']
dataframe['vwap'] = ticker['vwap']
```
!!! Warning
Although the ticker data structure is a part of the ccxt Unified Interface, the values returned by this method can
vary for different exchanges. For instance, many exchanges do not return `vwap` values, the FTX exchange
does not always fill in the `last` field (so it can be None), etc. So you need to carefully verify the ticker
data returned from the exchange and add appropriate error handling / defaults.
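One way to guard against missing fields is to fall back to defaults, for example via `dict.get()` - a minimal sketch (the fallback values are only illustrative):

``` python
if self.dp:
    if self.dp.runmode.value in ('live', 'dry_run'):
        ticker = self.dp.ticker(metadata['pair'])
        # Some exchanges do not populate every field - guard with .get() and defaults
        dataframe['last_price'] = ticker.get('last') or ticker.get('close')
        dataframe['volume24h'] = ticker.get('quoteVolume', 0.0)
        dataframe['vwap'] = ticker.get('vwap', float('nan'))
```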
***
### Additional data (Wallets)
The strategy provides access to the `Wallets` object. This contains the current balances on the exchange.
@ -409,6 +492,7 @@ if self.wallets:
- `get_used(asset)` - currently tied up balance (open orders)
- `get_total(asset)` - total available balance - the sum of the two above (see the sketch below)
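A minimal sketch of how these methods could be combined (the currency and threshold are arbitrary examples):

``` python
if self.wallets:
    free_btc = self.wallets.get_free('BTC')
    used_btc = self.wallets.get_used('BTC')
    total_btc = self.wallets.get_total('BTC')
    # Arbitrary example: warn when most of the balance is tied up in open orders
    if total_btc and used_btc / total_btc > 0.5:
        print(f"More than half of the BTC balance is tied up: {used_btc} of {total_btc}")
```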
***
### Additional data (Trades)
A history of Trades can be retrieved in the strategy by querying the database.
@ -422,7 +506,7 @@ from freqtrade.persistence import Trade
The following example queries for the current pair and trades from today, however other filters can easily be added.
``` python
if self.config['runmode'] in ('live', 'dry_run'):
if self.config['runmode'].value in ('live', 'dry_run'):
trades = Trade.get_trades([Trade.pair == metadata['pair'],
Trade.open_date > datetime.utcnow() - timedelta(days=1),
Trade.is_open == False,
@ -434,7 +518,7 @@ if self.config['runmode'] in ('live', 'dry_run'):
Get amount of stake_currency currently invested in Trades:
``` python
if self.config['runmode'] in ('live', 'dry_run'):
if self.config['runmode'].value in ('live', 'dry_run'):
total_stakes = Trade.total_open_trades_stakes()
```
@ -442,7 +526,7 @@ Retrieve performance per pair.
Returns a List of dicts per pair.
``` python
if self.config['runmode'] in ('live', 'dry_run'):
if self.config['runmode'].value in ('live', 'dry_run'):
performance = Trade.get_overall_performance()
```
@ -455,6 +539,51 @@ Sample return value: ETH/BTC had 5 trades, with a total profit of 1.5% (ratio of
!!! Warning
Trade history is not available during backtesting or hyperopt.
### Prevent trades from happening for a specific pair
Freqtrade locks pairs automatically for the current candle (until that candle is over) when a pair is sold, preventing an immediate re-buy of that pair.
Locked pairs will show the message `Pair <pair> is currently locked.`.
#### Locking pairs from within the strategy
Sometimes it may be desired to lock a pair after certain events happen (e.g. multiple losing trades in a row).
Freqtrade has an easy method to do this from within the strategy, by calling `self.lock_pair(pair, until)`.
`until` must be a datetime object in the future, after which trading will be re-enabled for that pair.
Locks can also be lifted manually, by calling `self.unlock_pair(pair)`.
To verify if a pair is currently locked, use `self.is_pair_locked(pair)`.
!!! Note
Locked pairs are not persisted, so a restart of the bot or a call to `/reload_conf` will reset currently locked pairs.
!!! Warning
Pair locking does not function during backtesting.
##### Pair locking example
``` python
from freqtrade.persistence import Trade
from datetime import timedelta, datetime, timezone
# Put the above lines at the top of the strategy file, next to all the other imports
# --------
# Within populate indicators (or populate_buy):
if self.config['runmode'].value in ('live', 'dry_run'):
# fetch closed trades for the last 2 days
trades = Trade.get_trades([Trade.pair == metadata['pair'],
Trade.open_date > datetime.utcnow() - timedelta(days=2),
Trade.is_open == False,
]).all()
# Analyze the conditions you'd like to lock the pair for - this will probably be different for every strategy
sumprofit = sum(trade.close_profit for trade in trades)
if sumprofit < 0:
# Lock pair for 12 hours
self.lock_pair(metadata['pair'], until=datetime.now(timezone.utc) + timedelta(hours=12))
```
### Print created dataframe
To inspect the created dataframe, you can issue a print-statement in either `populate_buy_trend()` or `populate_sell_trend()`.
@ -479,11 +608,6 @@ def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
Printing more than a few rows is also possible (simply use `print(dataframe)` instead of `print(dataframe.tail())`), however not recommended, as that will be very verbose (~500 lines per pair every 5 seconds).
### Where can i find a strategy template?
The strategy template is located in the file
[user_data/strategies/sample_strategy.py](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/templates/sample_strategy.py).
### Specify custom strategy location
If you want to use a strategy from a different directory you can pass `--strategy-path`
@ -492,6 +616,27 @@ If you want to use a strategy from a different directory you can pass `--strateg
freqtrade trade --strategy AwesomeStrategy --strategy-path /some/directory
```
### Derived strategies
Strategies can be derived from other strategies. This avoids duplication of your custom strategy code. You can use this technique to override small parts of your main strategy, leaving the rest untouched:
``` python
class MyAwesomeStrategy(IStrategy):
...
stoploss = 0.13
trailing_stop = False
# All other attributes and methods are here as they
# should be in any custom strategy...
...
class MyAwesomeStrategy2(MyAwesomeStrategy):
# Override something
stoploss = 0.08
trailing_stop = True
```
Both attributes and methods may be overridden, altering the behavior of the original strategy as needed.
### Common mistakes when developing strategies
Backtesting analyzes the whole time-range at once for performance reasons. Because of this, strategy authors need to make sure that strategies do not look ahead into the future.

View File

@ -1,24 +1,28 @@
# Strategy analysis example
Debugging a strategy can be time-consuming. FreqTrade offers helper functions to visualize raw data.
Debugging a strategy can be time-consuming. Freqtrade offers helper functions to visualize raw data.
The following assumes you work with SampleStrategy, 5m timeframe data from Binance, and that you have downloaded the data into the data directory in the default location.
## Setup
```python
from pathlib import Path
from freqtrade.configuration import Configuration
# Customize these according to your needs.
# Initialize empty configuration object
config = Configuration.from_files([])
# Optionally, use existing configuration file
# config = Configuration.from_files(["config.json"])
# Define some constants
timeframe = "5m"
config["ticker_interval"] = "5m"
# Name of the strategy class
strategy_name = 'SampleStrategy'
# Path to user data
user_data_dir = Path('user_data')
# Location of the strategy
strategy_location = user_data_dir / 'strategies'
config["strategy"] = "SampleStrategy"
# Location of the data
data_location = Path(user_data_dir, 'data', 'binance')
data_location = Path(config['user_data_dir'], 'data', 'binance')
# Pair to analyze - Only use one pair here
pair = "BTC_USDT"
```
@ -29,7 +33,7 @@ pair = "BTC_USDT"
from freqtrade.data.history import load_pair_history
candles = load_pair_history(datadir=data_location,
timeframe=timeframe,
timeframe=config["ticker_interval"],
pair=pair)
# Confirm success
@ -44,9 +48,7 @@ candles.head()
```python
# Load strategy using values set above
from freqtrade.resolvers import StrategyResolver
strategy = StrategyResolver({'strategy': strategy_name,
'user_data_dir': user_data_dir,
'strategy_path': strategy_location}).strategy
strategy = StrategyResolver.load_strategy(config)
# Generate buy/sell signals using strategy
df = strategy.analyze_ticker(candles, {'pair': pair})
@ -86,7 +88,7 @@ Analyze a trades dataframe (also used below for plotting)
from freqtrade.data.btanalysis import load_backtest_data
# Load backtest results
trades = load_backtest_data(user_data_dir / "backtest_results/backtest-result.json")
trades = load_backtest_data(config["user_data_dir"] / "backtest_results/backtest-result.json")
# Show value-counts per pair
trades.groupby("pair")["sell_reason"].value_counts()
@ -119,7 +121,6 @@ from freqtrade.data.btanalysis import analyze_trade_parallelism
# Analyze the above
parallel_trades = analyze_trade_parallelism(trades, '5m')
parallel_trades.plot()
```
@ -132,11 +133,14 @@ Freqtrade offers interactive plotting capabilities based on plotly.
from freqtrade.plot.plotting import generate_candlestick_graph
# Limit graph period to keep plotly quick and reactive
# Filter trades to one pair
trades_red = trades.loc[trades['pair'] == pair]
data_red = data['2019-06-01':'2019-06-10']
# Generate candlestick graph
graph = generate_candlestick_graph(pair=pair,
data=data_red,
trades=trades,
trades=trades_red,
indicators1=['sma20', 'ema50', 'ema55'],
indicators2=['rsi', 'macd', 'macdsignal', 'macdhist']
)

View File

@ -55,7 +55,7 @@ official commands. You can ask at any moment for help with `/help`.
| `/reload_conf` | | Reloads the configuration file
| `/show_config` | | Shows part of the current configuration with relevant settings to operation
| `/status` | | Lists all open trades
| `/status table` | | List all open trades in a table format
| `/status table` | | List all open trades in a table format. Pending buy orders are marked with an asterisk (*). Pending sell orders are marked with a double asterisk (**)
| `/count` | | Displays number of trades used and available
| `/profit` | | Display a summary of your profit/loss from close trades and some stats about your performance
| `/forcesell <trade_id>` | | Instantly sells the given trade (Ignoring `minimum_roi`).

View File

@ -36,6 +36,38 @@ optional arguments:
└── sample_strategy.py
```
## Create new config
Creates a new configuration file, asking a few questions about the most important configuration options.
```
usage: freqtrade new-config [-h] [-c PATH]
optional arguments:
-h, --help show this help message and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`). Multiple --config options may be used. Can be set to `-`
to read config from stdin.
```
!!! Warning
Only vital questions are asked. Freqtrade offers a lot more configuration possibilities, which are listed in the [Configuration documentation](configuration.md#configuration-parameters).
### Create config examples
```
$ freqtrade new-config --config config_binance.json
? Do you want to enable Dry-run (simulated trades)? Yes
? Please insert your stake currency: BTC
? Please insert your stake amount: 0.05
? Please insert max_open_trades (Integer or 'unlimited'): 3
? Please insert your timeframe (ticker interval): 5m
? Please insert your display Currency (for reporting): USD
? Select exchange binance
? Do you want to enable Telegram? No
```
## Create new strategy
Creates a new strategy from a template similar to SampleStrategy.
@ -45,7 +77,7 @@ Results will be located in `user_data/strategies/<strategyclassname>.py`.
``` output
usage: freqtrade new-strategy [-h] [--userdir PATH] [-s NAME]
[--template {full,minimal}]
[--template {full,minimal,advanced}]
optional arguments:
-h, --help show this help message and exit
@ -54,10 +86,10 @@ optional arguments:
-s NAME, --strategy NAME
Specify strategy class name which will be used by the
bot.
--template {full,minimal}
Use a template which is either `minimal` or `full`
(containing multiple sample indicators). Default:
`full`.
--template {full,minimal,advanced}
Use a template which is either `minimal`, `full`
(containing multiple sample indicators) or `advanced`.
Default: `full`.
```
@ -73,6 +105,12 @@ With custom user directory
freqtrade new-strategy --userdir ~/.freqtrade/ --strategy AwesomeStrategy
```
Using the advanced template (populates all optional functions and methods)
```bash
freqtrade new-strategy --strategy AwesomeStrategy --template advanced
```
## Create new hyperopt
Creates a new hyperopt from a template similar to SampleHyperopt.
@ -82,7 +120,7 @@ Results will be located in `user_data/hyperopts/<classname>.py`.
``` output
usage: freqtrade new-hyperopt [-h] [--userdir PATH] [--hyperopt NAME]
[--template {full,minimal}]
[--template {full,minimal,advanced}]
optional arguments:
-h, --help show this help message and exit
@ -90,10 +128,10 @@ optional arguments:
Path to userdata directory.
--hyperopt NAME Specify hyperopt class name which will be used by the
bot.
--template {full,minimal}
Use a template which is either `minimal` or `full`
(containing multiple sample indicators). Default:
`full`.
--template {full,minimal,advanced}
Use a template which is either `minimal`, `full`
(containing multiple sample indicators) or `advanced`.
Default: `full`.
```
### Sample usage of new-hyperopt
@ -108,6 +146,97 @@ With custom user directory
freqtrade new-hyperopt --userdir ~/.freqtrade/ --hyperopt AwesomeHyperopt
```
## List Strategies and List Hyperopts
Use the `list-strategies` subcommand to see all strategies in one particular directory and the `list-hyperopts` subcommand to list custom Hyperopts.
These subcommands are useful for finding problems in your environment with loading strategies or hyperopt classes: modules with strategies or hyperopt classes that contain errors and failed to load are printed in red (LOAD FAILED), while strategies or hyperopt classes with duplicate names are printed in yellow (DUPLICATE NAME).
```
usage: freqtrade list-strategies [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH]
[--strategy-path PATH] [-1] [--no-color]
optional arguments:
-h, --help show this help message and exit
--strategy-path PATH Specify additional strategy lookup path.
-1, --one-column Print output in one column.
--no-color Disable colorization of hyperopt results. May be
useful if you are redirecting output to a file.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
```
usage: freqtrade list-hyperopts [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH]
[--hyperopt-path PATH] [-1] [--no-color]
optional arguments:
-h, --help show this help message and exit
--hyperopt-path PATH Specify additional lookup path for Hyperopt and
Hyperopt Loss functions.
-1, --one-column Print output in one column.
--no-color Disable colorization of hyperopt results. May be
useful if you are redirecting output to a file.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
!!! Warning
Using these commands will try to load all python files from a directory. This can be a security risk if untrusted files reside in this directory, since all module-level code is executed.
Example: Search default strategies and hyperopts directories (within the default userdir).
``` bash
freqtrade list-strategies
freqtrade list-hyperopts
```
Example: Search strategies and hyperopts directory within the userdir.
``` bash
freqtrade list-strategies --userdir ~/.freqtrade/
freqtrade list-hyperopts --userdir ~/.freqtrade/
```
Example: Search dedicated strategy path.
``` bash
freqtrade list-strategies --strategy-path ~/.freqtrade/strategies/
```
Example: Search dedicated hyperopt path.
``` bash
freqtrade list-hyperopts --hyperopt-path ~/.freqtrade/hyperopts/
```
## List Exchanges
Use the `list-exchanges` subcommand to see the exchanges available for the bot.
@ -135,23 +264,34 @@ All exchanges supported by the ccxt library: _1btcxe, acx, adara, allcoin, anxpr
## List Timeframes
Use the `list-timeframes` subcommand to see the list of ticker intervals (timeframes) available for the exchange.
Use the `list-timeframes` subcommand to see the list of timeframes (ticker intervals) available for the exchange.
```
usage: freqtrade list-timeframes [-h] [--exchange EXCHANGE] [-1]
usage: freqtrade list-timeframes [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH] [--userdir PATH] [--exchange EXCHANGE] [-1]
optional arguments:
-h, --help show this help message and exit
--exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no
config is provided.
-1, --one-column Print output in one column.
-h, --help show this help message and exit
--exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no config is provided.
-1, --one-column Print output in one column.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are: 'syslog', 'journald'. See the documentation for more details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`). Multiple --config options may be used. Can be set to `-`
to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
* Example: see the timeframes for the 'binance' exchange, set in the configuration file:
```
$ freqtrade -c config_binance.json list-timeframes
$ freqtrade list-timeframes -c config_binance.json
...
Timeframes available for the exchange `binance`: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w, 1M
```
@ -175,14 +315,16 @@ You can print info about any pair/market with these subcommands - and you can fi
These subcommands have same usage and same set of available options:
```
usage: freqtrade list-markets [-h] [--exchange EXCHANGE] [--print-list]
[--print-json] [-1] [--print-csv]
usage: freqtrade list-markets [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH] [--exchange EXCHANGE]
[--print-list] [--print-json] [-1] [--print-csv]
[--base BASE_CURRENCY [BASE_CURRENCY ...]]
[--quote QUOTE_CURRENCY [QUOTE_CURRENCY ...]]
[-a]
usage: freqtrade list-pairs [-h] [--exchange EXCHANGE] [--print-list]
[--print-json] [-1] [--print-csv]
usage: freqtrade list-pairs [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH] [--exchange EXCHANGE]
[--print-list] [--print-json] [-1] [--print-csv]
[--base BASE_CURRENCY [BASE_CURRENCY ...]]
[--quote QUOTE_CURRENCY [QUOTE_CURRENCY ...]] [-a]
@ -201,6 +343,22 @@ optional arguments:
Specify quote currency(-ies). Space-separated list.
-a, --all Print all pairs or market symbols. By default only
active ones are shown.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default: `config.json`).
Multiple --config options may be used. Can be set to
`-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
By default, only active pairs/markets are shown. Active pairs/markets are those that can currently be traded
@ -222,7 +380,7 @@ $ freqtrade list-pairs --quote USD --print-json
human-readable list with summary:
```
$ freqtrade -c config_binance.json list-pairs --all --base BTC ETH --quote USDT USD --print-list
$ freqtrade list-pairs -c config_binance.json --all --base BTC ETH --quote USDT USD --print-list
```
* Print all markets on exchange "Kraken", in the tabular format:
@ -270,17 +428,53 @@ You can list the hyperoptimization epochs the Hyperopt module evaluated previous
```
usage: freqtrade hyperopt-list [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH] [--best]
[--profitable] [--no-color] [--print-json]
[--no-details]
[--profitable] [--min-trades INT]
[--max-trades INT] [--min-avg-time FLOAT]
[--max-avg-time FLOAT] [--min-avg-profit FLOAT]
[--max-avg-profit FLOAT]
[--min-total-profit FLOAT]
[--max-total-profit FLOAT] [--no-color]
[--print-json] [--no-details]
[--export-csv FILE]
optional arguments:
-h, --help show this help message and exit
--best Select only best epochs.
--profitable Select only profitable epochs.
--min-trades INT Select epochs with more than INT trades.
--max-trades INT Select epochs with less than INT trades.
--min-avg-time FLOAT Select epochs on above average time.
--max-avg-time FLOAT Select epochs on under average time.
--min-avg-profit FLOAT
Select epochs on above average profit.
--max-avg-profit FLOAT
Select epochs on below average profit.
--min-total-profit FLOAT
Select epochs on above total profit.
--max-total-profit FLOAT
Select epochs on below total profit.
--no-color Disable colorization of hyperopt results. May be
useful if you are redirecting output to a file.
--print-json Print best result detailization in JSON format.
--no-details Do not print best epoch details.
--export-csv FILE Export to CSV-File. This will disable table print.
Example: --export-csv hyperopt.csv
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
### Examples
@ -327,3 +521,48 @@ Prints JSON data with details for the last best epoch (i.e., the best of all epo
```
freqtrade hyperopt-show --best -n -1 --print-json --no-header
```
## Show trades
Print selected (or all) trades from the database to screen.
```
usage: freqtrade show-trades [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH] [--db-url PATH]
[--trade-ids TRADE_IDS [TRADE_IDS ...]]
[--print-json]
optional arguments:
-h, --help show this help message and exit
--db-url PATH Override trades database URL, this is useful in custom
deployments (default: `sqlite:///tradesv3.sqlite` for
Live Run mode, `sqlite:///tradesv3.dryrun.sqlite` for
Dry Run).
--trade-ids TRADE_IDS [TRADE_IDS ...]
Specify the list of trade ids.
--print-json Print output in JSON format.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
### Examples
Print trades with ids 2 and 3 as JSON
``` bash
freqtrade show-trades --db-url sqlite:///tradesv3.sqlite --trade-ids 2 3 --print-json
```

View File

@ -15,10 +15,20 @@ Sample configuration (tested using IFTTT).
"value2": "limit {limit:8f}",
"value3": "{stake_amount:8f} {stake_currency}"
},
"webhookbuycancel": {
"value1": "Cancelling Open Buy Order for {pair}",
"value2": "limit {limit:8f}",
"value3": "{stake_amount:8f} {stake_currency}"
},
"webhooksell": {
"value1": "Selling {pair}",
"value2": "limit {limit:8f}",
"value3": "profit: {profit_amount:8f} {stake_currency}"
"value3": "profit: {profit_amount:8f} {stake_currency} ({profit_ratio})"
},
"webhooksellcancel": {
"value1": "Cancelling Open Sell Order for {pair}",
"value2": "limit {limit:8f}",
"value3": "profit: {profit_amount:8f} {stake_currency} ({profit_ratio})"
},
"webhookstatus": {
"value1": "Status: {status}",
@ -40,10 +50,29 @@ Possible parameters are:
* `exchange`
* `pair`
* `limit`
* `amount`
* `open_date`
* `stake_amount`
* `stake_currency`
* `fiat_currency`
* `order_type`
* `current_rate`
### Webhookbuycancel
The fields in `webhook.webhookbuycancel` are filled when the bot cancels a buy order. Parameters are filled using string.format.
Possible parameters are:
* `exchange`
* `pair`
* `limit`
* `amount`
* `open_date`
* `stake_amount`
* `stake_currency`
* `fiat_currency`
* `order_type`
* `current_rate`
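To illustrate the templating, here is a minimal sketch of the kind of `str.format()` call the bot performs with these parameters (all values below are fabricated sample data, not real bot output):

``` python
# Fabricated sample values for a cancelled buy order - for illustration only
msg = {
    'exchange': 'binance',
    'pair': 'ETH/BTC',
    'limit': 0.005,
    'amount': 0.8,
    'open_date': '2020-05-18 12:00:00',
    'stake_amount': 0.004,
    'stake_currency': 'BTC',
    'fiat_currency': 'USD',
    'order_type': 'limit',
    'current_rate': 0.00495,
}

print("Cancelling Open Buy Order for {pair}".format(**msg))   # Cancelling Open Buy Order for ETH/BTC
print("limit {limit:8f}".format(**msg))                       # limit 0.005000
print("{stake_amount:8f} {stake_currency}".format(**msg))     # 0.004000 BTC
```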
### Webhooksell
@ -58,7 +87,28 @@ Possible parameters are:
* `open_rate`
* `current_rate`
* `profit_amount`
* `profit_percent`
* `profit_ratio`
* `stake_currency`
* `fiat_currency`
* `sell_reason`
* `order_type`
* `open_date`
* `close_date`
### Webhooksellcancel
The fields in `webhook.webhooksellcancel` are filled when the bot cancels a sell order. Parameters are filled using string.format.
Possible parameters are:
* `exchange`
* `pair`
* `gain`
* `limit`
* `amount`
* `open_rate`
* `current_rate`
* `profit_amount`
* `profit_ratio`
* `stake_currency`
* `fiat_currency`
* `sell_reason`

View File

@ -45,7 +45,7 @@ dependencies:
- pip:
# Required for app
- cython
- coinmarketcap
- pycoingecko
- ccxt
- TA-Lib
- py_find_1st

View File

@ -1,44 +1,34 @@
""" FreqTrade bot """
""" Freqtrade bot """
__version__ = 'develop'
if __version__ == 'develop':
try:
import subprocess
__version__ = 'develop-' + subprocess.check_output(
['git', 'log', '--format="%h"', '-n 1'],
stderr=subprocess.DEVNULL).decode("utf-8").rstrip().strip('"')
# from datetime import datetime
# last_release = subprocess.check_output(
# ['git', 'tag']
# ).decode('utf-8').split()[-1].split(".")
# # Releases are in the format "2020.1" - we increment the latest version for dev.
# prefix = f"{last_release[0]}.{int(last_release[1]) + 1}"
# dev_version = int(datetime.now().timestamp() // 1000)
# __version__ = f"{prefix}.dev{dev_version}"
# subprocess.check_output(
# ['git', 'log', '--format="%h"', '-n 1'],
# stderr=subprocess.DEVNULL).decode("utf-8").rstrip().strip('"')
except Exception:
# git not available, ignore
pass
class DependencyException(Exception):
"""
Indicates that an assumed dependency is not met.
This could happen when there is currently not enough money on the account.
"""
class OperationalException(Exception):
"""
Requires manual intervention and will usually stop the bot.
This happens when an exchange returns an unexpected error during runtime
or given configuration is invalid.
"""
class InvalidOrderException(Exception):
"""
This is returned when the order is not valid. Example:
If stoploss on exchange order is hit, then trying to cancel the order
should return this exception.
"""
class TemporaryError(Exception):
"""
Temporary network or exchange related error.
This could happen when an exchange is congested, unavailable, or the user
has networking problems. Usually resolves itself after a time.
"""
try:
# Try Fallback to freqtrade_commit file (created by CI while building docker image)
from pathlib import Path
versionfile = Path('./freqtrade_commit')
if versionfile.is_file():
__version__ = f"docker-{versionfile.read_text()[:8]}"
except Exception:
pass

View File

@ -0,0 +1,29 @@
# flake8: noqa: F401
"""
Commands module.
Contains all start-commands, subcommands and CLI Interface creation.
Note: Be careful with file-scoped imports in these subfiles,
as they are parsed on startup - nothing containing optional modules should be loaded.
"""
from freqtrade.commands.arguments import Arguments
from freqtrade.commands.build_config_commands import start_new_config
from freqtrade.commands.data_commands import (start_convert_data,
start_download_data)
from freqtrade.commands.deploy_commands import (start_create_userdir,
start_new_hyperopt,
start_new_strategy)
from freqtrade.commands.hyperopt_commands import (start_hyperopt_list,
start_hyperopt_show)
from freqtrade.commands.list_commands import (start_list_exchanges,
start_list_hyperopts,
start_list_markets,
start_list_strategies,
start_list_timeframes,
start_show_trades)
from freqtrade.commands.optimize_commands import (start_backtesting,
start_edge, start_hyperopt)
from freqtrade.commands.pairlist_commands import start_test_pairlist
from freqtrade.commands.plot_commands import (start_plot_dataframe,
start_plot_profit)
from freqtrade.commands.trade_commands import start_trading

View File

@ -6,8 +6,8 @@ from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional
from freqtrade import constants
from freqtrade.configuration.cli_options import AVAILABLE_CLI_OPTIONS
from freqtrade.commands.cli_options import AVAILABLE_CLI_OPTIONS
from freqtrade.constants import DEFAULT_CONFIG
ARGS_COMMON = ["verbosity", "logfile", "version", "config", "datadir", "user_data_dir"]
@ -30,6 +30,10 @@ ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path",
ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"]
ARGS_LIST_STRATEGIES = ["strategy_path", "print_one_column", "print_colorized"]
ARGS_LIST_HYPEROPTS = ["hyperopt_path", "print_one_column", "print_colorized"]
ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all"]
ARGS_LIST_TIMEFRAMES = ["exchange", "print_one_column"]
@ -41,28 +45,42 @@ ARGS_TEST_PAIRLIST = ["config", "quote_currencies", "print_one_column", "list_pa
ARGS_CREATE_USERDIR = ["user_data_dir", "reset"]
ARGS_BUILD_CONFIG = ["config"]
ARGS_BUILD_STRATEGY = ["user_data_dir", "strategy", "template"]
ARGS_BUILD_HYPEROPT = ["user_data_dir", "hyperopt", "template"]
ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase"]
ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes"]
ARGS_DOWNLOAD_DATA = ["pairs", "pairs_file", "days", "download_trades", "exchange",
"timeframes", "erase"]
"timeframes", "erase", "dataformat_ohlcv", "dataformat_trades"]
ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", "plot_limit",
"db_url", "trade_source", "export", "exportfilename",
"timerange", "ticker_interval"]
"timerange", "ticker_interval", "no_trades"]
ARGS_PLOT_PROFIT = ["pairs", "timerange", "export", "exportfilename", "db_url",
"trade_source", "ticker_interval"]
ARGS_HYPEROPT_LIST = ["hyperopt_list_best", "hyperopt_list_profitable", "print_colorized",
"print_json", "hyperopt_list_no_details"]
ARGS_SHOW_TRADES = ["db_url", "trade_ids", "print_json"]
ARGS_HYPEROPT_LIST = ["hyperopt_list_best", "hyperopt_list_profitable",
"hyperopt_list_min_trades", "hyperopt_list_max_trades",
"hyperopt_list_min_avg_time", "hyperopt_list_max_avg_time",
"hyperopt_list_min_avg_profit", "hyperopt_list_max_avg_profit",
"hyperopt_list_min_total_profit", "hyperopt_list_max_total_profit",
"print_colorized", "print_json", "hyperopt_list_no_details",
"export_csv"]
ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_show_index",
"print_json", "hyperopt_show_no_header"]
NO_CONF_REQURIED = ["download-data", "list-timeframes", "list-markets", "list-pairs",
"hyperopt-list", "hyperopt-show", "plot-dataframe", "plot-profit"]
NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
"list-markets", "list-pairs", "list-strategies",
"list-hyperopts", "hyperopt-list", "hyperopt-show",
"plot-dataframe", "plot-profit", "show-trades"]
NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-hyperopt", "new-strategy"]
@ -96,10 +114,23 @@ class Arguments:
# Workaround issue in argparse with action='append' and default value
# (see https://bugs.python.org/issue16399)
# Allow no-config for certain commands (like downloading / plotting)
if ('config' in parsed_arg and parsed_arg.config is None and
((Path.cwd() / constants.DEFAULT_CONFIG).is_file() or
not ('command' in parsed_arg and parsed_arg.command in NO_CONF_REQURIED))):
parsed_arg.config = [constants.DEFAULT_CONFIG]
if ('config' in parsed_arg and parsed_arg.config is None):
conf_required = ('command' in parsed_arg and parsed_arg.command in NO_CONF_REQURIED)
if 'user_data_dir' in parsed_arg and parsed_arg.user_data_dir is not None:
user_dir = parsed_arg.user_data_dir
else:
# Default case
user_dir = 'user_data'
# Try loading from "user_data/config.json"
cfgfile = Path(user_dir) / DEFAULT_CONFIG
if cfgfile.is_file():
parsed_arg.config = [str(cfgfile)]
else:
# Else use "config.json".
cfgfile = Path.cwd() / DEFAULT_CONFIG
if cfgfile.is_file() or not conf_required:
parsed_arg.config = [DEFAULT_CONFIG]
return parsed_arg
@ -127,13 +158,16 @@ class Arguments:
self.parser = argparse.ArgumentParser(description='Free, open source crypto trading bot')
self._build_args(optionlist=['version'], parser=self.parser)
from freqtrade.optimize import start_backtesting, start_hyperopt, start_edge
from freqtrade.utils import (start_create_userdir, start_download_data,
start_hyperopt_list, start_hyperopt_show,
start_list_exchanges, start_list_markets,
start_new_hyperopt, start_new_strategy,
start_list_timeframes, start_test_pairlist, start_trading)
from freqtrade.plot.plot_utils import start_plot_dataframe, start_plot_profit
from freqtrade.commands import (start_create_userdir, start_convert_data,
start_download_data,
start_hyperopt_list, start_hyperopt_show,
start_list_exchanges, start_list_hyperopts,
start_list_markets, start_list_strategies,
start_list_timeframes, start_new_config,
start_new_hyperopt, start_new_strategy,
start_plot_dataframe, start_plot_profit, start_show_trades,
start_backtesting, start_hyperopt, start_edge,
start_test_pairlist, start_trading)
subparsers = self.parser.add_subparsers(dest='command',
# Use custom message when no subhandler is added
@ -173,6 +207,12 @@ class Arguments:
create_userdir_cmd.set_defaults(func=start_create_userdir)
self._build_args(optionlist=ARGS_CREATE_USERDIR, parser=create_userdir_cmd)
# add new-config subcommand
build_config_cmd = subparsers.add_parser('new-config',
help="Create new config")
build_config_cmd.set_defaults(func=start_new_config)
self._build_args(optionlist=ARGS_BUILD_CONFIG, parser=build_config_cmd)
# add new-strategy subcommand
build_strategy_cmd = subparsers.add_parser('new-strategy',
help="Create new strategy")
@ -185,6 +225,24 @@ class Arguments:
build_hyperopt_cmd.set_defaults(func=start_new_hyperopt)
self._build_args(optionlist=ARGS_BUILD_HYPEROPT, parser=build_hyperopt_cmd)
# Add list-strategies subcommand
list_strategies_cmd = subparsers.add_parser(
'list-strategies',
help='Print available strategies.',
parents=[_common_parser],
)
list_strategies_cmd.set_defaults(func=start_list_strategies)
self._build_args(optionlist=ARGS_LIST_STRATEGIES, parser=list_strategies_cmd)
# Add list-hyperopts subcommand
list_hyperopts_cmd = subparsers.add_parser(
'list-hyperopts',
help='Print available hyperopt classes.',
parents=[_common_parser],
)
list_hyperopts_cmd.set_defaults(func=start_list_hyperopts)
self._build_args(optionlist=ARGS_LIST_HYPEROPTS, parser=list_hyperopts_cmd)
# Add list-exchanges subcommand
list_exchanges_cmd = subparsers.add_parser(
'list-exchanges',
@ -238,6 +296,24 @@ class Arguments:
download_data_cmd.set_defaults(func=start_download_data)
self._build_args(optionlist=ARGS_DOWNLOAD_DATA, parser=download_data_cmd)
# Add convert-data subcommand
convert_data_cmd = subparsers.add_parser(
'convert-data',
help='Convert candle (OHLCV) data from one format to another.',
parents=[_common_parser],
)
convert_data_cmd.set_defaults(func=partial(start_convert_data, ohlcv=True))
self._build_args(optionlist=ARGS_CONVERT_DATA_OHLCV, parser=convert_data_cmd)
# Add convert-trade-data subcommand
convert_trade_data_cmd = subparsers.add_parser(
'convert-trade-data',
help='Convert trade data from one format to another.',
parents=[_common_parser],
)
convert_trade_data_cmd.set_defaults(func=partial(start_convert_data, ohlcv=False))
self._build_args(optionlist=ARGS_CONVERT_DATA, parser=convert_trade_data_cmd)
# Add Plotting subcommand
plot_dataframe_cmd = subparsers.add_parser(
'plot-dataframe',
@ -256,6 +332,15 @@ class Arguments:
plot_profit_cmd.set_defaults(func=start_plot_profit)
self._build_args(optionlist=ARGS_PLOT_PROFIT, parser=plot_profit_cmd)
# Add show-trades subcommand
show_trades = subparsers.add_parser(
'show-trades',
help='Show trades.',
parents=[_common_parser],
)
show_trades.set_defaults(func=start_show_trades)
self._build_args(optionlist=ARGS_SHOW_TRADES, parser=show_trades)
# Add hyperopt-list subcommand
hyperopt_list_cmd = subparsers.add_parser(
'hyperopt-list',

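The registration pattern above - each subcommand added with `add_parser()` and wired to its handler through `set_defaults(func=...)`, with `functools.partial` letting one handler serve several commands - can be sketched in isolation with plain argparse. The command name and handler below are illustrative only, not freqtrade's:

import argparse
from functools import partial


def start_convert(args: argparse.Namespace, ohlcv: bool = True) -> None:
    kind = "OHLCV" if ohlcv else "trade"
    print(f"Converting {kind} data: {args.format_from} -> {args.format_to}")


parser = argparse.ArgumentParser(description="Subcommand dispatch sketch")
subparsers = parser.add_subparsers(dest="command")

# The handler travels inside the parsed namespace thanks to set_defaults().
convert_cmd = subparsers.add_parser("convert-data", help="Convert candle data.")
convert_cmd.add_argument("--format-from", required=True)
convert_cmd.add_argument("--format-to", required=True)
convert_cmd.set_defaults(func=partial(start_convert, ohlcv=True))

args = parser.parse_args(["convert-data", "--format-from", "json", "--format-to", "jsongz"])
args.func(args)  # dispatches to start_convert(..., ohlcv=True)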
View File

@ -0,0 +1,193 @@
import logging
from pathlib import Path
from typing import Any, Dict
from questionary import Separator, prompt
from freqtrade.constants import UNLIMITED_STAKE_AMOUNT
from freqtrade.exchange import available_exchanges, MAP_EXCHANGE_CHILDCLASS
from freqtrade.misc import render_template
from freqtrade.exceptions import OperationalException
logger = logging.getLogger(__name__)
def validate_is_int(val):
try:
_ = int(val)
return True
except Exception:
return False
def validate_is_float(val):
try:
_ = float(val)
return True
except Exception:
return False
def ask_user_overwrite(config_path: Path) -> bool:
questions = [
{
"type": "confirm",
"name": "overwrite",
"message": f"File {config_path} already exists. Overwrite?",
"default": False,
},
]
answers = prompt(questions)
return answers['overwrite']
def ask_user_config() -> Dict[str, Any]:
"""
Ask user a few questions to build the configuration.
Interactive questions built using https://github.com/tmbo/questionary
:returns: Dict with keys to put into template
"""
questions = [
{
"type": "confirm",
"name": "dry_run",
"message": "Do you want to enable Dry-run (simulated trades)?",
"default": True,
},
{
"type": "text",
"name": "stake_currency",
"message": "Please insert your stake currency:",
"default": 'BTC',
},
{
"type": "text",
"name": "stake_amount",
"message": "Please insert your stake amount:",
"default": "0.01",
"validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val),
},
{
"type": "text",
"name": "max_open_trades",
"message": f"Please insert max_open_trades (Integer or '{UNLIMITED_STAKE_AMOUNT}'):",
"default": "3",
"validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_int(val)
},
{
"type": "text",
"name": "ticker_interval",
"message": "Please insert your timeframe (ticker interval):",
"default": "5m",
},
{
"type": "text",
"name": "fiat_display_currency",
"message": "Please insert your display Currency (for reporting):",
"default": 'USD',
},
{
"type": "select",
"name": "exchange_name",
"message": "Select exchange",
"choices": [
"binance",
"binanceje",
"binanceus",
"bittrex",
"kraken",
Separator(),
"other",
],
},
{
"type": "autocomplete",
"name": "exchange_name",
"message": "Type your exchange name (Must be supported by ccxt)",
"choices": available_exchanges(),
"when": lambda x: x["exchange_name"] == 'other'
},
{
"type": "password",
"name": "exchange_key",
"message": "Insert Exchange Key",
"when": lambda x: not x['dry_run']
},
{
"type": "password",
"name": "exchange_secret",
"message": "Insert Exchange Secret",
"when": lambda x: not x['dry_run']
},
{
"type": "confirm",
"name": "telegram",
"message": "Do you want to enable Telegram?",
"default": False,
},
{
"type": "password",
"name": "telegram_token",
"message": "Insert Telegram token",
"when": lambda x: x['telegram']
},
{
"type": "text",
"name": "telegram_chat_id",
"message": "Insert Telegram chat id",
"when": lambda x: x['telegram']
},
]
answers = prompt(questions)
if not answers:
# Interrupted questionary sessions return an empty dict.
raise OperationalException("User interrupted interactive questions.")
return answers
def deploy_new_config(config_path: Path, selections: Dict[str, Any]) -> None:
"""
Applies selections to the template and writes the result to config_path
:param config_path: Path object for new config file. Should not exist yet
:param selections: Dict containing selections taken by the user.
"""
from jinja2.exceptions import TemplateNotFound
try:
exchange_template = MAP_EXCHANGE_CHILDCLASS.get(
selections['exchange_name'], selections['exchange_name'])
selections['exchange'] = render_template(
templatefile=f"subtemplates/exchange_{exchange_template}.j2",
arguments=selections
)
except TemplateNotFound:
selections['exchange'] = render_template(
templatefile="subtemplates/exchange_generic.j2",
arguments=selections
)
config_text = render_template(templatefile='base_config.json.j2',
arguments=selections)
logger.info(f"Writing config to `{config_path}`.")
config_path.write_text(config_text)
def start_new_config(args: Dict[str, Any]) -> None:
"""
Create a new configuration from a template,
asking the user questions to fill out the template accordingly.
"""
config_path = Path(args['config'][0])
if config_path.exists():
overwrite = ask_user_overwrite(config_path)
if overwrite:
config_path.unlink()
else:
raise OperationalException(
f"Configuration file `{config_path}` already exists. "
"Please delete it or use a different configuration file name.")
selections = ask_user_config()
deploy_new_config(config_path, selections)
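
The exchange-specific template lookup in `deploy_new_config()` is a plain try/except around jinja2's `TemplateNotFound`. The same fallback pattern in isolation, using an in-memory `DictLoader` with invented template names and contents:

from jinja2 import DictLoader, Environment
from jinja2.exceptions import TemplateNotFound

# Two illustrative sub-templates: one exchange-specific, one generic fallback.
env = Environment(loader=DictLoader({
    "exchange_binance.j2": '"name": "binance", "rateLimit": true',
    "exchange_generic.j2": '"name": "{{ exchange_name }}"',
}))


def render_exchange_block(exchange_name: str) -> str:
    """Render the exchange block, falling back to the generic template."""
    try:
        template = env.get_template(f"exchange_{exchange_name}.j2")
    except TemplateNotFound:
        template = env.get_template("exchange_generic.j2")
    return template.render(exchange_name=exchange_name)


print(render_exchange_block("binance"))  # uses the specific template
print(render_exchange_block("gateio"))   # falls back to the generic one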

View File

@ -1,7 +1,7 @@
"""
Definition of cli arguments used in arguments.py
"""
import argparse
from argparse import ArgumentTypeError
from freqtrade import __version__, constants
@ -12,7 +12,7 @@ def check_int_positive(value: str) -> int:
if uint <= 0:
raise ValueError
except ValueError:
raise argparse.ArgumentTypeError(
raise ArgumentTypeError(
f"{value} is invalid for this parameter, should be a positive integer value"
)
return uint
@ -24,7 +24,7 @@ def check_int_nonzero(value: str) -> int:
if uint == 0:
raise ValueError
except ValueError:
raise argparse.ArgumentTypeError(
raise ArgumentTypeError(
f"{value} is invalid for this parameter, should be a non-zero integer value"
)
return uint
@ -59,7 +59,8 @@ AVAILABLE_CLI_OPTIONS = {
),
"config": Arg(
'-c', '--config',
help=f'Specify configuration file (default: `{constants.DEFAULT_CONFIG}`). '
help=f'Specify configuration file (default: `userdir/{constants.DEFAULT_CONFIG}` '
f'or `config.json` whichever exists). '
f'Multiple --config options may be used. '
f'Can be set to `-` to read config from stdin.',
action='append',
@ -118,14 +119,14 @@ AVAILABLE_CLI_OPTIONS = {
help='Specify what timerange of data to use.',
),
"max_open_trades": Arg(
'--max_open_trades',
help='Specify max_open_trades to use.',
'--max-open-trades',
help='Override the value of the `max_open_trades` configuration setting.',
type=int,
metavar='INT',
),
"stake_amount": Arg(
'--stake_amount',
help='Specify stake_amount.',
'--stake-amount',
help='Override the value of the `stake_amount` configuration setting.',
type=float,
),
# Backtesting
@ -216,10 +217,17 @@ AVAILABLE_CLI_OPTIONS = {
),
"print_json": Arg(
'--print-json',
help='Print best result detailization in JSON format.',
help='Print output in JSON format.',
action='store_true',
default=False,
),
"export_csv": Arg(
'--export-csv',
help='Export to CSV-File.'
' This will disable table print.'
' Example: --export-csv hyperopt.csv',
metavar='FILE',
),
"hyperopt_jobs": Arg(
'-j', '--job-workers',
help='The number of concurrently running jobs for hyperoptimization '
@ -256,7 +264,8 @@ AVAILABLE_CLI_OPTIONS = {
help='Specify the class name of the hyperopt loss function class (IHyperOptLoss). '
'Different functions can generate completely different results, '
'since the target for optimization is different. Built-in Hyperopt-loss-functions are: '
'DefaultHyperOptLoss, OnlyProfitHyperOptLoss, SharpeHyperOptLoss.'
'DefaultHyperOptLoss, OnlyProfitHyperOptLoss, SharpeHyperOptLoss, SharpeHyperOptLossDaily, '
'SortinoHyperOptLoss, SortinoHyperOptLossDaily.'
'(default: `%(default)s`).',
metavar='NAME',
default=constants.DEFAULT_HYPEROPT_LOSS,
@ -332,6 +341,30 @@ AVAILABLE_CLI_OPTIONS = {
'desired timeframe as specified as --timeframes/-t.',
action='store_true',
),
"format_from": Arg(
'--format-from',
help='Source format for data conversion.',
choices=constants.AVAILABLE_DATAHANDLERS,
required=True,
),
"format_to": Arg(
'--format-to',
help='Destination format for data conversion.',
choices=constants.AVAILABLE_DATAHANDLERS,
required=True,
),
"dataformat_ohlcv": Arg(
'--data-format-ohlcv',
help='Storage format for downloaded candle (OHLCV) data. (default: `%(default)s`).',
choices=constants.AVAILABLE_DATAHANDLERS,
default='json'
),
"dataformat_trades": Arg(
'--data-format-trades',
help='Storage format for downloaded trades data. (default: `%(default)s`).',
choices=constants.AVAILABLE_DATAHANDLERS,
default='jsongz'
),
"exchange": Arg(
'--exchange',
help=f'Exchange name (default: `{constants.DEFAULT_EXCHANGE}`). '
@ -339,8 +372,8 @@ AVAILABLE_CLI_OPTIONS = {
),
"timeframes": Arg(
'-t', '--timeframes',
help=f'Specify which tickers to download. Space-separated list. '
f'Default: `1m 5m`.',
help='Specify which tickers to download. Space-separated list. '
'Default: `1m 5m`.',
choices=['1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h',
'6h', '8h', '12h', '1d', '3d', '1w'],
default=['1m', '5m'],
@ -354,24 +387,22 @@ AVAILABLE_CLI_OPTIONS = {
# Templating options
"template": Arg(
'--template',
help='Use a template which is either `minimal` or '
'`full` (containing multiple sample indicators). Default: `%(default)s`.',
choices=['full', 'minimal'],
help='Use a template which is either `minimal`, '
'`full` (containing multiple sample indicators) or `advanced`. Default: `%(default)s`.',
choices=['full', 'minimal', 'advanced'],
default='full',
),
# Plot dataframe
"indicators1": Arg(
'--indicators1',
help='Set indicators from your strategy you want in the first row of the graph. '
'Space-separated list. Example: `ema3 ema5`. Default: `%(default)s`.',
default=['sma', 'ema3', 'ema5'],
"Space-separated list. Example: `ema3 ema5`. Default: `['sma', 'ema3', 'ema5']`.",
nargs='+',
),
"indicators2": Arg(
'--indicators2',
help='Set indicators from your strategy you want in the third row of the graph. '
'Space-separated list. Example: `fastd fastk`. Default: `%(default)s`.',
default=['macd', 'macdsignal'],
"Space-separated list. Example: `fastd fastk`. Default: `['macd', 'macdsignal']`.",
nargs='+',
),
"plot_limit": Arg(
@ -382,6 +413,11 @@ AVAILABLE_CLI_OPTIONS = {
metavar='INT',
default=750,
),
"no_trades": Arg(
'--no-trades',
help='Skip using trades from backtesting file and DB.',
action='store_true',
),
"trade_source": Arg(
'--trade-source',
help='Specify the source for trades (Can be DB or file (backtest file)) '
@ -389,6 +425,11 @@ AVAILABLE_CLI_OPTIONS = {
choices=["DB", "file"],
default="file",
),
"trade_ids": Arg(
'--trade-ids',
help='Specify the list of trade ids.',
nargs='+',
),
# hyperopt-list, hyperopt-show
"hyperopt_list_profitable": Arg(
'--profitable',
@ -400,6 +441,54 @@ AVAILABLE_CLI_OPTIONS = {
help='Select only best epochs.',
action='store_true',
),
"hyperopt_list_min_trades": Arg(
'--min-trades',
help='Select epochs with more than INT trades.',
type=check_int_positive,
metavar='INT',
),
"hyperopt_list_max_trades": Arg(
'--max-trades',
help='Select epochs with fewer than INT trades.',
type=check_int_positive,
metavar='INT',
),
"hyperopt_list_min_avg_time": Arg(
'--min-avg-time',
help='Select epochs with average time above this value.',
type=float,
metavar='FLOAT',
),
"hyperopt_list_max_avg_time": Arg(
'--max-avg-time',
help='Select epochs with average time below this value.',
type=float,
metavar='FLOAT',
),
"hyperopt_list_min_avg_profit": Arg(
'--min-avg-profit',
help='Select epochs with average profit above this value.',
type=float,
metavar='FLOAT',
),
"hyperopt_list_max_avg_profit": Arg(
'--max-avg-profit',
help='Select epochs with average profit below this value.',
type=float,
metavar='FLOAT',
),
"hyperopt_list_min_total_profit": Arg(
'--min-total-profit',
help='Select epochs with total profit above this value.',
type=float,
metavar='FLOAT',
),
"hyperopt_list_max_total_profit": Arg(
'--max-total-profit',
help='Select epochs with total profit below this value.',
type=float,
metavar='FLOAT',
),
"hyperopt_list_no_details": Arg(
'--no-details',
help='Do not print best epoch details.',

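The `check_int_positive` / `check_int_nonzero` helpers changed above are ordinary argparse `type=` callables. A minimal standalone usage sketch (the option name is arbitrary):

import argparse
from argparse import ArgumentTypeError


def check_int_positive(value: str) -> int:
    """argparse `type=` callable accepting only strictly positive integers."""
    try:
        uint = int(value)
        if uint <= 0:
            raise ValueError
    except ValueError:
        raise ArgumentTypeError(f"{value} is invalid, should be a positive integer value")
    return uint


parser = argparse.ArgumentParser()
parser.add_argument("--min-trades", type=check_int_positive, metavar="INT")

print(parser.parse_args(["--min-trades", "5"]).min_trades)  # -> 5
# parser.parse_args(["--min-trades", "-1"]) exits with the ArgumentTypeError message.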
View File

@ -0,0 +1,90 @@
import logging
import sys
from typing import Any, Dict, List
import arrow
from freqtrade.configuration import TimeRange, setup_utils_configuration
from freqtrade.data.converter import (convert_ohlcv_format,
convert_trades_format)
from freqtrade.data.history import (convert_trades_to_ohlcv,
refresh_backtest_ohlcv_data,
refresh_backtest_trades_data)
from freqtrade.exceptions import OperationalException
from freqtrade.resolvers import ExchangeResolver
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def start_download_data(args: Dict[str, Any]) -> None:
"""
Download data (former download_backtest_data.py script)
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
timerange = TimeRange()
if 'days' in config:
time_since = arrow.utcnow().shift(days=-config['days']).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if 'pairs' not in config:
raise OperationalException(
"Downloading data requires a list of pairs. "
"Please check the documentation on how to configure this.")
logger.info(f'About to download pairs: {config["pairs"]}, '
f'intervals: {config["timeframes"]} to {config["datadir"]}')
pairs_not_available: List[str] = []
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
# Manual validations of relevant settings
exchange.validate_pairs(config['pairs'])
for timeframe in config['timeframes']:
exchange.validate_timeframes(timeframe)
try:
if config.get('download_trades'):
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=config["pairs"], datadir=config['datadir'],
timerange=timerange, erase=bool(config.get("erase")),
data_format=config['dataformat_trades'])
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=config["pairs"], timeframes=config["timeframes"],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get("erase")),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
)
else:
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=config["pairs"], timeframes=config["timeframes"],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get("erase")),
data_format=config['dataformat_ohlcv'])
except KeyboardInterrupt:
sys.exit("SIGINT received, aborting ...")
finally:
if pairs_not_available:
logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")
def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:
"""
Convert data from one format to another
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if ohlcv:
convert_ohlcv_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
else:
convert_trades_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
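
`convert_ohlcv_format()` and `convert_trades_format()` themselves live in `freqtrade.data.converter`; in essence, the two storage formats toggled above ('json' and 'jsongz') differ in whether the JSON payload is gzip-compressed on disk. A standalone sketch of that idea, with made-up filenames and candles:

import gzip
import json
import tempfile
from pathlib import Path

# Two fake candles: [timestamp_ms, open, high, low, close, volume]
candles = [
    [1588723200000, 9550.0, 9600.0, 9500.0, 9580.0, 120.5],
    [1588723500000, 9580.0, 9620.0, 9570.0, 9610.0, 98.1],
]

datadir = Path(tempfile.mkdtemp())
src = datadir / "BTC_USDT-5m.json"
dst = datadir / "BTC_USDT-5m.json.gz"

src.write_text(json.dumps(candles))

# "json" -> "jsongz": same payload, gzip-compressed on disk.
with gzip.open(dst, "wt", encoding="utf-8") as f:
    json.dump(json.loads(src.read_text()), f)

with gzip.open(dst, "rt", encoding="utf-8") as f:
    assert json.load(f) == candles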

View File

@ -0,0 +1,139 @@
import logging
import sys
from pathlib import Path
from typing import Any, Dict
from freqtrade.configuration import setup_utils_configuration
from freqtrade.configuration.directory_operations import (copy_sample_files,
create_userdata_dir)
from freqtrade.constants import USERPATH_HYPEROPTS, USERPATH_STRATEGIES
from freqtrade.exceptions import OperationalException
from freqtrade.misc import render_template, render_template_with_fallback
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def start_create_userdir(args: Dict[str, Any]) -> None:
"""
Create "user_data" directory to contain user data strategies, hyperopt, ...)
:param args: Cli args from Arguments()
:return: None
"""
if "user_data_dir" in args and args["user_data_dir"]:
userdir = create_userdata_dir(args["user_data_dir"], create_dir=True)
copy_sample_files(userdir, overwrite=args["reset"])
else:
logger.warning("`create-userdir` requires --userdir to be set.")
sys.exit(1)
def deploy_new_strategy(strategy_name: str, strategy_path: Path, subtemplate: str) -> None:
"""
Deploy new strategy from template to strategy_path
"""
fallback = 'full'
indicators = render_template_with_fallback(
templatefile=f"subtemplates/indicators_{subtemplate}.j2",
templatefallbackfile=f"subtemplates/indicators_{fallback}.j2",
)
buy_trend = render_template_with_fallback(
templatefile=f"subtemplates/buy_trend_{subtemplate}.j2",
templatefallbackfile=f"subtemplates/buy_trend_{fallback}.j2",
)
sell_trend = render_template_with_fallback(
templatefile=f"subtemplates/sell_trend_{subtemplate}.j2",
templatefallbackfile=f"subtemplates/sell_trend_{fallback}.j2",
)
plot_config = render_template_with_fallback(
templatefile=f"subtemplates/plot_config_{subtemplate}.j2",
templatefallbackfile=f"subtemplates/plot_config_{fallback}.j2",
)
additional_methods = render_template_with_fallback(
templatefile=f"subtemplates/strategy_methods_{subtemplate}.j2",
templatefallbackfile="subtemplates/strategy_methods_empty.j2",
)
strategy_text = render_template(templatefile='base_strategy.py.j2',
arguments={"strategy": strategy_name,
"indicators": indicators,
"buy_trend": buy_trend,
"sell_trend": sell_trend,
"plot_config": plot_config,
"additional_methods": additional_methods,
})
logger.info(f"Writing strategy to `{strategy_path}`.")
strategy_path.write_text(strategy_text)
def start_new_strategy(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if "strategy" in args and args["strategy"]:
if args["strategy"] == "DefaultStrategy":
raise OperationalException("DefaultStrategy is not allowed as name.")
new_path = config['user_data_dir'] / USERPATH_STRATEGIES / (args["strategy"] + ".py")
if new_path.exists():
raise OperationalException(f"`{new_path}` already exists. "
"Please choose another Strategy Name.")
deploy_new_strategy(args['strategy'], new_path, args['template'])
else:
raise OperationalException("`new-strategy` requires --strategy to be set.")
def deploy_new_hyperopt(hyperopt_name: str, hyperopt_path: Path, subtemplate: str) -> None:
"""
Deploys a new hyperopt template to hyperopt_path
"""
fallback = 'full'
buy_guards = render_template_with_fallback(
templatefile=f"subtemplates/hyperopt_buy_guards_{subtemplate}.j2",
templatefallbackfile=f"subtemplates/hyperopt_buy_guards_{fallback}.j2",
)
sell_guards = render_template_with_fallback(
templatefile=f"subtemplates/hyperopt_sell_guards_{subtemplate}.j2",
templatefallbackfile=f"subtemplates/hyperopt_sell_guards_{fallback}.j2",
)
buy_space = render_template_with_fallback(
templatefile=f"subtemplates/hyperopt_buy_space_{subtemplate}.j2",
templatefallbackfile=f"subtemplates/hyperopt_buy_space_{fallback}.j2",
)
sell_space = render_template_with_fallback(
templatefile=f"subtemplates/hyperopt_sell_space_{subtemplate}.j2",
templatefallbackfile=f"subtemplates/hyperopt_sell_space_{fallback}.j2",
)
strategy_text = render_template(templatefile='base_hyperopt.py.j2',
arguments={"hyperopt": hyperopt_name,
"buy_guards": buy_guards,
"sell_guards": sell_guards,
"buy_space": buy_space,
"sell_space": sell_space,
})
logger.info(f"Writing hyperopt to `{hyperopt_path}`.")
hyperopt_path.write_text(strategy_text)
def start_new_hyperopt(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if "hyperopt" in args and args["hyperopt"]:
if args["hyperopt"] == "DefaultHyperopt":
raise OperationalException("DefaultHyperopt is not allowed as name.")
new_path = config['user_data_dir'] / USERPATH_HYPEROPTS / (args["hyperopt"] + ".py")
if new_path.exists():
raise OperationalException(f"`{new_path}` already exists. "
"Please choose another Strategy Name.")
deploy_new_hyperopt(args['hyperopt'], new_path, args['template'])
else:
raise OperationalException("`new-hyperopt` requires --hyperopt to be set.")

View File

@ -0,0 +1,182 @@
import logging
from operator import itemgetter
from typing import Any, Dict, List
from colorama import init as colorama_init
from freqtrade.configuration import setup_utils_configuration
from freqtrade.exceptions import OperationalException
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def start_hyperopt_list(args: Dict[str, Any]) -> None:
"""
List hyperopt epochs previously evaluated
"""
from freqtrade.optimize.hyperopt import Hyperopt
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
print_colorized = config.get('print_colorized', False)
print_json = config.get('print_json', False)
export_csv = config.get('export_csv', None)
no_details = config.get('hyperopt_list_no_details', False)
no_header = False
filteroptions = {
'only_best': config.get('hyperopt_list_best', False),
'only_profitable': config.get('hyperopt_list_profitable', False),
'filter_min_trades': config.get('hyperopt_list_min_trades', 0),
'filter_max_trades': config.get('hyperopt_list_max_trades', 0),
'filter_min_avg_time': config.get('hyperopt_list_min_avg_time', None),
'filter_max_avg_time': config.get('hyperopt_list_max_avg_time', None),
'filter_min_avg_profit': config.get('hyperopt_list_min_avg_profit', None),
'filter_max_avg_profit': config.get('hyperopt_list_max_avg_profit', None),
'filter_min_total_profit': config.get('hyperopt_list_min_total_profit', None),
'filter_max_total_profit': config.get('hyperopt_list_max_total_profit', None)
}
results_file = (config['user_data_dir'] /
'hyperopt_results' / 'hyperopt_results.pickle')
# Previous evaluations
epochs = Hyperopt.load_previous_results(results_file)
total_epochs = len(epochs)
epochs = _hyperopt_filter_epochs(epochs, filteroptions)
if print_colorized:
colorama_init(autoreset=True)
if not export_csv:
try:
print(Hyperopt.get_result_table(config, epochs, total_epochs,
not filteroptions['only_best'], print_colorized, 0))
except KeyboardInterrupt:
print('User interrupted...')
if epochs and not no_details:
sorted_epochs = sorted(epochs, key=itemgetter('loss'))
results = sorted_epochs[0]
Hyperopt.print_epoch_details(results, total_epochs, print_json, no_header)
if epochs and export_csv:
Hyperopt.export_csv_file(
config, epochs, total_epochs, not filteroptions['only_best'], export_csv
)
def start_hyperopt_show(args: Dict[str, Any]) -> None:
"""
Show details of a hyperopt epoch previously evaluated
"""
from freqtrade.optimize.hyperopt import Hyperopt
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
print_json = config.get('print_json', False)
no_header = config.get('hyperopt_show_no_header', False)
results_file = (config['user_data_dir'] /
'hyperopt_results' / 'hyperopt_results.pickle')
n = config.get('hyperopt_show_index', -1)
filteroptions = {
'only_best': config.get('hyperopt_list_best', False),
'only_profitable': config.get('hyperopt_list_profitable', False),
'filter_min_trades': config.get('hyperopt_list_min_trades', 0),
'filter_max_trades': config.get('hyperopt_list_max_trades', 0),
'filter_min_avg_time': config.get('hyperopt_list_min_avg_time', None),
'filter_max_avg_time': config.get('hyperopt_list_max_avg_time', None),
'filter_min_avg_profit': config.get('hyperopt_list_min_avg_profit', None),
'filter_max_avg_profit': config.get('hyperopt_list_max_avg_profit', None),
'filter_min_total_profit': config.get('hyperopt_list_min_total_profit', None),
'filter_max_total_profit': config.get('hyperopt_list_max_total_profit', None)
}
# Previous evaluations
epochs = Hyperopt.load_previous_results(results_file)
total_epochs = len(epochs)
epochs = _hyperopt_filter_epochs(epochs, filteroptions)
filtered_epochs = len(epochs)
if n > filtered_epochs:
raise OperationalException(
f"The index of the epoch to show should be less than {filtered_epochs + 1}.")
if n < -filtered_epochs:
raise OperationalException(
f"The index of the epoch to show should be greater than {-filtered_epochs - 1}.")
# Translate epoch index from human-readable format to pythonic
if n > 0:
n -= 1
if epochs:
val = epochs[n]
Hyperopt.print_epoch_details(val, total_epochs, print_json, no_header,
header_str="Epoch details")
def _hyperopt_filter_epochs(epochs: List, filteroptions: dict) -> List:
"""
Filter out items from the list of hyperopt results
"""
if filteroptions['only_best']:
epochs = [x for x in epochs if x['is_best']]
if filteroptions['only_profitable']:
epochs = [x for x in epochs if x['results_metrics']['profit'] > 0]
if filteroptions['filter_min_trades'] > 0:
epochs = [
x for x in epochs
if x['results_metrics']['trade_count'] > filteroptions['filter_min_trades']
]
if filteroptions['filter_max_trades'] > 0:
epochs = [
x for x in epochs
if x['results_metrics']['trade_count'] < filteroptions['filter_max_trades']
]
if filteroptions['filter_min_avg_time'] is not None:
epochs = [x for x in epochs if x['results_metrics']['trade_count'] > 0]
epochs = [
x for x in epochs
if x['results_metrics']['duration'] > filteroptions['filter_min_avg_time']
]
if filteroptions['filter_max_avg_time'] is not None:
epochs = [x for x in epochs if x['results_metrics']['trade_count'] > 0]
epochs = [
x for x in epochs
if x['results_metrics']['duration'] < filteroptions['filter_max_avg_time']
]
if filteroptions['filter_min_avg_profit'] is not None:
epochs = [x for x in epochs if x['results_metrics']['trade_count'] > 0]
epochs = [
x for x in epochs
if x['results_metrics']['avg_profit'] > filteroptions['filter_min_avg_profit']
]
if filteroptions['filter_max_avg_profit'] is not None:
epochs = [x for x in epochs if x['results_metrics']['trade_count'] > 0]
epochs = [
x for x in epochs
if x['results_metrics']['avg_profit'] < filteroptions['filter_max_avg_profit']
]
if filteroptions['filter_min_total_profit'] is not None:
epochs = [x for x in epochs if x['results_metrics']['trade_count'] > 0]
epochs = [
x for x in epochs
if x['results_metrics']['profit'] > filteroptions['filter_min_total_profit']
]
if filteroptions['filter_max_total_profit'] is not None:
epochs = [x for x in epochs if x['results_metrics']['trade_count'] > 0]
epochs = [
x for x in epochs
if x['results_metrics']['profit'] < filteroptions['filter_max_total_profit']
]
logger.info(f"{len(epochs)} " +
("best " if filteroptions['only_best'] else "") +
("profitable " if filteroptions['only_profitable'] else "") +
"epochs found.")
return epochs
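
Each filter above narrows the epoch list by a single metric. A reduced standalone version with fabricated epoch dicts (only two of the filters, for brevity):

from typing import Any, Dict, List, Optional

epochs: List[Dict[str, Any]] = [
    {"is_best": True, "results_metrics": {"trade_count": 40, "avg_profit": 0.8, "profit": 12.0}},
    {"is_best": False, "results_metrics": {"trade_count": 3, "avg_profit": -0.2, "profit": -1.5}},
]


def filter_epochs(epochs: List[Dict[str, Any]], min_trades: int = 0,
                  min_total_profit: Optional[float] = None) -> List[Dict[str, Any]]:
    """Keep only epochs passing the requested minimum trade count and total profit."""
    if min_trades > 0:
        epochs = [e for e in epochs if e["results_metrics"]["trade_count"] > min_trades]
    if min_total_profit is not None:
        epochs = [e for e in epochs if e["results_metrics"]["profit"] > min_total_profit]
    return epochs


print(len(filter_epochs(epochs, min_trades=10, min_total_profit=0)))  # -> 1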

View File

@ -0,0 +1,226 @@
import csv
import logging
import sys
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List
from colorama import init as colorama_init
from colorama import Fore, Style
import rapidjson
from tabulate import tabulate
from freqtrade.configuration import setup_utils_configuration
from freqtrade.constants import USERPATH_HYPEROPTS, USERPATH_STRATEGIES
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import (available_exchanges, ccxt_exchanges,
market_is_active, symbol_is_pair)
from freqtrade.misc import plural
from freqtrade.resolvers import ExchangeResolver, StrategyResolver
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def start_list_exchanges(args: Dict[str, Any]) -> None:
"""
Print available exchanges
:param args: Cli args from Arguments()
:return: None
"""
exchanges = ccxt_exchanges() if args['list_exchanges_all'] else available_exchanges()
if args['print_one_column']:
print('\n'.join(exchanges))
else:
if args['list_exchanges_all']:
print(f"All exchanges supported by the ccxt library: {', '.join(exchanges)}")
else:
print(f"Exchanges available for Freqtrade: {', '.join(exchanges)}")
def _print_objs_tabular(objs: List, print_colorized: bool) -> None:
if print_colorized:
colorama_init(autoreset=True)
red = Fore.RED
yellow = Fore.YELLOW
reset = Style.RESET_ALL
else:
red = ''
yellow = ''
reset = ''
names = [s['name'] for s in objs]
objss_to_print = [{
'name': s['name'] if s['name'] else "--",
'location': s['location'].name,
'status': (red + "LOAD FAILED" + reset if s['class'] is None
else "OK" if names.count(s['name']) == 1
else yellow + "DUPLICATE NAME" + reset)
} for s in objs]
print(tabulate(objss_to_print, headers='keys', tablefmt='psql', stralign='right'))
def start_list_strategies(args: Dict[str, Any]) -> None:
"""
Print files with Strategy custom classes available in the directory
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
directory = Path(config.get('strategy_path', config['user_data_dir'] / USERPATH_STRATEGIES))
strategy_objs = StrategyResolver.search_all_objects(directory, not args['print_one_column'])
# Sort alphabetically
strategy_objs = sorted(strategy_objs, key=lambda x: x['name'])
if args['print_one_column']:
print('\n'.join([s['name'] for s in strategy_objs]))
else:
_print_objs_tabular(strategy_objs, config.get('print_colorized', False))
def start_list_hyperopts(args: Dict[str, Any]) -> None:
"""
Print files with HyperOpt custom classes available in the directory
"""
from freqtrade.resolvers.hyperopt_resolver import HyperOptResolver
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
directory = Path(config.get('hyperopt_path', config['user_data_dir'] / USERPATH_HYPEROPTS))
hyperopt_objs = HyperOptResolver.search_all_objects(directory, not args['print_one_column'])
# Sort alphabetically
hyperopt_objs = sorted(hyperopt_objs, key=lambda x: x['name'])
if args['print_one_column']:
print('\n'.join([s['name'] for s in hyperopt_objs]))
else:
_print_objs_tabular(hyperopt_objs, config.get('print_colorized', False))
def start_list_timeframes(args: Dict[str, Any]) -> None:
"""
Print ticker intervals (timeframes) available on Exchange
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
# Do not use ticker_interval set in the config
config['ticker_interval'] = None
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
if args['print_one_column']:
print('\n'.join(exchange.timeframes))
else:
print(f"Timeframes available for the exchange `{exchange.name}`: "
f"{', '.join(exchange.timeframes)}")
def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
"""
Print pairs/markets on the exchange
:param args: Cli args from Arguments()
:param pairs_only: if True print only pairs, otherwise print all instruments (markets)
:return: None
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
# By default only active pairs/markets are to be shown
active_only = not args.get('list_pairs_all', False)
base_currencies = args.get('base_currencies', [])
quote_currencies = args.get('quote_currencies', [])
try:
pairs = exchange.get_markets(base_currencies=base_currencies,
quote_currencies=quote_currencies,
pairs_only=pairs_only,
active_only=active_only)
# Sort the pairs/markets by symbol
pairs = OrderedDict(sorted(pairs.items()))
except Exception as e:
raise OperationalException(f"Cannot get markets. Reason: {e}") from e
else:
summary_str = ((f"Exchange {exchange.name} has {len(pairs)} ") +
("active " if active_only else "") +
(plural(len(pairs), "pair" if pairs_only else "market")) +
(f" with {', '.join(base_currencies)} as base "
f"{plural(len(base_currencies), 'currency', 'currencies')}"
if base_currencies else "") +
(" and" if base_currencies and quote_currencies else "") +
(f" with {', '.join(quote_currencies)} as quote "
f"{plural(len(quote_currencies), 'currency', 'currencies')}"
if quote_currencies else ""))
headers = ["Id", "Symbol", "Base", "Quote", "Active",
*(['Is pair'] if not pairs_only else [])]
tabular_data = []
for _, v in pairs.items():
tabular_data.append({'Id': v['id'], 'Symbol': v['symbol'],
'Base': v['base'], 'Quote': v['quote'],
'Active': market_is_active(v),
**({'Is pair': symbol_is_pair(v['symbol'])}
if not pairs_only else {})})
if (args.get('print_one_column', False) or
args.get('list_pairs_print_json', False) or
args.get('print_csv', False)):
# Print summary string in the log in case of machine-readable
# regular formats.
logger.info(f"{summary_str}.")
else:
# Print empty string separating leading logs and output in case of
# human-readable formats.
print()
if len(pairs):
if args.get('print_list', False):
# print data as a list, with human-readable summary
print(f"{summary_str}: {', '.join(pairs.keys())}.")
elif args.get('print_one_column', False):
print('\n'.join(pairs.keys()))
elif args.get('list_pairs_print_json', False):
print(rapidjson.dumps(list(pairs.keys()), default=str))
elif args.get('print_csv', False):
writer = csv.DictWriter(sys.stdout, fieldnames=headers)
writer.writeheader()
writer.writerows(tabular_data)
else:
# print data as a table, with the human-readable summary
print(f"{summary_str}:")
print(tabulate(tabular_data, headers='keys', tablefmt='psql', stralign='right'))
elif not (args.get('print_one_column', False) or
args.get('list_pairs_print_json', False) or
args.get('print_csv', False)):
print(f"{summary_str}.")
def start_show_trades(args: Dict[str, Any]) -> None:
"""
Show trades
"""
from freqtrade.persistence import init, Trade
import json
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if 'db_url' not in config:
raise OperationalException("--db-url is required for this command.")
logger.info(f'Using DB: "{config["db_url"]}"')
init(config['db_url'], clean_open_orders=False)
tfilter = []
if config.get('trade_ids'):
tfilter.append(Trade.id.in_(config['trade_ids']))
trades = Trade.get_trades(tfilter).all()
logger.info(f"Printing {len(trades)} Trades: ")
if config.get('print_json', False):
print(json.dumps([trade.to_json() for trade in trades], indent=4))
else:
for trade in trades:
print(trade)
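
`_print_objs_tabular()` above relies on tabulate accepting a list of dicts when `headers='keys'`. A tiny standalone example with invented rows:

from tabulate import tabulate

# Rows in the same shape _print_objs_tabular() builds from the resolver output.
objs_to_print = [
    {"name": "SampleStrategy", "location": "sample_strategy.py", "status": "OK"},
    {"name": "BrokenStrategy", "location": "broken_strategy.py", "status": "LOAD FAILED"},
]
print(tabulate(objs_to_print, headers="keys", tablefmt="psql", stralign="right"))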

View File

@ -0,0 +1,107 @@
import logging
from typing import Any, Dict
from freqtrade import constants
from freqtrade.configuration import setup_utils_configuration
from freqtrade.exceptions import DependencyException, OperationalException
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def setup_optimize_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:
"""
Prepare the configuration for the Hyperopt module
:param args: Cli args from Arguments()
:return: Configuration
"""
config = setup_utils_configuration(args, method)
no_unlimited_runmodes = {
RunMode.BACKTEST: 'backtesting',
RunMode.HYPEROPT: 'hyperoptimization',
}
if (method in no_unlimited_runmodes.keys() and
config['stake_amount'] == constants.UNLIMITED_STAKE_AMOUNT):
raise DependencyException(
f'The value of `stake_amount` cannot be set as "{constants.UNLIMITED_STAKE_AMOUNT}" '
f'for {no_unlimited_runmodes[method]}')
return config
def start_backtesting(args: Dict[str, Any]) -> None:
"""
Start Backtesting script
:param args: Cli args from Arguments()
:return: None
"""
# Import here to avoid loading backtesting module when it's not used
from freqtrade.optimize.backtesting import Backtesting
# Initialize configuration
config = setup_optimize_configuration(args, RunMode.BACKTEST)
logger.info('Starting freqtrade in Backtesting mode')
# Initialize backtesting object
backtesting = Backtesting(config)
backtesting.start()
def start_hyperopt(args: Dict[str, Any]) -> None:
"""
Start hyperopt script
:param args: Cli args from Arguments()
:return: None
"""
# Import here to avoid loading hyperopt module when it's not used
try:
from filelock import FileLock, Timeout
from freqtrade.optimize.hyperopt import Hyperopt
except ImportError as e:
raise OperationalException(
f"{e}. Please ensure that the hyperopt dependencies are installed.") from e
# Initialize configuration
config = setup_optimize_configuration(args, RunMode.HYPEROPT)
logger.info('Starting freqtrade in Hyperopt mode')
lock = FileLock(Hyperopt.get_lock_filename(config))
try:
with lock.acquire(timeout=1):
# Remove noisy log messages
logging.getLogger('hyperopt.tpe').setLevel(logging.WARNING)
logging.getLogger('filelock').setLevel(logging.WARNING)
# Initialize hyperopt object
hyperopt = Hyperopt(config)
hyperopt.start()
except Timeout:
logger.info("Another running instance of freqtrade Hyperopt detected.")
logger.info("Simultaneous execution of multiple Hyperopt commands is not supported. "
"Hyperopt module is resource hungry. Please run your Hyperopt sequentially "
"or on separate machines.")
logger.info("Quitting now.")
# TODO: return False here in order to help freqtrade to exit
# with non-zero exit code...
# Same in Edge and Backtesting start() functions.
def start_edge(args: Dict[str, Any]) -> None:
"""
Start Edge script
:param args: Cli args from Arguments()
:return: None
"""
from freqtrade.optimize.edge_cli import EdgeCli
# Initialize configuration
config = setup_optimize_configuration(args, RunMode.EDGE)
logger.info('Starting freqtrade in Edge mode')
# Initialize Edge object
edge_cli = EdgeCli(config)
edge_cli.start()
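
The single-instance guard in `start_hyperopt()` is the standard filelock idiom; stripped of the freqtrade specifics it looks like this (the lock-file name is invented):

from filelock import FileLock, Timeout

lock = FileLock("hyperopt.lock")  # hypothetical lock-file name

try:
    with lock.acquire(timeout=1):
        print("Lock acquired - running the resource-hungry job.")
        # ... the long-running optimisation would go here ...
except Timeout:
    print("Another instance already holds the lock - quitting.")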

View File

@ -0,0 +1,42 @@
import logging
from typing import Any, Dict
import rapidjson
from freqtrade.configuration import setup_utils_configuration
from freqtrade.resolvers import ExchangeResolver
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def start_test_pairlist(args: Dict[str, Any]) -> None:
"""
Test Pairlist configuration
"""
from freqtrade.pairlist.pairlistmanager import PairListManager
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
quote_currencies = args.get('quote_currencies')
if not quote_currencies:
quote_currencies = [config.get('stake_currency')]
results = {}
for curr in quote_currencies:
config['stake_currency'] = curr
# Do not use ticker_interval set in the config
pairlists = PairListManager(exchange, config)
pairlists.refresh_pairlist()
results[curr] = pairlists.whitelist
for curr, pairlist in results.items():
if not args.get('print_one_column', False):
print(f"Pairs for {curr}: ")
if args.get('print_one_column', False):
print('\n'.join(pairlist))
elif args.get('list_pairs_print_json', False):
print(rapidjson.dumps(list(pairlist), default=str))
else:
print(pairlist)

View File

@ -1,11 +1,11 @@
from typing import Any, Dict
from freqtrade import OperationalException
from freqtrade.configuration import setup_utils_configuration
from freqtrade.exceptions import OperationalException
from freqtrade.state import RunMode
from freqtrade.utils import setup_utils_configuration
def validate_plot_args(args: Dict[str, Any]):
def validate_plot_args(args: Dict[str, Any]) -> None:
if not args.get('datadir') and not args.get('config'):
raise OperationalException(
"You need to specify either `--datadir` or `--config` "

View File

@ -0,0 +1,30 @@
import logging
from typing import Any, Dict
logger = logging.getLogger(__name__)
def start_trading(args: Dict[str, Any]) -> int:
"""
Main entry point for trading mode
"""
# Import here to avoid loading worker module when it's not used
from freqtrade.worker import Worker
# Create and run worker
worker = None
try:
worker = Worker(args)
worker.run()
except Exception as e:
logger.error(str(e))
logger.exception("Fatal exception!")
except KeyboardInterrupt:
logger.info('SIGINT received, aborting ...')
finally:
if worker:
logger.info("worker found ... calling exit")
worker.exit()
return 0

View File

@ -1,5 +1,7 @@
from freqtrade.configuration.arguments import Arguments # noqa: F401
from freqtrade.configuration.check_exchange import check_exchange, remove_credentials # noqa: F401
from freqtrade.configuration.timerange import TimeRange # noqa: F401
from freqtrade.configuration.configuration import Configuration # noqa: F401
from freqtrade.configuration.config_validation import validate_config_consistency # noqa: F401
# flake8: noqa: F401
from freqtrade.configuration.config_setup import setup_utils_configuration
from freqtrade.configuration.check_exchange import check_exchange, remove_credentials
from freqtrade.configuration.timerange import TimeRange
from freqtrade.configuration.configuration import Configuration
from freqtrade.configuration.config_validation import validate_config_consistency

View File

@ -1,16 +1,16 @@
import logging
from typing import Any, Dict
from freqtrade import OperationalException
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import (available_exchanges, get_exchange_bad_reason,
is_exchange_known_ccxt, is_exchange_bad,
is_exchange_bad, is_exchange_known_ccxt,
is_exchange_officially_supported)
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def remove_credentials(config: Dict[str, Any]):
def remove_credentials(config: Dict[str, Any]) -> None:
"""
Removes exchange keys from the configuration and specifies dry-run
Used for backtesting / hyperopt / edge and utils.

View File

@ -0,0 +1,25 @@
import logging
from typing import Any, Dict
from .config_validation import validate_config_consistency
from .configuration import Configuration
from .check_exchange import remove_credentials
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def setup_utils_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:
"""
Prepare the configuration for utils subcommands
:param args: Cli args from Arguments()
:return: Configuration
"""
configuration = Configuration(args, method)
config = configuration.get_config()
# Ensure we do not use Exchange credentials
remove_credentials(config)
validate_config_consistency(config)
return config

View File

@ -1,10 +1,12 @@
import logging
from copy import deepcopy
from typing import Any, Dict
from jsonschema import Draft4Validator, validators
from jsonschema.exceptions import ValidationError, best_match
from freqtrade import constants, OperationalException
from freqtrade import constants
from freqtrade.exceptions import OperationalException
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
@ -41,15 +43,20 @@ def validate_config_schema(conf: Dict[str, Any]) -> Dict[str, Any]:
:param conf: Config in JSON format
:return: Returns the config if valid, otherwise throw an exception
"""
conf_schema = deepcopy(constants.CONF_SCHEMA)
if conf.get('runmode', RunMode.OTHER) in (RunMode.DRY_RUN, RunMode.LIVE):
conf_schema['required'] = constants.SCHEMA_TRADE_REQUIRED
else:
conf_schema['required'] = constants.SCHEMA_MINIMAL_REQUIRED
try:
FreqtradeValidator(constants.CONF_SCHEMA).validate(conf)
FreqtradeValidator(conf_schema).validate(conf)
return conf
except ValidationError as e:
logger.critical(
f"Invalid configuration. See config.json.example. Reason: {e}"
)
raise ValidationError(
best_match(Draft4Validator(constants.CONF_SCHEMA).iter_errors(conf)).message
best_match(Draft4Validator(conf_schema).iter_errors(conf)).message
)
@ -66,12 +73,24 @@ def validate_config_consistency(conf: Dict[str, Any]) -> None:
_validate_trailing_stoploss(conf)
_validate_edge(conf)
_validate_whitelist(conf)
_validate_unlimited_amount(conf)
# validate configuration before returning
logger.info('Validating configuration ...')
validate_config_schema(conf)
def _validate_unlimited_amount(conf: Dict[str, Any]) -> None:
"""
If edge is disabled, either max_open_trades or stake_amount need to be set.
:raise: OperationalException if config validation failed
"""
if (not conf.get('edge', {}).get('enabled')
and conf.get('max_open_trades') == float('inf')
and conf.get('stake_amount') == constants.UNLIMITED_STAKE_AMOUNT):
raise OperationalException("`max_open_trades` and `stake_amount` cannot both be unlimited.")
def _validate_trailing_stoploss(conf: Dict[str, Any]) -> None:
if conf.get('stoploss') == 0.0:
@ -131,15 +150,3 @@ def _validate_whitelist(conf: Dict[str, Any]) -> None:
if (pl.get('method') == 'StaticPairList'
and not conf.get('exchange', {}).get('pair_whitelist')):
raise OperationalException("StaticPairList requires pair_whitelist to be set.")
if pl.get('method') == 'StaticPairList':
stake = conf['stake_currency']
invalid_pairs = []
for pair in conf['exchange'].get('pair_whitelist'):
if not pair.endswith(f'/{stake}'):
invalid_pairs.append(pair)
if invalid_pairs:
raise OperationalException(
f"Stake-currency '{stake}' not compatible with pair-whitelist. "
f"Please remove the following pairs: {invalid_pairs}")

View File

@ -7,15 +7,16 @@ from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional
from freqtrade import OperationalException, constants
from freqtrade import constants
from freqtrade.configuration.check_exchange import check_exchange
from freqtrade.configuration.deprecated_settings import process_temporary_deprecated_settings
from freqtrade.configuration.directory_operations import (create_datadir,
create_userdata_dir)
from freqtrade.configuration.load_config import load_config_file
from freqtrade.exceptions import OperationalException
from freqtrade.loggers import setup_logging
from freqtrade.misc import deep_merge_dicts, json_load
from freqtrade.state import RunMode, TRADING_MODES, NON_UTIL_MODES
from freqtrade.state import NON_UTIL_MODES, TRADING_MODES, RunMode
logger = logging.getLogger(__name__)
@ -95,6 +96,8 @@ class Configuration:
# Keep a copy of the original configuration file
config['original_config'] = deepcopy(config)
self._process_logging_options(config)
self._process_runmode(config)
self._process_common_options(config)
@ -145,8 +148,6 @@ class Configuration:
def _process_common_options(self, config: Dict[str, Any]) -> None:
self._process_logging_options(config)
# Set strategy if not specified in config and or if it's non default
if self.args.get("strategy") or not config.get('strategy'):
config.update({'strategy': self.args.get("strategy")})
@ -166,10 +167,6 @@ class Configuration:
if 'sd_notify' in self.args and self.args["sd_notify"]:
config['internals'].update({'sd_notify': True})
self._args_to_config(config, argname='dry_run',
logstring='Parameter --dry-run detected, '
'overriding dry_run to: {} ...')
def _process_datadir_options(self, config: Dict[str, Any]) -> None:
"""
Extract information for sys.argv and load directory configurations
@ -199,6 +196,7 @@ class Configuration:
if self.args.get('exportfilename'):
self._args_to_config(config, argname='exportfilename',
logstring='Storing backtest results to {} ...')
config['exportfilename'] = Path(config['exportfilename'])
else:
config['exportfilename'] = (config['user_data_dir']
/ 'backtest_results/backtest-result.json')
@ -223,13 +221,13 @@ class Configuration:
logger.info('max_open_trades set to unlimited ...')
elif 'max_open_trades' in self.args and self.args["max_open_trades"]:
config.update({'max_open_trades': self.args["max_open_trades"]})
logger.info('Parameter --max_open_trades detected, '
logger.info('Parameter --max-open-trades detected, '
'overriding max_open_trades to: %s ...', config.get('max_open_trades'))
elif config['runmode'] in NON_UTIL_MODES:
logger.info('Using max_open_trades: %s ...', config.get('max_open_trades'))
self._args_to_config(config, argname='stake_amount',
logstring='Parameter --stake_amount detected, '
logstring='Parameter --stake-amount detected, '
'overriding stake_amount to: {} ...')
self._args_to_config(config, argname='fee',
@ -285,6 +283,9 @@ class Configuration:
self._args_to_config(config, argname='print_json',
logstring='Parameter --print-json detected ...')
self._args_to_config(config, argname='export_csv',
logstring='Parameter --export-csv detected: {}')
self._args_to_config(config, argname='hyperopt_jobs',
logstring='Parameter -j/--job-workers detected: {}')
@ -309,6 +310,30 @@ class Configuration:
self._args_to_config(config, argname='hyperopt_list_profitable',
logstring='Parameter --profitable detected: {}')
self._args_to_config(config, argname='hyperopt_list_min_trades',
logstring='Parameter --min-trades detected: {}')
self._args_to_config(config, argname='hyperopt_list_max_trades',
logstring='Parameter --max-trades detected: {}')
self._args_to_config(config, argname='hyperopt_list_min_avg_time',
logstring='Parameter --min-avg-time detected: {}')
self._args_to_config(config, argname='hyperopt_list_max_avg_time',
logstring='Parameter --max-avg-time detected: {}')
self._args_to_config(config, argname='hyperopt_list_min_avg_profit',
logstring='Parameter --min-avg-profit detected: {}')
self._args_to_config(config, argname='hyperopt_list_max_avg_profit',
logstring='Parameter --max-avg-profit detected: {}')
self._args_to_config(config, argname='hyperopt_list_min_total_profit',
logstring='Parameter --min-total-profit detected: {}')
self._args_to_config(config, argname='hyperopt_list_max_total_profit',
logstring='Parameter --max-total-profit detected: {}')
self._args_to_config(config, argname='hyperopt_list_no_details',
logstring='Parameter --no-details detected: {}')
@ -326,28 +351,46 @@ class Configuration:
self._args_to_config(config, argname='indicators2',
logstring='Using indicators2: {}')
self._args_to_config(config, argname='trade_ids',
logstring='Filtering on trade_ids: {}')
self._args_to_config(config, argname='plot_limit',
logstring='Limiting plot to: {}')
self._args_to_config(config, argname='trade_source',
logstring='Using trades from: {}')
self._args_to_config(config, argname='erase',
logstring='Erase detected. Deleting existing data.')
self._args_to_config(config, argname='no_trades',
logstring='Parameter --no-trades detected.')
self._args_to_config(config, argname='timeframes',
logstring='timeframes --timeframes: {}')
self._args_to_config(config, argname='days',
logstring='Detected --days: {}')
self._args_to_config(config, argname='download_trades',
logstring='Detected --dl-trades: {}')
self._args_to_config(config, argname='dataformat_ohlcv',
logstring='Using "{}" to store OHLCV data.')
self._args_to_config(config, argname='dataformat_trades',
logstring='Using "{}" to store trades data.')
def _process_runmode(self, config: Dict[str, Any]) -> None:
self._args_to_config(config, argname='dry_run',
logstring='Parameter --dry-run detected, '
'overriding dry_run to: {} ...')
if not self.runmode:
# Handle real mode, infer dry/live from config
self.runmode = RunMode.DRY_RUN if config.get('dry_run', True) else RunMode.LIVE
logger.info(f"Runmode set to {self.runmode}.")
logger.info(f"Runmode set to {self.runmode.value}.")
config.update({'runmode': self.runmode})
@ -403,7 +446,7 @@ class Configuration:
config['pairs'] = config.get('exchange', {}).get('pair_whitelist')
else:
# Fall back to /dl_path/pairs.json
pairs_file = Path(config['datadir']) / "pairs.json"
pairs_file = config['datadir'] / "pairs.json"
if pairs_file.exists():
with pairs_file.open('r') as f:
config['pairs'] = json_load(f)
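
All the `_args_to_config()` calls above share one behaviour: copy a CLI value into the config only when it was actually supplied, and log the override. A standalone sketch of that pattern (not freqtrade's implementation):

import logging
from typing import Any, Dict

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def args_to_config(config: Dict[str, Any], args: Dict[str, Any],
                   argname: str, logstring: str) -> None:
    """Copy args[argname] into config when it is present and truthy."""
    if argname in args and args[argname]:
        config[argname] = args[argname]
        logger.info(logstring.format(args[argname]))


config = {"stake_amount": 0.05}
cli_args = {"stake_amount": 0.1, "fee": None}

args_to_config(config, cli_args, "stake_amount",
               "Parameter --stake-amount detected, overriding stake_amount to: {} ...")
args_to_config(config, cli_args, "fee", "Parameter --fee detected: {}")  # no-op: fee not set

print(config)  # {'stake_amount': 0.1}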

View File

@ -5,7 +5,7 @@ Functions to handle deprecated settings
import logging
from typing import Any, Dict
from freqtrade import OperationalException
from freqtrade.exceptions import OperationalException
logger = logging.getLogger(__name__)
@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
def check_conflicting_settings(config: Dict[str, Any],
section1: str, name1: str,
section2: str, name2: str):
section2: str, name2: str) -> None:
section1_config = config.get(section1, {})
section2_config = config.get(section2, {})
if name1 in section1_config and name2 in section2_config:
@ -28,7 +28,7 @@ def check_conflicting_settings(config: Dict[str, Any],
def process_deprecated_setting(config: Dict[str, Any],
section1: str, name1: str,
section2: str, name2: str):
section2: str, name2: str) -> None:
section2_config = config.get(section2, {})
if name2 in section2_config:
@ -58,25 +58,12 @@ def process_temporary_deprecated_settings(config: Dict[str, Any]) -> None:
process_deprecated_setting(config, 'ask_strategy', 'ignore_roi_if_buy_signal',
'experimental', 'ignore_roi_if_buy_signal')
if not config.get('pairlist') and not config.get('pairlists'):
config['pairlists'] = [{'method': 'StaticPairList'}]
if (config.get('edge', {}).get('enabled', False)
and 'capital_available_percentage' in config.get('edge', {})):
logger.warning(
"DEPRECATED: "
"Pairlists must be defined explicitly in the future."
"Defaulting to StaticPairList for now.")
if config.get('pairlist', {}).get("method") == 'VolumePairList':
logger.warning(
"DEPRECATED: "
f"Using VolumePairList in pairlist is deprecated and must be moved to pairlists. "
"Please refer to the docs on configuration details")
pl = {'method': 'VolumePairList'}
pl.update(config.get('pairlist', {}).get('config'))
config['pairlists'].append(pl)
if config.get('pairlist', {}).get('config', {}).get('precision_filter'):
logger.warning(
"DEPRECATED: "
f"Using precision_filter setting is deprecated and has been replaced by"
"PrecisionFilter. Please refer to the docs on configuration details")
config['pairlists'].append({'method': 'PrecisionFilter'})
"Using 'edge.capital_available_percentage' has been deprecated in favor of "
"'tradable_balance_ratio'. Please migrate your configuration to "
"'tradable_balance_ratio' and remove 'capital_available_percentage' "
"from the edge configuration."
)

View File

@ -3,13 +3,13 @@ import shutil
from pathlib import Path
from typing import Any, Dict, Optional
from freqtrade import OperationalException
from freqtrade.exceptions import OperationalException
from freqtrade.constants import USER_DATA_FILES
logger = logging.getLogger(__name__)
def create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> str:
def create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:
folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data")
if not datadir:
@ -20,10 +20,10 @@ def create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> str
if not folder.is_dir():
folder.mkdir(parents=True)
logger.info(f'Created data directory: {folder}')
return str(folder)
return folder
def create_userdata_dir(directory: str, create_dir=False) -> Path:
def create_userdata_dir(directory: str, create_dir: bool = False) -> Path:
"""
Create userdata directory structure.
if create_dir is True, then the parent-directory will be created if it does not exist.
@ -33,8 +33,8 @@ def create_userdata_dir(directory: str, create_dir=False) -> Path:
:param create_dir: Create directory if it does not exist.
:return: Path object containing the directory
"""
sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "notebooks",
"plot", "strategies", ]
sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "logs",
"notebooks", "plot", "strategies", ]
folder = Path(directory)
if not folder.is_dir():
if create_dir:

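The `sub_dirs` list above (now including "logs") ultimately turns into a handful of `mkdir` calls. A compact standalone equivalent with pathlib, writing into a temporary directory instead of a real user directory (`exist_ok=True` is a simplification of the `create_dir` check used above):

import tempfile
from pathlib import Path

sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "logs",
            "notebooks", "plot", "strategies"]

userdir = Path(tempfile.mkdtemp()) / "user_data"
for sub in sub_dirs:
    # parents=True also creates user_data itself on the first iteration.
    (userdir / sub).mkdir(parents=True, exist_ok=True)

print(sorted(p.name for p in userdir.iterdir()))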
View File

@ -1,13 +1,15 @@
"""
This module contain functions to load the configuration file
"""
import rapidjson
import logging
import re
import sys
from pathlib import Path
from typing import Any, Dict
from freqtrade import OperationalException
import rapidjson
from freqtrade.exceptions import OperationalException
logger = logging.getLogger(__name__)
@ -15,6 +17,26 @@ logger = logging.getLogger(__name__)
CONFIG_PARSE_MODE = rapidjson.PM_COMMENTS | rapidjson.PM_TRAILING_COMMAS
def log_config_error_range(path: str, errmsg: str) -> str:
"""
Parses configuration file and prints range around error
"""
if path != '-':
offsetlist = re.findall(r'(?<=Parse\serror\sat\soffset\s)\d+', errmsg)
if offsetlist:
offset = int(offsetlist[0])
text = Path(path).read_text()
# Fetch an offset of 80 characters around the error line
subtext = text[offset-min(80, offset):offset+80]
segments = subtext.split('\n')
if len(segments) > 3:
# Remove first and last lines, to avoid odd truncations
return '\n'.join(segments[1:-1])
else:
return subtext
return ''
def load_config_file(path: str) -> Dict[str, Any]:
"""
Loads a config file from the given path
@ -29,5 +51,12 @@ def load_config_file(path: str) -> Dict[str, Any]:
raise OperationalException(
f'Config file "{path}" not found!'
' Please create a config file or check whether it exists.')
except rapidjson.JSONDecodeError as e:
err_range = log_config_error_range(path, str(e))
raise OperationalException(
f'{e}\n'
f'Please verify the following segment of your configuration:\n{err_range}'
if err_range else 'Please verify your configuration file for syntax errors.'
)
return config
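
`log_config_error_range()` above only needs the byte offset that rapidjson embeds in its "Parse error at offset N" message; the extraction is plain `re` plus string slicing. A standalone sketch with a hand-written error message pointing at the broken spot:

import re


def error_context(text: str, errmsg: str, width: int = 80) -> str:
    """Return up to +/- `width` characters of `text` around the offset named in `errmsg`."""
    match = re.search(r"Parse error at offset (\d+)", errmsg)
    if not match:
        return ""
    offset = int(match.group(1))
    return text[max(0, offset - width):offset + width]


broken = '{"max_open_trades": 3,, "stake_currency": "BTC"}'  # note the doubled comma
print(error_context(broken, "Parse error at offset 22"))     # hand-written parser message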

View File

@ -7,6 +7,7 @@ from typing import Optional
import arrow
logger = logging.getLogger(__name__)
@ -30,7 +31,7 @@ class TimeRange:
return (self.starttype == other.starttype and self.stoptype == other.stoptype
and self.startts == other.startts and self.stopts == other.stopts)
def subtract_start(self, seconds) -> None:
def subtract_start(self, seconds: int) -> None:
"""
Subtracts <seconds> from startts if startts is set.
:param seconds: Seconds to subtract from starttime
@ -44,7 +45,7 @@ class TimeRange:
"""
Adjust startts by <startup_candles> candles.
Applies only if no startup-candles have been available.
:param timeframe_secs: Ticker timeframe in seconds e.g. `timeframe_to_seconds('5m')`
:param timeframe_secs: Timeframe in seconds e.g. `timeframe_to_seconds('5m')`
:param startup_candles: Number of candles to move start-date forward
:param min_date: Minimum data date loaded. Key criterion to decide if start-time
has to be moved
@ -59,7 +60,7 @@ class TimeRange:
self.starttype = 'date'
@staticmethod
def parse_timerange(text: Optional[str]):
def parse_timerange(text: Optional[str]) -> 'TimeRange':
"""
Parse the value of the argument --timerange to determine what is the range desired
:param text: value from --timerange
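A short usage sketch of TimeRange (the timerange string and candle count are illustrative):
from freqtrade.configuration import TimeRange
from freqtrade.exchange import timeframe_to_seconds
timerange = TimeRange.parse_timerange('20200101-20200301')   # starttype == stoptype == 'date'
# Widen the start by 30 startup candles of 5m data
timerange.subtract_start(timeframe_to_seconds('5m') * 30)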

View File

@ -10,41 +10,43 @@ HYPEROPT_EPOCH = 100 # epochs
RETRY_TIMEOUT = 30 # sec
DEFAULT_HYPEROPT_LOSS = 'DefaultHyperOptLoss'
DEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite'
DEFAULT_DB_DRYRUN_URL = 'sqlite://'
DEFAULT_DB_DRYRUN_URL = 'sqlite:///tradesv3.dryrun.sqlite'
UNLIMITED_STAKE_AMOUNT = 'unlimited'
DEFAULT_AMOUNT_RESERVE_PERCENT = 0.05
REQUIRED_ORDERTIF = ['buy', 'sell']
REQUIRED_ORDERTYPES = ['buy', 'sell', 'stoploss', 'stoploss_on_exchange']
ORDERBOOK_SIDES = ['ask', 'bid']
ORDERTYPE_POSSIBILITIES = ['limit', 'market']
ORDERTIF_POSSIBILITIES = ['gtc', 'fok', 'ioc']
AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'PrecisionFilter', 'PriceFilter']
AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList',
'PrecisionFilter', 'PriceFilter', 'SpreadFilter']
AVAILABLE_DATAHANDLERS = ['json', 'jsongz']
DRY_RUN_WALLET = 1000
MATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons
DEFAULT_DATAFRAME_COLUMNS = ['date', 'open', 'high', 'low', 'close', 'volume']
# Don't modify sequence of DEFAULT_TRADES_COLUMNS
# it has wide consequences for stored trades files
DEFAULT_TRADES_COLUMNS = ['timestamp', 'id', 'type', 'side', 'price', 'amount', 'cost']
USERPATH_HYPEROPTS = 'hyperopts'
USERPATH_STRATEGY = 'strategies'
USERPATH_STRATEGIES = 'strategies'
USERPATH_NOTEBOOKS = 'notebooks'
# Source files with destination directories within user-directory
USER_DATA_FILES = {
'sample_strategy.py': USERPATH_STRATEGY,
'sample_strategy.py': USERPATH_STRATEGIES,
'sample_hyperopt_advanced.py': USERPATH_HYPEROPTS,
'sample_hyperopt_loss.py': USERPATH_HYPEROPTS,
'sample_hyperopt.py': USERPATH_HYPEROPTS,
'strategy_analysis_example.ipynb': 'notebooks',
'strategy_analysis_example.ipynb': USERPATH_NOTEBOOKS,
}
TIMEFRAMES = [
'1m', '3m', '5m', '15m', '30m',
'1h', '2h', '4h', '6h', '8h', '12h',
'1d', '3d', '1w',
]
SUPPORTED_FIAT = [
"AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK",
"EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY",
"KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN",
"RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR", "USD",
"BTC", "XBT", "ETH", "XRP", "LTC", "BCH", "USDT"
"BTC", "ETH", "XRP", "LTC", "BCH"
]
MINIMAL_CONFIG = {
@ -66,16 +68,27 @@ CONF_SCHEMA = {
'type': 'object',
'properties': {
'max_open_trades': {'type': ['integer', 'number'], 'minimum': -1},
'ticker_interval': {'type': 'string', 'enum': TIMEFRAMES},
'stake_currency': {'type': 'string', 'enum': ['BTC', 'XBT', 'ETH', 'USDT', 'EUR', 'USD']},
'ticker_interval': {'type': 'string'},
'stake_currency': {'type': 'string'},
'stake_amount': {
'type': ['number', 'string'],
'minimum': 0.0001,
'pattern': UNLIMITED_STAKE_AMOUNT
},
'tradable_balance_ratio': {
'type': 'number',
'minimum': 0.1,
'maximum': 1,
'default': 0.99
},
'amend_last_stake_amount': {'type': 'boolean', 'default': False},
'last_stake_amount_min_ratio': {
'type': 'number', 'minimum': 0.0, 'maximum': 1.0, 'default': 0.5
},
'fiat_display_currency': {'type': 'string', 'enum': SUPPORTED_FIAT},
'dry_run': {'type': 'boolean'},
'dry_run_wallet': {'type': 'number', 'default': DRY_RUN_WALLET},
'cancel_open_orders_on_exit': {'type': 'boolean', 'default': False},
'process_only_new_candles': {'type': 'boolean'},
'minimal_roi': {
'type': 'object',
@ -105,15 +118,16 @@ CONF_SCHEMA = {
'minimum': 0,
'maximum': 1,
'exclusiveMaximum': False,
'use_order_book': {'type': 'boolean'},
'order_book_top': {'type': 'integer', 'maximum': 20, 'minimum': 1},
'check_depth_of_market': {
'type': 'object',
'properties': {
'enabled': {'type': 'boolean'},
'bids_to_ask_delta': {'type': 'number', 'minimum': 0},
}
},
},
'price_side': {'type': 'string', 'enum': ORDERBOOK_SIDES, 'default': 'bid'},
'use_order_book': {'type': 'boolean'},
'order_book_top': {'type': 'integer', 'maximum': 20, 'minimum': 1},
'check_depth_of_market': {
'type': 'object',
'properties': {
'enabled': {'type': 'boolean'},
'bids_to_ask_delta': {'type': 'number', 'minimum': 0},
}
},
},
'required': ['ask_last_balance']
@ -121,6 +135,7 @@ CONF_SCHEMA = {
'ask_strategy': {
'type': 'object',
'properties': {
'price_side': {'type': 'string', 'enum': ORDERBOOK_SIDES, 'default': 'ask'},
'use_order_book': {'type': 'boolean'},
'order_book_min': {'type': 'integer', 'minimum': 1},
'order_book_max': {'type': 'integer', 'minimum': 1, 'maximum': 50},
@ -185,7 +200,9 @@ CONF_SCHEMA = {
'properties': {
'enabled': {'type': 'boolean'},
'webhookbuy': {'type': 'object'},
'webhookbuycancel': {'type': 'object'},
'webhooksell': {'type': 'object'},
'webhooksellcancel': {'type': 'object'},
'webhookstatus': {'type': 'object'},
},
},
@ -209,11 +226,22 @@ CONF_SCHEMA = {
'forcebuy_enable': {'type': 'boolean'},
'internals': {
'type': 'object',
'default': {},
'properties': {
'process_throttle_secs': {'type': 'integer'},
'interval': {'type': 'integer'},
'sd_notify': {'type': 'boolean'},
}
},
'dataformat_ohlcv': {
'type': 'string',
'enum': AVAILABLE_DATAHANDLERS,
'default': 'json'
},
'dataformat_trades': {
'type': 'string',
'enum': AVAILABLE_DATAHANDLERS,
'default': 'jsongz'
}
},
'definitions': {
@ -230,7 +258,6 @@ CONF_SCHEMA = {
'type': 'array',
'items': {
'type': 'string',
'pattern': '^[0-9A-Z]+/[0-9A-Z]+$'
},
'uniqueItems': True
},
@ -238,7 +265,6 @@ CONF_SCHEMA = {
'type': 'array',
'items': {
'type': 'string',
'pattern': '^[0-9A-Z]+/[0-9A-Z]+$'
},
'uniqueItems': True
},
@ -266,19 +292,40 @@ CONF_SCHEMA = {
'max_trade_duration_minute': {'type': 'integer'},
'remove_pumps': {'type': 'boolean'}
},
'required': ['process_throttle_secs', 'allowed_risk', 'capital_available_percentage']
'required': ['process_throttle_secs', 'allowed_risk']
}
},
'required': [
'exchange',
'max_open_trades',
'stake_currency',
'stake_amount',
'dry_run',
'dry_run_wallet',
'bid_strategy',
'unfilledtimeout',
'stoploss',
'minimal_roi',
]
}
SCHEMA_TRADE_REQUIRED = [
'exchange',
'max_open_trades',
'stake_currency',
'stake_amount',
'tradable_balance_ratio',
'last_stake_amount_min_ratio',
'dry_run',
'dry_run_wallet',
'ask_strategy',
'bid_strategy',
'unfilledtimeout',
'stoploss',
'minimal_roi',
'internals',
'dataformat_ohlcv',
'dataformat_trades',
]
SCHEMA_MINIMAL_REQUIRED = [
'exchange',
'dry_run',
'dataformat_ohlcv',
'dataformat_trades',
]
CANCEL_REASON = {
"TIMEOUT": "cancelled due to timeout",
"PARTIALLY_FILLED": "partially filled - keeping order open",
"ALL_CANCELLED": "cancelled (all unfilled and partially filled open orders cancelled)",
"CANCELLED_ON_EXCHANGE": "cancelled on exchange",
}
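For reference, a sketch of a configuration fragment touching the keys added or changed in this schema; the values are illustrative defaults, not recommendations:
config_fragment = {
    "tradable_balance_ratio": 0.99,        # share of the balance the bot may use
    "dry_run_wallet": 1000,                # simulated starting balance for dry-run
    "cancel_open_orders_on_exit": False,
    "dataformat_ohlcv": "json",            # must be one of AVAILABLE_DATAHANDLERS
    "dataformat_trades": "jsongz",
    "bid_strategy": {"price_side": "bid", "ask_last_balance": 0.0},
    "ask_strategy": {"price_side": "ask"},
}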

View File

@ -3,7 +3,7 @@ Helpers when analyzing backtest data
"""
import logging
from pathlib import Path
from typing import Dict
from typing import Dict, Union, Tuple
import numpy as np
import pandas as pd
@ -20,7 +20,7 @@ BT_DATA_COLUMNS = ["pair", "profitperc", "open_time", "close_time", "index", "du
"open_rate", "close_rate", "open_at_end", "sell_reason"]
def load_backtest_data(filename) -> pd.DataFrame:
def load_backtest_data(filename: Union[Path, str]) -> pd.DataFrame:
"""
Load backtest data file.
:param filename: pathlib.Path object, or string pointing to the file.
@ -47,7 +47,7 @@ def load_backtest_data(filename) -> pd.DataFrame:
utc=True,
infer_datetime_format=True
)
df['profitabs'] = df['close_rate'] - df['open_rate']
df['profit'] = df['close_rate'] - df['open_rate']
df = df.sort_values("open_time").reset_index(drop=True)
return df
@ -111,7 +111,7 @@ def load_trades_from_db(db_url: str) -> pd.DataFrame:
t.calc_profit(), t.calc_profit_ratio(),
t.open_rate, t.close_rate, t.amount,
(round((t.close_date.timestamp() - t.open_date.timestamp()) / 60, 2)
if t.close_date else None),
if t.close_date else None),
t.sell_reason,
t.fee_open, t.fee_close,
t.open_rate_requested,
@ -129,38 +129,56 @@ def load_trades_from_db(db_url: str) -> pd.DataFrame:
return trades
def load_trades(source: str, db_url: str, exportfilename: str) -> pd.DataFrame:
def load_trades(source: str, db_url: str, exportfilename: Path,
no_trades: bool = False) -> pd.DataFrame:
"""
Based on configuration option "trade_source":
* loads data from DB (using `db_url`)
* loads data from backtestfile (using `exportfilename`)
:param source: "DB" or "file" - specify source to load from
:param db_url: sqlalchemy formatted url to a database
:param exportfilename: Json file generated by backtesting
:param no_trades: Skip using trades, only return backtesting data columns
:return: DataFrame containing trades
"""
if no_trades:
df = pd.DataFrame(columns=BT_DATA_COLUMNS)
return df
if source == "DB":
return load_trades_from_db(db_url)
elif source == "file":
return load_backtest_data(Path(exportfilename))
return load_backtest_data(exportfilename)
def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame) -> pd.DataFrame:
def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame,
date_index=False) -> pd.DataFrame:
"""
Compare trades and backtested pair DataFrames to get trades performed on backtested period
:return: the DataFrame of trades for the given period
"""
trades = trades.loc[(trades['open_time'] >= dataframe.iloc[0]['date']) &
(trades['close_time'] <= dataframe.iloc[-1]['date'])]
if date_index:
trades_start = dataframe.index[0]
trades_stop = dataframe.index[-1]
else:
trades_start = dataframe.iloc[0]['date']
trades_stop = dataframe.iloc[-1]['date']
trades = trades.loc[(trades['open_time'] >= trades_start) &
(trades['close_time'] <= trades_stop)]
return trades
def combine_tickers_with_mean(tickers: Dict[str, pd.DataFrame], column: str = "close"):
def combine_dataframes_with_mean(data: Dict[str, pd.DataFrame],
column: str = "close") -> pd.DataFrame:
"""
Combine the "column" column of multiple dataframes into one dataframe
:param tickers: Dict of Dataframes, dict key should be pair.
:param data: Dict of Dataframes, dict key should be pair.
:param column: Column in the original dataframes to use
:return: DataFrame with the column renamed to the dict key, and a column
named mean, containing the mean of all pairs.
"""
df_comb = pd.concat([tickers[pair].set_index('date').rename(
{column: pair}, axis=1)[pair] for pair in tickers], axis=1)
df_comb = pd.concat([data[pair].set_index('date').rename(
{column: pair}, axis=1)[pair] for pair in data], axis=1)
df_comb['mean'] = df_comb.mean(axis=1)
@ -187,3 +205,30 @@ def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
# FFill to get continuous
df[col_name] = df[col_name].ffill()
return df
def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_time',
value_col: str = 'profitperc'
) -> Tuple[float, pd.Timestamp, pd.Timestamp]:
"""
Calculate max drawdown and the corresponding close dates
:param trades: DataFrame containing trades (requires columns close_time and profitperc)
:param date_col: Column in DataFrame to use for dates (defaults to 'close_time')
:param value_col: Column in DataFrame to use for values (defaults to 'profitperc')
:return: Tuple (float, highdate, lowdate) with absolute max drawdown, high and low time
:raise: ValueError if trade-dataframe was found empty.
"""
if len(trades) == 0:
raise ValueError("Trade dataframe empty.")
profit_results = trades.sort_values(date_col).reset_index(drop=True)
max_drawdown_df = pd.DataFrame()
max_drawdown_df['cumulative'] = profit_results[value_col].cumsum()
max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax()
max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value']
idxmin = max_drawdown_df['drawdown'].idxmin()
if idxmin == 0:
raise ValueError("No losing trade, therefore no drawdown.")
high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]['high_value'].idxmax(), date_col]
low_date = profit_results.loc[idxmin, date_col]
return abs(min(max_drawdown_df['drawdown'])), high_date, low_date
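A small worked example for calculate_max_drawdown() (made-up data; the import path is assumed to be freqtrade.data.btanalysis):
import pandas as pd
from freqtrade.data.btanalysis import calculate_max_drawdown
trades = pd.DataFrame({
    'close_time': pd.date_range('2020-01-01', periods=4, freq='D'),
    'profitperc': [0.05, -0.02, -0.04, 0.01],
})
drawdown, high_date, low_date = calculate_max_drawdown(trades)
# Cumulative profit peaks at 0.05 and bottoms out at -0.01,
# so drawdown ~= 0.06, high_date == 2020-01-01, low_date == 2020-01-03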

View File

@ -1,21 +1,27 @@
"""
Functions to convert data from one format to another
"""
import itertools
import logging
from datetime import datetime, timezone
from operator import itemgetter
from typing import Any, Dict, List
import pandas as pd
from pandas import DataFrame, to_datetime
from freqtrade.constants import (DEFAULT_DATAFRAME_COLUMNS,
DEFAULT_TRADES_COLUMNS)
logger = logging.getLogger(__name__)
def parse_ticker_dataframe(ticker: list, timeframe: str, pair: str, *,
fill_missing: bool = True,
drop_incomplete: bool = True) -> DataFrame:
def ohlcv_to_dataframe(ohlcv: list, timeframe: str, pair: str, *,
fill_missing: bool = True, drop_incomplete: bool = True) -> DataFrame:
"""
Converts a ticker-list (format ccxt.fetch_ohlcv) to a Dataframe
:param ticker: ticker list, as returned by exchange.async_get_candle_history
Converts a list with candle (OHLCV) data (in format returned by ccxt.fetch_ohlcv)
to a Dataframe
:param ohlcv: list with candle (OHLCV) data, as returned by exchange.async_get_candle_history
:param timeframe: timeframe (e.g. 5m). Used to fill up eventual missing data
:param pair: Pair this data is for (used to warn if fillup was necessary)
:param fill_missing: fill up missing candles with 0 candles
@ -23,23 +29,40 @@ def parse_ticker_dataframe(ticker: list, timeframe: str, pair: str, *,
:param drop_incomplete: Drop the last candle of the dataframe, assuming it's incomplete
:return: DataFrame
"""
logger.debug("Parsing tickerlist to dataframe")
cols = ['date', 'open', 'high', 'low', 'close', 'volume']
frame = DataFrame(ticker, columns=cols)
logger.debug(f"Converting candle (OHLCV) data to dataframe for pair {pair}.")
cols = DEFAULT_DATAFRAME_COLUMNS
df = DataFrame(ohlcv, columns=cols)
frame['date'] = to_datetime(frame['date'],
unit='ms',
utc=True,
infer_datetime_format=True)
df['date'] = to_datetime(df['date'], unit='ms', utc=True, infer_datetime_format=True)
# Some exchanges return int values for volume and even for ohlc.
# Some exchanges return int values for Volume and even for OHLC.
# Convert them since TA-LIB indicators used in the strategy assume floats
# and fail with exception...
frame = frame.astype(dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float',
'volume': 'float'})
df = df.astype(dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float',
'volume': 'float'})
return clean_ohlcv_dataframe(df, timeframe, pair,
fill_missing=fill_missing,
drop_incomplete=drop_incomplete)
def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *,
fill_missing: bool = True,
drop_incomplete: bool = True) -> DataFrame:
"""
Cleanse an OHLCV dataframe by
* Grouping it by date (removes duplicate ticks)
* dropping last candles if requested
* Filling up missing data (if requested)
:param data: DataFrame containing candle (OHLCV) data.
:param timeframe: timeframe (e.g. 5m). Used to fill up eventual missing data
:param pair: Pair this data is for (used to warn if fillup was necessary)
:param fill_missing: fill up missing candles with 0 candles
(see ohlcv_fill_up_missing_data for details)
:param drop_incomplete: Drop the last candle of the dataframe, assuming it's incomplete
:return: DataFrame
"""
# group by index and aggregate results to eliminate duplicate ticks
frame = frame.groupby(by='date', as_index=False, sort=True).agg({
data = data.groupby(by='date', as_index=False, sort=True).agg({
'open': 'first',
'high': 'max',
'low': 'min',
@ -48,13 +71,13 @@ def parse_ticker_dataframe(ticker: list, timeframe: str, pair: str, *,
})
# eliminate partial candle
if drop_incomplete:
frame.drop(frame.tail(1).index, inplace=True)
data.drop(data.tail(1).index, inplace=True)
logger.debug('Dropping last candle')
if fill_missing:
return ohlcv_fill_up_missing_data(frame, timeframe, pair)
return ohlcv_fill_up_missing_data(data, timeframe, pair)
else:
return frame
return data
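A sketch of converting a ccxt-style candle list with the helpers above (one made-up BTC/USDT 5m candle; import path assumed to be freqtrade.data.converter):
from freqtrade.data.converter import ohlcv_to_dataframe
raw = [[1588723200000, 9520.0, 9530.5, 9515.0, 9528.1, 12.3]]  # [ms-timestamp, o, h, l, c, volume]
df = ohlcv_to_dataframe(raw, '5m', pair='BTC/USDT',
                        fill_missing=False, drop_incomplete=False)
# df carries DEFAULT_DATAFRAME_COLUMNS with a timezone-aware UTC 'date' column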
def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str) -> DataFrame:
@ -65,16 +88,16 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str)
"""
from freqtrade.exchange import timeframe_to_minutes
ohlc_dict = {
ohlcv_dict = {
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum'
}
ticker_minutes = timeframe_to_minutes(timeframe)
timeframe_minutes = timeframe_to_minutes(timeframe)
# Resample to create "NAN" values
df = dataframe.resample(f'{ticker_minutes}min', on='date').agg(ohlc_dict)
df = dataframe.resample(f'{timeframe_minutes}min', on='date').agg(ohlcv_dict)
# Forward-fill close for missing rows
df['close'] = df['close'].fillna(method='ffill')
@ -92,8 +115,26 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str)
return df
def trim_dataframe(df: DataFrame, timerange, df_date_col: str = 'date') -> DataFrame:
"""
Trim dataframe based on given timerange
:param df: Dataframe to trim
:param timerange: timerange (use start and end date if available)
:param df_date_col: Column in the dataframe to use as Date column
:return: trimmed dataframe
"""
if timerange.starttype == 'date':
start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc)
df = df.loc[df[df_date_col] >= start, :]
if timerange.stoptype == 'date':
stop = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc)
df = df.loc[df[df_date_col] <= stop, :]
return df
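Combined with TimeRange this trims a dataframe to a requested window, for example (dates illustrative, df being an OHLCV dataframe such as the one built above):
from freqtrade.configuration import TimeRange
timerange = TimeRange.parse_timerange('20200401-20200501')
df_april = trim_dataframe(df, timerange)   # keeps rows whose 'date' falls inside the range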
def order_book_to_dataframe(bids: list, asks: list) -> DataFrame:
"""
TODO: This should get a dedicated test
Gets order book list, returns a dataframe with the format below, as suggested by creslin
-------------------------------------------------------------------
b_sum b_size bids asks a_size a_sum
@ -116,23 +157,105 @@ def order_book_to_dataframe(bids: list, asks: list) -> DataFrame:
return frame
def trades_to_ohlcv(trades: list, timeframe: str) -> list:
def trades_remove_duplicates(trades: List[List]) -> List[List]:
"""
Converts trades list to ohlcv list
Removes duplicates from the trades list.
Uses itertools.groupby to avoid converting to pandas.
Tests show it to be fairly efficient on lists of around 4 million entries.
:param trades: List of Lists with constants.DEFAULT_TRADES_COLUMNS as columns
:return: same format as above, but with duplicates removed
"""
return [i for i, _ in itertools.groupby(sorted(trades, key=itemgetter(0)))]
def trades_dict_to_list(trades: List[Dict]) -> List[List]:
"""
Convert fetch_trades result into a List (to be more memory efficient).
:param trades: List of trades, as returned by ccxt.fetch_trades.
:param timeframe: Ticker timeframe to resample data to
:return: ohlcv timeframe as list (as returned by ccxt.fetch_ohlcv)
:return: List of Lists, with constants.DEFAULT_TRADES_COLUMNS as columns
"""
return [[t[col] for col in DEFAULT_TRADES_COLUMNS] for t in trades]
def trades_to_ohlcv(trades: List, timeframe: str) -> DataFrame:
"""
Converts a trades list to an OHLCV DataFrame
TODO: This should get a dedicated test
:param trades: List of trades, as returned by ccxt.fetch_trades.
:param timeframe: Timeframe to resample data to
:return: OHLCV Dataframe.
"""
from freqtrade.exchange import timeframe_to_minutes
ticker_minutes = timeframe_to_minutes(timeframe)
df = pd.DataFrame(trades)
df['datetime'] = pd.to_datetime(df['datetime'])
df = df.set_index('datetime')
timeframe_minutes = timeframe_to_minutes(timeframe)
df = pd.DataFrame(trades, columns=DEFAULT_TRADES_COLUMNS)
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms',
utc=True,)
df = df.set_index('timestamp')
df_new = df['price'].resample(f'{ticker_minutes}min').ohlc()
df_new['volume'] = df['amount'].resample(f'{ticker_minutes}min').sum()
df_new['date'] = df_new.index.astype("int64") // 10 ** 6
df_new = df['price'].resample(f'{timeframe_minutes}min').ohlc()
df_new['volume'] = df['amount'].resample(f'{timeframe_minutes}min').sum()
df_new['date'] = df_new.index
# Drop 0 volume rows
df_new = df_new.dropna()
columns = ["date", "open", "high", "low", "close", "volume"]
return list(zip(*[df_new[x].values.tolist() for x in columns]))
return df_new[DEFAULT_DATAFRAME_COLUMNS]
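A sketch chaining the trade helpers above (two made-up ccxt trade dicts for the same minute):
raw_trades = [
    {'timestamp': 1588723200000, 'id': '1', 'type': None, 'side': 'buy',
     'price': 9520.0, 'amount': 0.1, 'cost': 952.0},
    {'timestamp': 1588723230000, 'id': '2', 'type': None, 'side': 'sell',
     'price': 9521.0, 'amount': 0.2, 'cost': 1904.2},
]
trades = trades_remove_duplicates(trades_dict_to_list(raw_trades))
ohlcv_1m = trades_to_ohlcv(trades, '1m')   # DataFrame with DEFAULT_DATAFRAME_COLUMNS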
def convert_trades_format(config: Dict[str, Any], convert_from: str, convert_to: str, erase: bool):
"""
Convert trades from one format to another format.
:param config: Config dictionary
:param convert_from: Source format
:param convert_to: Target format
:param erase: Erase source data (does not apply if source and target format are identical)
"""
from freqtrade.data.history.idatahandler import get_datahandler
src = get_datahandler(config['datadir'], convert_from)
trg = get_datahandler(config['datadir'], convert_to)
if 'pairs' not in config:
config['pairs'] = src.trades_get_pairs(config['datadir'])
logger.info(f"Converting trades for {config['pairs']}")
for pair in config['pairs']:
data = src.trades_load(pair=pair)
logger.info(f"Converting {len(data)} trades for {pair}")
trg.trades_store(pair, data)
if erase and convert_from != convert_to:
logger.info(f"Deleting source Trade data for {pair}.")
src.trades_purge(pair=pair)
def convert_ohlcv_format(config: Dict[str, Any], convert_from: str, convert_to: str, erase: bool):
"""
Convert OHLCV from one format to another
:param config: Config dictionary
:param convert_from: Source format
:param convert_to: Target format
:param erase: Erase source data (does not apply if source and target format are identical)
"""
from freqtrade.data.history.idatahandler import get_datahandler
src = get_datahandler(config['datadir'], convert_from)
trg = get_datahandler(config['datadir'], convert_to)
timeframes = config.get('timeframes', [config.get('ticker_interval')])
logger.info(f"Converting candle (OHLCV) for timeframe {timeframes}")
if 'pairs' not in config:
config['pairs'] = []
# Check timeframes or fall back to ticker_interval.
for timeframe in timeframes:
config['pairs'].extend(src.ohlcv_get_pairs(config['datadir'],
timeframe))
logger.info(f"Converting candle (OHLCV) data for {config['pairs']}")
for timeframe in timeframes:
for pair in config['pairs']:
data = src.ohlcv_load(pair=pair, timeframe=timeframe,
timerange=None,
fill_missing=False,
drop_incomplete=False,
startup_candles=0)
logger.info(f"Converting {len(data)} candles for {pair}")
trg.ohlcv_store(pair=pair, timeframe=timeframe, data=data)
if erase and convert_from != convert_to:
logger.info(f"Deleting source data for {pair} / {timeframe}")
src.ohlcv_purge(pair=pair, timeframe=timeframe)
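A hedged usage sketch for the two conversion helpers (the datadir, pairs and timeframes are illustrative):
from pathlib import Path
config = {'datadir': Path('user_data/data/binance'),
          'timeframes': ['5m', '1h'],
          'pairs': ['BTC/USDT']}
convert_ohlcv_format(config, convert_from='json', convert_to='jsongz', erase=False)
convert_trades_format(config, convert_from='jsongz', convert_to='json', erase=False)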

View File

@ -1,16 +1,16 @@
"""
Dataprovider
Responsible to provide data to the bot
including Klines, tickers, historic data
including ticker and orderbook data, live and historical candle (OHLCV) data
Common Interface for bot and strategy to access data.
"""
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from pandas import DataFrame
from freqtrade.data.history import load_pair_history
from freqtrade.exceptions import DependencyException, OperationalException
from freqtrade.exchange import Exchange
from freqtrade.state import RunMode
@ -19,9 +19,10 @@ logger = logging.getLogger(__name__)
class DataProvider:
def __init__(self, config: dict, exchange: Exchange) -> None:
def __init__(self, config: dict, exchange: Exchange, pairlists=None) -> None:
self._config = config
self._exchange = exchange
self._pairlists = pairlists
def refresh(self,
pairlist: List[Tuple[str, str]],
@ -44,10 +45,10 @@ class DataProvider:
def ohlcv(self, pair: str, timeframe: str = None, copy: bool = True) -> DataFrame:
"""
Get ohlcv data for the given pair as DataFrame
Get candle (OHLCV) data for the given pair as DataFrame
Please use the `available_pairs` method to verify which pairs are currently cached.
:param pair: pair to get the data for
:param timeframe: Ticker timeframe to get data for
:param timeframe: Timeframe to get data for
:param copy: copy dataframe before returning if True.
Use False only for read-only operations (where the dataframe is not modified)
"""
@ -59,28 +60,28 @@ class DataProvider:
def historic_ohlcv(self, pair: str, timeframe: str = None) -> DataFrame:
"""
Get stored historic ohlcv data
Get stored historical candle (OHLCV) data
:param pair: pair to get the data for
:param timeframe: timeframe to get data for
"""
return load_pair_history(pair=pair,
timeframe=timeframe or self._config['ticker_interval'],
datadir=Path(self._config['datadir'])
datadir=self._config['datadir']
)
def get_pair_dataframe(self, pair: str, timeframe: str = None) -> DataFrame:
"""
Return pair ohlcv data, either live or cached historical -- depending
Return pair candle (OHLCV) data, either live or cached historical -- depending
on the runmode.
:param pair: pair to get the data for
:param timeframe: timeframe to get data for
:return: Dataframe for this pair
"""
if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):
# Get live ohlcv data.
# Get live OHLCV data.
data = self.ohlcv(pair=pair, timeframe=timeframe)
else:
# Get historic ohlcv data (cached on disk).
# Get historical OHLCV data (cached on disk).
data = self.historic_ohlcv(pair=pair, timeframe=timeframe)
if len(data) == 0:
logger.warning(f"No data found for ({pair}, {timeframe}).")
@ -96,10 +97,14 @@ class DataProvider:
def ticker(self, pair: str):
"""
Return last ticker data
Return last ticker data from exchange
:param pair: Pair to get the data for
:return: Ticker dict from exchange or empty dict if ticker is not available for the pair
"""
# TODO: Implement me
pass
try:
return self._exchange.fetch_ticker(pair)
except DependencyException:
return {}
def orderbook(self, pair: str, maximum: int) -> Dict[str, List]:
"""
@ -117,3 +122,17 @@ class DataProvider:
can be "live", "dry-run", "backtest", "edgecli", "hyperopt" or "other".
"""
return RunMode(self._config.get('runmode', RunMode.OTHER))
def current_whitelist(self) -> List[str]:
"""
fetch latest available whitelist.
Useful when you have a large whitelist and need to use each pair as an informative pair.
Note that available_pairs does not reflect the whitelist until after informative pairs have been cached.
:return: list of pairs in whitelist
"""
if self._pairlists:
return self._pairlists.whitelist
else:
raise OperationalException("Dataprovider was not initialized with a pairlist provider.")
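A sketch of how a strategy might use this (self.dp is the DataProvider instance freqtrade exposes to strategies; the 1h informative timeframe is illustrative):
def informative_pairs(self):
    # Request 1h candles for every pair currently in the whitelist
    pairs = self.dp.current_whitelist()
    return [(pair, '1h') for pair in pairs]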

View File

@ -1,479 +0,0 @@
"""
Handle historic data (ohlcv).
Includes:
* load data for a pair (or a list of pairs) from disk
* download data from exchange and store to disk
"""
import logging
import operator
from copy import deepcopy
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import arrow
from pandas import DataFrame
from freqtrade import OperationalException, misc
from freqtrade.configuration import TimeRange
from freqtrade.data.converter import parse_ticker_dataframe, trades_to_ohlcv
from freqtrade.exchange import Exchange, timeframe_to_minutes, timeframe_to_seconds
logger = logging.getLogger(__name__)
def trim_tickerlist(tickerlist: List[Dict], timerange: TimeRange) -> List[Dict]:
"""
Trim tickerlist based on given timerange
"""
if not tickerlist:
return tickerlist
start_index = 0
stop_index = len(tickerlist)
if timerange.starttype == 'date':
while (start_index < len(tickerlist) and
tickerlist[start_index][0] < timerange.startts * 1000):
start_index += 1
if timerange.stoptype == 'date':
while (stop_index > 0 and
tickerlist[stop_index-1][0] > timerange.stopts * 1000):
stop_index -= 1
if start_index > stop_index:
raise ValueError(f'The timerange [{timerange.startts},{timerange.stopts}] is incorrect')
return tickerlist[start_index:stop_index]
def trim_dataframe(df: DataFrame, timerange: TimeRange, df_date_col: str = 'date') -> DataFrame:
"""
Trim dataframe based on given timerange
:param df: Dataframe to trim
:param timerange: timerange (use start and end date if available)
:param: df_date_col: Column in the dataframe to use as Date column
:return: trimmed dataframe
"""
if timerange.starttype == 'date':
start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc)
df = df.loc[df[df_date_col] >= start, :]
if timerange.stoptype == 'date':
stop = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc)
df = df.loc[df[df_date_col] <= stop, :]
return df
def load_tickerdata_file(datadir: Path, pair: str, timeframe: str,
timerange: Optional[TimeRange] = None) -> List[Dict]:
"""
Load a pair from file, either .json.gz or .json
:return: tickerlist or None if unsuccessful
"""
filename = pair_data_filename(datadir, pair, timeframe)
pairdata = misc.file_load_json(filename)
if not pairdata:
return []
if timerange:
pairdata = trim_tickerlist(pairdata, timerange)
return pairdata
def store_tickerdata_file(datadir: Path, pair: str,
timeframe: str, data: list, is_zip: bool = False):
"""
Stores tickerdata to file
"""
filename = pair_data_filename(datadir, pair, timeframe)
misc.file_dump_json(filename, data, is_zip=is_zip)
def load_trades_file(datadir: Path, pair: str,
timerange: Optional[TimeRange] = None) -> List[Dict]:
"""
Load a pair from file, either .json.gz or .json
:return: tradelist or empty list if unsuccessful
"""
filename = pair_trades_filename(datadir, pair)
tradesdata = misc.file_load_json(filename)
if not tradesdata:
return []
return tradesdata
def store_trades_file(datadir: Path, pair: str,
data: list, is_zip: bool = True):
"""
Stores tickerdata to file
"""
filename = pair_trades_filename(datadir, pair)
misc.file_dump_json(filename, data, is_zip=is_zip)
def _validate_pairdata(pair, pairdata, timerange: TimeRange):
if timerange.starttype == 'date' and pairdata[0][0] > timerange.startts * 1000:
logger.warning('Missing data at start for pair %s, data starts at %s',
pair, arrow.get(pairdata[0][0] // 1000).strftime('%Y-%m-%d %H:%M:%S'))
if timerange.stoptype == 'date' and pairdata[-1][0] < timerange.stopts * 1000:
logger.warning('Missing data at end for pair %s, data ends at %s',
pair, arrow.get(pairdata[-1][0] // 1000).strftime('%Y-%m-%d %H:%M:%S'))
def load_pair_history(pair: str,
timeframe: str,
datadir: Path,
timerange: Optional[TimeRange] = None,
fill_up_missing: bool = True,
drop_incomplete: bool = True,
startup_candles: int = 0,
) -> DataFrame:
"""
Load cached ticker history for the given pair.
:param pair: Pair to load data for
:param timeframe: Ticker timeframe (e.g. "5m")
:param datadir: Path to the data storage location.
:param timerange: Limit data to be loaded to this timerange
:param fill_up_missing: Fill missing values with "No action"-candles
:param drop_incomplete: Drop last candle assuming it may be incomplete.
:param startup_candles: Additional candles to load at the start of the period
:return: DataFrame with ohlcv data, or empty DataFrame
"""
timerange_startup = deepcopy(timerange)
if startup_candles > 0 and timerange_startup:
timerange_startup.subtract_start(timeframe_to_seconds(timeframe) * startup_candles)
pairdata = load_tickerdata_file(datadir, pair, timeframe, timerange=timerange_startup)
if pairdata:
if timerange_startup:
_validate_pairdata(pair, pairdata, timerange_startup)
return parse_ticker_dataframe(pairdata, timeframe, pair=pair,
fill_missing=fill_up_missing,
drop_incomplete=drop_incomplete)
else:
logger.warning(
f'No history data for pair: "{pair}", timeframe: {timeframe}. '
'Use `freqtrade download-data` to download the data'
)
return DataFrame()
def load_data(datadir: Path,
timeframe: str,
pairs: List[str],
timerange: Optional[TimeRange] = None,
fill_up_missing: bool = True,
startup_candles: int = 0,
fail_without_data: bool = False
) -> Dict[str, DataFrame]:
"""
Load ticker history data for a list of pairs.
:param datadir: Path to the data storage location.
:param timeframe: Ticker Timeframe (e.g. "5m")
:param pairs: List of pairs to load
:param timerange: Limit data to be loaded to this timerange
:param fill_up_missing: Fill missing values with "No action"-candles
:param startup_candles: Additional candles to load at the start of the period
:param fail_without_data: Raise OperationalException if no data is found.
:return: dict(<pair>:<Dataframe>)
"""
result: Dict[str, DataFrame] = {}
if startup_candles > 0 and timerange:
logger.info(f'Using indicator startup period: {startup_candles} ...')
for pair in pairs:
hist = load_pair_history(pair=pair, timeframe=timeframe,
datadir=datadir, timerange=timerange,
fill_up_missing=fill_up_missing,
startup_candles=startup_candles)
if not hist.empty:
result[pair] = hist
if fail_without_data and not result:
raise OperationalException("No data found. Terminating.")
return result
def refresh_data(datadir: Path,
timeframe: str,
pairs: List[str],
exchange: Exchange,
timerange: Optional[TimeRange] = None,
) -> None:
"""
Refresh ticker history data for a list of pairs.
:param datadir: Path to the data storage location.
:param timeframe: Ticker Timeframe (e.g. "5m")
:param pairs: List of pairs to load
:param exchange: Exchange object
:param timerange: Limit data to be loaded to this timerange
"""
for pair in pairs:
_download_pair_history(pair=pair, timeframe=timeframe,
datadir=datadir, timerange=timerange,
exchange=exchange)
def pair_data_filename(datadir: Path, pair: str, timeframe: str) -> Path:
pair_s = pair.replace("/", "_")
filename = datadir.joinpath(f'{pair_s}-{timeframe}.json')
return filename
def pair_trades_filename(datadir: Path, pair: str) -> Path:
pair_s = pair.replace("/", "_")
filename = datadir.joinpath(f'{pair_s}-trades.json.gz')
return filename
def _load_cached_data_for_updating(datadir: Path, pair: str, timeframe: str,
timerange: Optional[TimeRange]) -> Tuple[List[Any],
Optional[int]]:
"""
Load cached data to download more data.
If timerange is passed in, checks whether data from before the stored data will be
downloaded.
If that's the case then what's available should be completely overwritten.
Only used by download_pair_history().
"""
since_ms = None
# user sets timerange, so find the start time
if timerange:
if timerange.starttype == 'date':
since_ms = timerange.startts * 1000
elif timerange.stoptype == 'line':
num_minutes = timerange.stopts * timeframe_to_minutes(timeframe)
since_ms = arrow.utcnow().shift(minutes=num_minutes).timestamp * 1000
# read the cached file
# Intentionally don't pass timerange in - since we need to load the full dataset.
data = load_tickerdata_file(datadir, pair, timeframe)
# remove the last item, could be incomplete candle
if data:
data.pop()
else:
data = []
if data:
if since_ms and since_ms < data[0][0]:
# Earlier data than existing data requested, redownload all
data = []
else:
# a part of the data was already downloaded, so only download the missing data
since_ms = data[-1][0] + 1
return (data, since_ms)
def _download_pair_history(datadir: Path,
exchange: Exchange,
pair: str,
timeframe: str = '5m',
timerange: Optional[TimeRange] = None) -> bool:
"""
Download latest candles from the exchange for the pair and timeframe passed in parameters
The data is downloaded starting from the last correct data that
exists in a cache. If timerange starts earlier than the data in the cache,
the full data will be redownloaded
Based on @Rybolov work: https://github.com/rybolov/freqtrade-data
:param pair: pair to download
:param timeframe: Ticker Timeframe (e.g 5m)
:param timerange: range of time to download
:return: bool with success state
"""
try:
logger.info(
f'Download history data for pair: "{pair}", timeframe: {timeframe} '
f'and store in {datadir}.'
)
data, since_ms = _load_cached_data_for_updating(datadir, pair, timeframe, timerange)
logger.debug("Current Start: %s", misc.format_ms_time(data[1][0]) if data else 'None')
logger.debug("Current End: %s", misc.format_ms_time(data[-1][0]) if data else 'None')
# Default since_ms to 30 days if nothing is given
new_data = exchange.get_historic_ohlcv(pair=pair,
timeframe=timeframe,
since_ms=since_ms if since_ms else
int(arrow.utcnow().shift(
days=-30).float_timestamp) * 1000
)
data.extend(new_data)
logger.debug("New Start: %s", misc.format_ms_time(data[0][0]))
logger.debug("New End: %s", misc.format_ms_time(data[-1][0]))
store_tickerdata_file(datadir, pair, timeframe, data=data)
return True
except Exception as e:
logger.error(
f'Failed to download history data for pair: "{pair}", timeframe: {timeframe}. '
f'Error: {e}'
)
return False
def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes: List[str],
datadir: Path, timerange: Optional[TimeRange] = None,
erase=False) -> List[str]:
"""
Refresh stored ohlcv data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
:return: List of pairs that are not available.
"""
pairs_not_available = []
for pair in pairs:
if pair not in exchange.markets:
pairs_not_available.append(pair)
logger.info(f"Skipping pair {pair}...")
continue
for timeframe in timeframes:
dl_file = pair_data_filename(datadir, pair, timeframe)
if erase and dl_file.exists():
logger.info(
f'Deleting existing data for pair {pair}, interval {timeframe}.')
dl_file.unlink()
logger.info(f'Downloading pair {pair}, interval {timeframe}.')
_download_pair_history(datadir=datadir, exchange=exchange,
pair=pair, timeframe=str(timeframe),
timerange=timerange)
return pairs_not_available
def _download_trades_history(datadir: Path,
exchange: Exchange,
pair: str,
timerange: Optional[TimeRange] = None) -> bool:
"""
Download trade history from the exchange.
Appends to previously downloaded trades data.
"""
try:
since = timerange.startts * 1000 if timerange and timerange.starttype == 'date' else None
trades = load_trades_file(datadir, pair)
from_id = trades[-1]['id'] if trades else None
logger.debug("Current Start: %s", trades[0]['datetime'] if trades else 'None')
logger.debug("Current End: %s", trades[-1]['datetime'] if trades else 'None')
# Default since_ms to 30 days if nothing is given
new_trades = exchange.get_historic_trades(pair=pair,
since=since if since else
int(arrow.utcnow().shift(
days=-30).float_timestamp) * 1000,
from_id=from_id,
)
trades.extend(new_trades[1])
store_trades_file(datadir, pair, trades)
logger.debug("New Start: %s", trades[0]['datetime'])
logger.debug("New End: %s", trades[-1]['datetime'])
logger.info(f"New Amount of trades: {len(trades)}")
return True
except Exception as e:
logger.error(
f'Failed to download historic trades for pair: "{pair}". '
f'Error: {e}'
)
return False
def refresh_backtest_trades_data(exchange: Exchange, pairs: List[str], datadir: Path,
timerange: TimeRange, erase=False) -> List[str]:
"""
Refresh stored trades data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
:return: List of pairs that are not available.
"""
pairs_not_available = []
for pair in pairs:
if pair not in exchange.markets:
pairs_not_available.append(pair)
logger.info(f"Skipping pair {pair}...")
continue
dl_file = pair_trades_filename(datadir, pair)
if erase and dl_file.exists():
logger.info(
f'Deleting existing data for pair {pair}.')
dl_file.unlink()
logger.info(f'Downloading trades for pair {pair}.')
_download_trades_history(datadir=datadir, exchange=exchange,
pair=pair,
timerange=timerange)
return pairs_not_available
def convert_trades_to_ohlcv(pairs: List[str], timeframes: List[str],
datadir: Path, timerange: TimeRange, erase=False) -> None:
"""
Convert stored trades data to ohlcv data
"""
for pair in pairs:
trades = load_trades_file(datadir, pair)
for timeframe in timeframes:
ohlcv_file = pair_data_filename(datadir, pair, timeframe)
if erase and ohlcv_file.exists():
logger.info(f'Deleting existing data for pair {pair}, interval {timeframe}.')
ohlcv_file.unlink()
ohlcv = trades_to_ohlcv(trades, timeframe)
# Store ohlcv
store_tickerdata_file(datadir, pair, timeframe, data=ohlcv)
def get_timerange(data: Dict[str, DataFrame]) -> Tuple[arrow.Arrow, arrow.Arrow]:
"""
Get the maximum common timerange for the given backtest data.
:param data: dictionary with preprocessed backtesting data
:return: tuple containing min_date, max_date
"""
timeranges = [
(arrow.get(frame['date'].min()), arrow.get(frame['date'].max()))
for frame in data.values()
]
return (min(timeranges, key=operator.itemgetter(0))[0],
max(timeranges, key=operator.itemgetter(1))[1])
def validate_backtest_data(data: DataFrame, pair: str, min_date: datetime,
max_date: datetime, timeframe_min: int) -> bool:
"""
Validates preprocessed backtesting data for missing values and shows warnings about it.
:param data: preprocessed backtesting data (as DataFrame)
:param pair: pair used for log output.
:param min_date: start-date of the data
:param max_date: end-date of the data
:param timeframe_min: ticker Timeframe in minutes
"""
# total difference in minutes / timeframe-minutes
expected_frames = int((max_date - min_date).total_seconds() // 60 // timeframe_min)
found_missing = False
dflen = len(data)
if dflen < expected_frames:
found_missing = True
logger.warning("%s has missing frames: expected %s, got %s, that's %s missing values",
pair, expected_frames, dflen, expected_frames - dflen)
return found_missing

View File

@ -0,0 +1,14 @@
"""
Handle historic data (ohlcv).
Includes:
* load data for a pair (or a list of pairs) from disk
* download data from exchange and store to disk
"""
from .history_utils import (convert_trades_to_ohlcv, # noqa: F401
get_timerange, load_data, load_pair_history,
refresh_backtest_ohlcv_data,
refresh_backtest_trades_data, refresh_data,
validate_backtest_data)
from .idatahandler import get_datahandler # noqa: F401

View File

@ -0,0 +1,391 @@
import logging
import operator
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import arrow
from pandas import DataFrame
from freqtrade.configuration import TimeRange
from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS
from freqtrade.data.converter import (ohlcv_to_dataframe,
trades_remove_duplicates,
trades_to_ohlcv)
from freqtrade.data.history.idatahandler import IDataHandler, get_datahandler
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import Exchange
from freqtrade.misc import format_ms_time
logger = logging.getLogger(__name__)
def load_pair_history(pair: str,
timeframe: str,
datadir: Path, *,
timerange: Optional[TimeRange] = None,
fill_up_missing: bool = True,
drop_incomplete: bool = True,
startup_candles: int = 0,
data_format: str = None,
data_handler: IDataHandler = None,
) -> DataFrame:
"""
Load cached ohlcv history for the given pair.
:param pair: Pair to load data for
:param timeframe: Timeframe (e.g. "5m")
:param datadir: Path to the data storage location.
:param data_format: Format of the data. Ignored if data_handler is set.
:param timerange: Limit data to be loaded to this timerange
:param fill_up_missing: Fill missing values with "No action"-candles
:param drop_incomplete: Drop last candle assuming it may be incomplete.
:param startup_candles: Additional candles to load at the start of the period
:param data_handler: Initialized data-handler to use.
Will be initialized from data_format if not set
:return: DataFrame with ohlcv data, or empty DataFrame
"""
data_handler = get_datahandler(datadir, data_format, data_handler)
return data_handler.ohlcv_load(pair=pair,
timeframe=timeframe,
timerange=timerange,
fill_missing=fill_up_missing,
drop_incomplete=drop_incomplete,
startup_candles=startup_candles,
)
def load_data(datadir: Path,
timeframe: str,
pairs: List[str], *,
timerange: Optional[TimeRange] = None,
fill_up_missing: bool = True,
startup_candles: int = 0,
fail_without_data: bool = False,
data_format: str = 'json',
) -> Dict[str, DataFrame]:
"""
Load ohlcv history data for a list of pairs.
:param datadir: Path to the data storage location.
:param timeframe: Timeframe (e.g. "5m")
:param pairs: List of pairs to load
:param timerange: Limit data to be loaded to this timerange
:param fill_up_missing: Fill missing values with "No action"-candles
:param startup_candles: Additional candles to load at the start of the period
:param fail_without_data: Raise OperationalException if no data is found.
:param data_format: Data format which should be used. Defaults to json
:return: dict(<pair>:<Dataframe>)
"""
result: Dict[str, DataFrame] = {}
if startup_candles > 0 and timerange:
logger.info(f'Using indicator startup period: {startup_candles} ...')
data_handler = get_datahandler(datadir, data_format)
for pair in pairs:
hist = load_pair_history(pair=pair, timeframe=timeframe,
datadir=datadir, timerange=timerange,
fill_up_missing=fill_up_missing,
startup_candles=startup_candles,
data_handler=data_handler
)
if not hist.empty:
result[pair] = hist
if fail_without_data and not result:
raise OperationalException("No data found. Terminating.")
return result
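A usage sketch for the loaders above (datadir, pairs and timerange are illustrative):
from pathlib import Path
from freqtrade.configuration import TimeRange
data = load_data(datadir=Path('user_data/data/binance'),
                 timeframe='5m',
                 pairs=['BTC/USDT', 'ETH/USDT'],
                 timerange=TimeRange.parse_timerange('20200101-20200401'),
                 startup_candles=30)
# data maps pair -> OHLCV DataFrame; pairs without local data are simply omitted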
def refresh_data(datadir: Path,
timeframe: str,
pairs: List[str],
exchange: Exchange,
data_format: str = None,
timerange: Optional[TimeRange] = None,
) -> None:
"""
Refresh ohlcv history data for a list of pairs.
:param datadir: Path to the data storage location.
:param timeframe: Timeframe (e.g. "5m")
:param pairs: List of pairs to load
:param exchange: Exchange object
:param timerange: Limit data to be loaded to this timerange
"""
data_handler = get_datahandler(datadir, data_format)
for pair in pairs:
_download_pair_history(pair=pair, timeframe=timeframe,
datadir=datadir, timerange=timerange,
exchange=exchange, data_handler=data_handler)
def _load_cached_data_for_updating(pair: str, timeframe: str, timerange: Optional[TimeRange],
data_handler: IDataHandler) -> Tuple[DataFrame, Optional[int]]:
"""
Load cached data to download more data.
If timerange is passed in, checks whether data from before the stored data will be
downloaded.
If that's the case then what's available should be completely overwritten.
Otherwise downloads always start at the end of the available data to avoid data gaps.
Note: Only used by download_pair_history().
"""
start = None
if timerange:
if timerange.starttype == 'date':
# TODO: convert to date for conversion
start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc)
# Intentionally don't pass timerange in - since we need to load the full dataset.
data = data_handler.ohlcv_load(pair, timeframe=timeframe,
timerange=None, fill_missing=False,
drop_incomplete=True, warn_no_data=False)
if not data.empty:
if start and start < data.iloc[0]['date']:
# Earlier data than existing data requested, redownload all
data = DataFrame(columns=DEFAULT_DATAFRAME_COLUMNS)
else:
start = data.iloc[-1]['date']
start_ms = int(start.timestamp() * 1000) if start else None
return data, start_ms
def _download_pair_history(datadir: Path,
exchange: Exchange,
pair: str, *,
timeframe: str = '5m',
timerange: Optional[TimeRange] = None,
data_handler: IDataHandler = None) -> bool:
"""
Download latest candles from the exchange for the pair and timeframe passed in parameters
The data is downloaded starting from the last correct data that
exists in a cache. If timerange starts earlier than the data in the cache,
the full data will be redownloaded
Based on @Rybolov work: https://github.com/rybolov/freqtrade-data
:param pair: pair to download
:param timeframe: Timeframe (e.g "5m")
:param timerange: range of time to download
:return: bool with success state
"""
data_handler = get_datahandler(datadir, data_handler=data_handler)
try:
logger.info(
f'Download history data for pair: "{pair}", timeframe: {timeframe} '
f'and store in {datadir}.'
)
# data, since_ms = _load_cached_data_for_updating_old(datadir, pair, timeframe, timerange)
data, since_ms = _load_cached_data_for_updating(pair, timeframe, timerange,
data_handler=data_handler)
logger.debug("Current Start: %s",
f"{data.iloc[0]['date']:%Y-%m-%d %H:%M:%S}" if not data.empty else 'None')
logger.debug("Current End: %s",
f"{data.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}" if not data.empty else 'None')
# Default since_ms to 30 days if nothing is given
new_data = exchange.get_historic_ohlcv(pair=pair,
timeframe=timeframe,
since_ms=since_ms if since_ms else
int(arrow.utcnow().shift(
days=-30).float_timestamp) * 1000
)
# TODO: Maybe move parsing to exchange class (?)
new_dataframe = ohlcv_to_dataframe(new_data, timeframe, pair,
fill_missing=False, drop_incomplete=True)
if data.empty:
data = new_dataframe
else:
data = data.append(new_dataframe)
logger.debug("New Start: %s",
f"{data.iloc[0]['date']:%Y-%m-%d %H:%M:%S}" if not data.empty else 'None')
logger.debug("New End: %s",
f"{data.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}" if not data.empty else 'None')
data_handler.ohlcv_store(pair, timeframe, data=data)
return True
except Exception as e:
logger.error(
f'Failed to download history data for pair: "{pair}", timeframe: {timeframe}. '
f'Error: {e}'
)
return False
def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes: List[str],
datadir: Path, timerange: Optional[TimeRange] = None,
erase: bool = False, data_format: str = None) -> List[str]:
"""
Refresh stored ohlcv data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
:return: List of pairs that are not available.
"""
pairs_not_available = []
data_handler = get_datahandler(datadir, data_format)
for pair in pairs:
if pair not in exchange.markets:
pairs_not_available.append(pair)
logger.info(f"Skipping pair {pair}...")
continue
for timeframe in timeframes:
if erase:
if data_handler.ohlcv_purge(pair, timeframe):
logger.info(
f'Deleting existing data for pair {pair}, interval {timeframe}.')
logger.info(f'Downloading pair {pair}, interval {timeframe}.')
_download_pair_history(datadir=datadir, exchange=exchange,
pair=pair, timeframe=str(timeframe),
timerange=timerange, data_handler=data_handler)
return pairs_not_available
def _download_trades_history(exchange: Exchange,
pair: str, *,
timerange: Optional[TimeRange] = None,
data_handler: IDataHandler
) -> bool:
"""
Download trade history from the exchange.
Appends to previously downloaded trades data.
"""
try:
since = timerange.startts * 1000 if \
(timerange and timerange.starttype == 'date') else int(arrow.utcnow().shift(
days=-30).float_timestamp) * 1000
trades = data_handler.trades_load(pair)
# TradesList columns are defined in constants.DEFAULT_TRADES_COLUMNS
# DEFAULT_TRADES_COLUMNS: 0 -> timestamp
# DEFAULT_TRADES_COLUMNS: 1 -> id
from_id = trades[-1][1] if trades else None
if trades and since < trades[-1][0]:
# Reset since to the last available point
# - 5 seconds (to ensure we're getting all trades)
since = trades[-1][0] - (5 * 1000)
logger.info(f"Using last trade date -5s - Downloading trades for {pair} "
f"since: {format_ms_time(since)}.")
logger.debug(f"Current Start: {format_ms_time(trades[0][0]) if trades else 'None'}")
logger.debug(f"Current End: {format_ms_time(trades[-1][0]) if trades else 'None'}")
logger.info(f"Current Amount of trades: {len(trades)}")
# Default since_ms to 30 days if nothing is given
new_trades = exchange.get_historic_trades(pair=pair,
since=since,
from_id=from_id,
)
trades.extend(new_trades[1])
# Remove duplicates to make sure we're not storing data we don't need
trades = trades_remove_duplicates(trades)
data_handler.trades_store(pair, data=trades)
logger.debug(f"New Start: {format_ms_time(trades[0][0])}")
logger.debug(f"New End: {format_ms_time(trades[-1][0])}")
logger.info(f"New Amount of trades: {len(trades)}")
return True
except Exception as e:
logger.error(
f'Failed to download historic trades for pair: "{pair}". '
f'Error: {e}'
)
return False
def refresh_backtest_trades_data(exchange: Exchange, pairs: List[str], datadir: Path,
timerange: TimeRange, erase: bool = False,
data_format: str = 'jsongz') -> List[str]:
"""
Refresh stored trades data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
:return: List of pairs that are not available.
"""
pairs_not_available = []
data_handler = get_datahandler(datadir, data_format=data_format)
for pair in pairs:
if pair not in exchange.markets:
pairs_not_available.append(pair)
logger.info(f"Skipping pair {pair}...")
continue
if erase:
if data_handler.trades_purge(pair):
logger.info(f'Deleting existing data for pair {pair}.')
logger.info(f'Downloading trades for pair {pair}.')
_download_trades_history(exchange=exchange,
pair=pair,
timerange=timerange,
data_handler=data_handler)
return pairs_not_available
def convert_trades_to_ohlcv(pairs: List[str], timeframes: List[str],
datadir: Path, timerange: TimeRange, erase: bool = False,
data_format_ohlcv: str = 'json',
data_format_trades: str = 'jsongz') -> None:
"""
Convert stored trades data to ohlcv data
"""
data_handler_trades = get_datahandler(datadir, data_format=data_format_trades)
data_handler_ohlcv = get_datahandler(datadir, data_format=data_format_ohlcv)
for pair in pairs:
trades = data_handler_trades.trades_load(pair)
for timeframe in timeframes:
if erase:
if data_handler_ohlcv.ohlcv_purge(pair, timeframe):
logger.info(f'Deleting existing data for pair {pair}, interval {timeframe}.')
ohlcv = trades_to_ohlcv(trades, timeframe)
# Store ohlcv
data_handler_ohlcv.ohlcv_store(pair, timeframe, data=ohlcv)
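A sketch of converting downloaded trades into candles on disk (paths, pairs and timerange illustrative):
from pathlib import Path
from freqtrade.configuration import TimeRange
convert_trades_to_ohlcv(pairs=['BTC/USDT'], timeframes=['1m', '5m'],
                        datadir=Path('user_data/data/binance'),
                        timerange=TimeRange.parse_timerange('20200101-'),
                        erase=False)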
def get_timerange(data: Dict[str, DataFrame]) -> Tuple[arrow.Arrow, arrow.Arrow]:
"""
Get the maximum common timerange for the given backtest data.
:param data: dictionary with preprocessed backtesting data
:return: tuple containing min_date, max_date
"""
timeranges = [
(arrow.get(frame['date'].min()), arrow.get(frame['date'].max()))
for frame in data.values()
]
return (min(timeranges, key=operator.itemgetter(0))[0],
max(timeranges, key=operator.itemgetter(1))[1])
def validate_backtest_data(data: DataFrame, pair: str, min_date: datetime,
max_date: datetime, timeframe_min: int) -> bool:
"""
Validates preprocessed backtesting data for missing values and shows warnings about it.
:param data: preprocessed backtesting data (as DataFrame)
:param pair: pair used for log output.
:param min_date: start-date of the data
:param max_date: end-date of the data
:param timeframe_min: Timeframe in minutes
"""
# total difference in minutes / timeframe-minutes
expected_frames = int((max_date - min_date).total_seconds() // 60 // timeframe_min)
found_missing = False
dflen = len(data)
if dflen < expected_frames:
found_missing = True
logger.warning("%s has missing frames: expected %s, got %s, that's %s missing values",
pair, expected_frames, dflen, expected_frames - dflen)
return found_missing

View File

@ -0,0 +1,248 @@
"""
Abstract datahandler interface.
Its subclasses handle loading and storing data from disk.
"""
import logging
from abc import ABC, abstractclassmethod, abstractmethod
from copy import deepcopy
from datetime import datetime, timezone
from pathlib import Path
from typing import List, Optional, Type
from pandas import DataFrame
from freqtrade.configuration import TimeRange
from freqtrade.data.converter import (clean_ohlcv_dataframe,
trades_remove_duplicates, trim_dataframe)
from freqtrade.exchange import timeframe_to_seconds
logger = logging.getLogger(__name__)
# Type for trades list
TradeList = List[List]
class IDataHandler(ABC):
def __init__(self, datadir: Path) -> None:
self._datadir = datadir
@abstractclassmethod
def ohlcv_get_pairs(cls, datadir: Path, timeframe: str) -> List[str]:
"""
Returns a list of all pairs with ohlcv data available in this datadir
for the specified timeframe
:param datadir: Directory to search for ohlcv files
:param timeframe: Timeframe to search pairs for
:return: List of Pairs
"""
@abstractmethod
def ohlcv_store(self, pair: str, timeframe: str, data: DataFrame) -> None:
"""
Store data in json format "values".
Format looks as follows:
[[<date>,<open>,<high>,<low>,<close>,<volume>]]
:param pair: Pair - used to generate filename
:param timeframe: Timeframe - used to generate filename
:param data: Dataframe containing OHLCV data
:return: None
"""
@abstractmethod
def _ohlcv_load(self, pair: str, timeframe: str,
timerange: Optional[TimeRange] = None,
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
Implements the loading and conversion to a Pandas dataframe.
Timerange trimming and dataframe validation happens outside of this method.
:param pair: Pair to load data
:param timeframe: Timeframe (e.g. "5m")
:param timerange: Limit data to be loaded to this timerange.
Optionally implemented by subclasses to avoid loading
all data where possible.
:return: DataFrame with ohlcv data, or empty DataFrame
"""
@abstractmethod
def ohlcv_purge(self, pair: str, timeframe: str) -> bool:
"""
Remove data for this pair
:param pair: Delete data for this pair.
:param timeframe: Timeframe (e.g. "5m")
:return: True when deleted, false if file did not exist.
"""
@abstractmethod
def ohlcv_append(self, pair: str, timeframe: str, data: DataFrame) -> None:
"""
Append data to existing data structures
:param pair: Pair
:param timeframe: Timeframe this ohlcv data is for
:param data: Data to append.
"""
@abstractclassmethod
def trades_get_pairs(cls, datadir: Path) -> List[str]:
"""
Returns a list of all pairs for which trade data is available in this datadir
:param datadir: Directory to search for trades files
:return: List of Pairs
"""
@abstractmethod
def trades_store(self, pair: str, data: TradeList) -> None:
"""
Store trades data (list of Dicts) to file
:param pair: Pair - used for filename
:param data: List of Lists containing trade data,
column sequence as in DEFAULT_TRADES_COLUMNS
"""
@abstractmethod
def trades_append(self, pair: str, data: TradeList):
"""
Append data to existing files
:param pair: Pair - used for filename
:param data: List of Lists containing trade data,
column sequence as in DEFAULT_TRADES_COLUMNS
"""
@abstractmethod
def _trades_load(self, pair: str, timerange: Optional[TimeRange] = None) -> TradeList:
"""
Load a pair from file, either .json.gz or .json
:param pair: Load trades for this pair
:param timerange: Timerange to load trades for - currently not implemented
:return: List of trades
"""
@abstractmethod
def trades_purge(self, pair: str) -> bool:
"""
Remove data for this pair
:param pair: Delete data for this pair.
:return: True when deleted, false if file did not exist.
"""
def trades_load(self, pair: str, timerange: Optional[TimeRange] = None) -> TradeList:
"""
Load a pair from file, either .json.gz or .json
Removes duplicates in the process.
:param pair: Load trades for this pair
:param timerange: Timerange to load trades for - currently not implemented
:return: List of trades
"""
return trades_remove_duplicates(self._trades_load(pair, timerange=timerange))
def ohlcv_load(self, pair, timeframe: str,
timerange: Optional[TimeRange] = None,
fill_missing: bool = True,
drop_incomplete: bool = True,
startup_candles: int = 0,
warn_no_data: bool = True
) -> DataFrame:
"""
Load cached candle (OHLCV) data for the given pair.
:param pair: Pair to load data for
:param timeframe: Timeframe (e.g. "5m")
:param timerange: Limit data to be loaded to this timerange
:param fill_missing: Fill missing values with "No action"-candles
:param drop_incomplete: Drop last candle assuming it may be incomplete.
:param startup_candles: Additional candles to load at the start of the period
:param warn_no_data: Log a warning message when no data is found
:return: DataFrame with ohlcv data, or empty DataFrame
"""
# Fix startup period
timerange_startup = deepcopy(timerange)
if startup_candles > 0 and timerange_startup:
timerange_startup.subtract_start(timeframe_to_seconds(timeframe) * startup_candles)
pairdf = self._ohlcv_load(pair, timeframe,
timerange=timerange_startup)
if self._check_empty_df(pairdf, pair, timeframe, warn_no_data):
return pairdf
else:
enddate = pairdf.iloc[-1]['date']
if timerange_startup:
self._validate_pairdata(pair, pairdf, timerange_startup)
pairdf = trim_dataframe(pairdf, timerange_startup)
if self._check_empty_df(pairdf, pair, timeframe, warn_no_data):
return pairdf
# incomplete candles should only be dropped if we didn't trim the end beforehand.
pairdf = clean_ohlcv_dataframe(pairdf, timeframe,
pair=pair,
fill_missing=fill_missing,
drop_incomplete=(drop_incomplete and
enddate == pairdf.iloc[-1]['date']))
self._check_empty_df(pairdf, pair, timeframe, warn_no_data)
return pairdf
def _check_empty_df(self, pairdf: DataFrame, pair: str, timeframe: str, warn_no_data: bool):
"""
Warn on empty dataframe
"""
if pairdf.empty:
if warn_no_data:
logger.warning(
f'No history data for pair: "{pair}", timeframe: {timeframe}. '
'Use `freqtrade download-data` to download the data'
)
return True
return False
def _validate_pairdata(self, pair, pairdata: DataFrame, timerange: TimeRange):
"""
Validates pairdata for missing data at start and end, and logs warnings.
:param pairdata: Dataframe to validate
:param timerange: Timerange specified for start and end dates
"""
if timerange.starttype == 'date':
start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc)
if pairdata.iloc[0]['date'] > start:
logger.warning(f"Missing data at start for pair {pair}, "
f"data starts at {pairdata.iloc[0]['date']:%Y-%m-%d %H:%M:%S}")
if timerange.stoptype == 'date':
stop = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc)
if pairdata.iloc[-1]['date'] < stop:
logger.warning(f"Missing data at end for pair {pair}, "
f"data ends at {pairdata.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}")
def get_datahandlerclass(datatype: str) -> Type[IDataHandler]:
"""
Get datahandler class.
Could be done using Resolvers, but since this may be called often and resolvers
are rather expensive, doing this directly should improve performance.
:param datatype: datatype to use.
:return: Datahandler class
"""
if datatype == 'json':
from .jsondatahandler import JsonDataHandler
return JsonDataHandler
elif datatype == 'jsongz':
from .jsondatahandler import JsonGzDataHandler
return JsonGzDataHandler
else:
raise ValueError(f"No datahandler for datatype {datatype} available.")
def get_datahandler(datadir: Path, data_format: str = None,
data_handler: IDataHandler = None) -> IDataHandler:
"""
:param datadir: Folder to save data
:param data_format: Dataformat to use
:param data_handler: Return this datahandler if it exists, or initialize a new one
"""
if not data_handler:
HandlerClass = get_datahandlerclass(data_format or 'json')
data_handler = HandlerClass(datadir)
return data_handler
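A minimal usage sketch of the two helpers above (the datadir, pair and module path are illustrative assumptions, not taken from this diff): resolve a handler for the plain 'json' format and load cached candles for one pair.

from pathlib import Path
# assumption: the module path of this file; adjust to wherever IDataHandler lives in your tree
from freqtrade.data.history.idatahandler import get_datahandler

data_handler = get_datahandler(Path("user_data/data/binance"), data_format="json")
candles = data_handler.ohlcv_load("ETH/BTC", timeframe="5m")
print(candles.tail())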

View File

@ -0,0 +1,191 @@
import logging
import re
from pathlib import Path
from typing import List, Optional
import numpy as np
from pandas import DataFrame, read_json, to_datetime
from freqtrade import misc
from freqtrade.configuration import TimeRange
from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS
from freqtrade.data.converter import trades_dict_to_list
from .idatahandler import IDataHandler, TradeList
logger = logging.getLogger(__name__)
class JsonDataHandler(IDataHandler):
_use_zip = False
_columns = DEFAULT_DATAFRAME_COLUMNS
@classmethod
def ohlcv_get_pairs(cls, datadir: Path, timeframe: str) -> List[str]:
"""
Returns a list of all pairs with ohlcv data available in this datadir
for the specified timeframe
:param datadir: Directory to search for ohlcv files
:param timeframe: Timeframe to search pairs for
:return: List of Pairs
"""
_tmp = [re.search(r'^(\S+)(?=\-' + timeframe + '.json)', p.name)
for p in datadir.glob(f"*{timeframe}.{cls._get_file_extension()}")]
# Check if regex found something and only return these results
return [match[0].replace('_', '/') for match in _tmp if match]
def ohlcv_store(self, pair: str, timeframe: str, data: DataFrame) -> None:
"""
Store data in json format "values".
format looks as follows:
[[<date>,<open>,<high>,<low>,<close>]]
:param pair: Pair - used to generate filename
:param timeframe: Timeframe - used to generate filename
:param data: Dataframe containing OHLCV data
:return: None
"""
filename = self._pair_data_filename(self._datadir, pair, timeframe)
_data = data.copy()
# Convert date to int
_data['date'] = _data['date'].astype(np.int64) // 1000 // 1000
# Reset index, select only appropriate columns and save as json
_data.reset_index(drop=True).loc[:, self._columns].to_json(
filename, orient="values",
compression='gzip' if self._use_zip else None)
def _ohlcv_load(self, pair: str, timeframe: str,
timerange: Optional[TimeRange] = None,
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
Implements the loading and conversion to a Pandas dataframe.
Timerange trimming and dataframe validation happens outside of this method.
:param pair: Pair to load data
:param timeframe: Timeframe (e.g. "5m")
:param timerange: Limit data to be loaded to this timerange.
Optionally implemented by subclasses to avoid loading
all data where possible.
:return: DataFrame with ohlcv data, or empty DataFrame
"""
filename = self._pair_data_filename(self._datadir, pair, timeframe)
if not filename.exists():
return DataFrame(columns=self._columns)
pairdata = read_json(filename, orient='values')
pairdata.columns = self._columns
pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
'low': 'float', 'close': 'float', 'volume': 'float'})
pairdata['date'] = to_datetime(pairdata['date'],
unit='ms',
utc=True,
infer_datetime_format=True)
return pairdata
def ohlcv_purge(self, pair: str, timeframe: str) -> bool:
"""
Remove data for this pair
:param pair: Delete data for this pair.
:param timeframe: Timeframe (e.g. "5m")
:return: True when deleted, false if file did not exist.
"""
filename = self._pair_data_filename(self._datadir, pair, timeframe)
if filename.exists():
filename.unlink()
return True
return False
def ohlcv_append(self, pair: str, timeframe: str, data: DataFrame) -> None:
"""
Append data to existing data structures
:param pair: Pair
:param timeframe: Timeframe this ohlcv data is for
:param data: Data to append.
"""
raise NotImplementedError()
@classmethod
def trades_get_pairs(cls, datadir: Path) -> List[str]:
"""
Returns a list of all pairs for which trade data is available in this datadir
:param datadir: Directory to search for ohlcv files
:return: List of Pairs
"""
_tmp = [re.search(r'^(\S+)(?=\-trades.json)', p.name)
for p in datadir.glob(f"*trades.{cls._get_file_extension()}")]
# Check if regex found something and only return these results to avoid exceptions.
return [match[0].replace('_', '/') for match in _tmp if match]
def trades_store(self, pair: str, data: TradeList) -> None:
"""
Store trades data (list of Dicts) to file
:param pair: Pair - used for filename
:param data: List of Lists containing trade data,
column sequence as in DEFAULT_TRADES_COLUMNS
"""
filename = self._pair_trades_filename(self._datadir, pair)
misc.file_dump_json(filename, data, is_zip=self._use_zip)
def trades_append(self, pair: str, data: TradeList):
"""
Append data to existing files
:param pair: Pair - used for filename
:param data: List of Lists containing trade data,
column sequence as in DEFAULT_TRADES_COLUMNS
"""
raise NotImplementedError()
def _trades_load(self, pair: str, timerange: Optional[TimeRange] = None) -> TradeList:
"""
Load a pair from file, either .json.gz or .json
# TODO: respect timerange ...
:param pair: Load trades for this pair
:param timerange: Timerange to load trades for - currently not implemented
:return: List of trades
"""
filename = self._pair_trades_filename(self._datadir, pair)
tradesdata = misc.file_load_json(filename)
if not tradesdata:
return []
if isinstance(tradesdata[0], dict):
# Convert trades dict to list
logger.info("Old trades format detected - converting")
tradesdata = trades_dict_to_list(tradesdata)
return tradesdata
def trades_purge(self, pair: str) -> bool:
"""
Remove data for this pair
:param pair: Delete data for this pair.
:return: True when deleted, false if file did not exist.
"""
filename = self._pair_trades_filename(self._datadir, pair)
if filename.exists():
filename.unlink()
return True
return False
@classmethod
def _pair_data_filename(cls, datadir: Path, pair: str, timeframe: str) -> Path:
pair_s = misc.pair_to_filename(pair)
filename = datadir.joinpath(f'{pair_s}-{timeframe}.{cls._get_file_extension()}')
return filename
@classmethod
def _get_file_extension(cls):
return "json.gz" if cls._use_zip else "json"
@classmethod
def _pair_trades_filename(cls, datadir: Path, pair: str) -> Path:
pair_s = misc.pair_to_filename(pair)
filename = datadir.joinpath(f'{pair_s}-trades.{cls._get_file_extension()}')
return filename
class JsonGzDataHandler(JsonDataHandler):
_use_zip = True
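A small sketch of the filename convention produced by _pair_data_filename and _get_file_extension (the datadir and pair are illustrative, and pair_to_filename is assumed to simply replace '/' with '_' for plain spot pairs):

from pathlib import Path

def pair_to_filename(pair: str) -> str:
    # assumption: mirrors freqtrade.misc.pair_to_filename for simple spot pairs
    return pair.replace("/", "_")

datadir = Path("user_data/data/binance")
pair, timeframe, use_zip = "ETH/BTC", "5m", False
ext = "json.gz" if use_zip else "json"
print(datadir.joinpath(f"{pair_to_filename(pair)}-{timeframe}.{ext}"))
# user_data/data/binance/ETH_BTC-5m.json (JsonGzDataHandler would produce ETH_BTC-5m.json.gz)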

View File

@ -1,463 +1 @@
# pragma pylint: disable=W0603
""" Edge positioning package """
import logging
from pathlib import Path
from typing import Any, Dict, NamedTuple
import arrow
import numpy as np
import utils_find_1st as utf1st
from pandas import DataFrame
from freqtrade import constants, OperationalException
from freqtrade.configuration import TimeRange
from freqtrade.data import history
from freqtrade.strategy.interface import SellType
logger = logging.getLogger(__name__)
class PairInfo(NamedTuple):
stoploss: float
winrate: float
risk_reward_ratio: float
required_risk_reward: float
expectancy: float
nb_trades: int
avg_trade_duration: float
class Edge:
"""
Calculates Win Rate, Risk Reward Ratio, Expectancy
against historical data for a given set of markets and a strategy;
it then adjusts stoploss and position size accordingly
and forces them into the strategy.
Author: https://github.com/mishaker
"""
config: Dict = {}
_cached_pairs: Dict[str, Any] = {} # Keeps a list of pairs
def __init__(self, config: Dict[str, Any], exchange, strategy) -> None:
self.config = config
self.exchange = exchange
self.strategy = strategy
self.edge_config = self.config.get('edge', {})
self._cached_pairs: Dict[str, Any] = {} # Keeps a list of pairs
self._final_pairs: list = []
# checking max_open_trades. it should be -1 as with Edge
# the number of trades is determined by position size
if self.config['max_open_trades'] != float('inf'):
logger.critical('max_open_trades should be -1 in config !')
if self.config['stake_amount'] != constants.UNLIMITED_STAKE_AMOUNT:
raise OperationalException('Edge works only with unlimited stake amount')
self._capital_percentage: float = self.edge_config.get('capital_available_percentage')
self._allowed_risk: float = self.edge_config.get('allowed_risk')
self._since_number_of_days: int = self.edge_config.get('calculate_since_number_of_days', 14)
self._last_updated: int = 0 # Timestamp of pairs last updated time
self._refresh_pairs = True
self._stoploss_range_min = float(self.edge_config.get('stoploss_range_min', -0.01))
self._stoploss_range_max = float(self.edge_config.get('stoploss_range_max', -0.05))
self._stoploss_range_step = float(self.edge_config.get('stoploss_range_step', -0.001))
# calculating stoploss range
self._stoploss_range = np.arange(
self._stoploss_range_min,
self._stoploss_range_max,
self._stoploss_range_step
)
self._timerange: TimeRange = TimeRange.parse_timerange("%s-" % arrow.now().shift(
days=-1 * self._since_number_of_days).format('YYYYMMDD'))
if config.get('fee'):
self.fee = config['fee']
else:
self.fee = self.exchange.get_fee(symbol=self.config['exchange']['pair_whitelist'][0])
def calculate(self) -> bool:
pairs = self.config['exchange']['pair_whitelist']
heartbeat = self.edge_config.get('process_throttle_secs')
if (self._last_updated > 0) and (
self._last_updated + heartbeat > arrow.utcnow().timestamp):
return False
data: Dict[str, Any] = {}
logger.info('Using stake_currency: %s ...', self.config['stake_currency'])
logger.info('Using local backtesting data (using whitelist in given config) ...')
if self._refresh_pairs:
history.refresh_data(
datadir=Path(self.config['datadir']),
pairs=pairs,
exchange=self.exchange,
timeframe=self.strategy.ticker_interval,
timerange=self._timerange,
)
data = history.load_data(
datadir=Path(self.config['datadir']),
pairs=pairs,
timeframe=self.strategy.ticker_interval,
timerange=self._timerange,
startup_candles=self.strategy.startup_candle_count,
)
if not data:
# Reinitializing cached pairs
self._cached_pairs = {}
logger.critical("No data found. Edge is stopped ...")
return False
preprocessed = self.strategy.tickerdata_to_dataframe(data)
# Print timeframe
min_date, max_date = history.get_timerange(preprocessed)
logger.info(
'Measuring data from %s up to %s (%s days) ...',
min_date.isoformat(),
max_date.isoformat(),
(max_date - min_date).days
)
headers = ['date', 'buy', 'open', 'close', 'sell', 'high', 'low']
trades: list = []
for pair, pair_data in preprocessed.items():
# Sorting dataframe by date and reset index
pair_data = pair_data.sort_values(by=['date'])
pair_data = pair_data.reset_index(drop=True)
ticker_data = self.strategy.advise_sell(
self.strategy.advise_buy(pair_data, {'pair': pair}), {'pair': pair})[headers].copy()
trades += self._find_trades_for_stoploss_range(ticker_data, pair, self._stoploss_range)
# If no trade found then exit
if len(trades) == 0:
logger.info("No trades found.")
return False
# Fill missing, calculable columns, profit, duration , abs etc.
trades_df = self._fill_calculable_fields(DataFrame(trades))
self._cached_pairs = self._process_expectancy(trades_df)
self._last_updated = arrow.utcnow().timestamp
return True
def stake_amount(self, pair: str, free_capital: float,
total_capital: float, capital_in_trade: float) -> float:
stoploss = self.stoploss(pair)
available_capital = (total_capital + capital_in_trade) * self._capital_percentage
allowed_capital_at_risk = available_capital * self._allowed_risk
max_position_size = abs(allowed_capital_at_risk / stoploss)
position_size = min(max_position_size, free_capital)
if pair in self._cached_pairs:
logger.info(
'winrate: %s, expectancy: %s, position size: %s, pair: %s,'
' capital in trade: %s, free capital: %s, total capital: %s,'
' stoploss: %s, available capital: %s.',
self._cached_pairs[pair].winrate,
self._cached_pairs[pair].expectancy,
position_size, pair,
capital_in_trade, free_capital, total_capital,
stoploss, available_capital
)
return round(position_size, 15)
def stoploss(self, pair: str) -> float:
if pair in self._cached_pairs:
return self._cached_pairs[pair].stoploss
else:
logger.warning('tried to access stoploss of a non-existing pair, '
'strategy stoploss is returned instead.')
return self.strategy.stoploss
def adjust(self, pairs) -> list:
"""
Filters out and sorts "pairs" according to Edge calculated pairs
"""
final = []
for pair, info in self._cached_pairs.items():
if info.expectancy > float(self.edge_config.get('minimum_expectancy', 0.2)) and \
info.winrate > float(self.edge_config.get('minimum_winrate', 0.60)) and \
pair in pairs:
final.append(pair)
if self._final_pairs != final:
self._final_pairs = final
if self._final_pairs:
logger.info(
'Minimum expectancy and minimum winrate are met only for %s,'
' so other pairs are filtered out.',
self._final_pairs
)
else:
logger.info(
'Edge removed all pairs as no pair with minimum expectancy '
'and minimum winrate was found !'
)
return self._final_pairs
def accepted_pairs(self) -> list:
"""
return a list of accepted pairs along with their winrate, expectancy and stoploss
"""
final = []
for pair, info in self._cached_pairs.items():
if info.expectancy > float(self.edge_config.get('minimum_expectancy', 0.2)) and \
info.winrate > float(self.edge_config.get('minimum_winrate', 0.60)):
final.append({
'Pair': pair,
'Winrate': info.winrate,
'Expectancy': info.expectancy,
'Stoploss': info.stoploss,
})
return final
def _fill_calculable_fields(self, result: DataFrame) -> DataFrame:
"""
The result frame contains a number of columns that are calculable
from other columns. These are left blank till all rows are added,
to be populated in single vector calls.
Columns to be populated are:
- Profit
- trade duration
- profit abs
:param result Dataframe
:return: result Dataframe
"""
# stake and fees
# stake = 0.015
# 0.05% is 0.0005
# fee = 0.001
# we set stake amount to an arbitrary amount.
# as it doesn't change the calculation.
# all returned values are relative. they are percentages.
stake = 0.015
fee = self.fee
open_fee = fee / 2
close_fee = fee / 2
result['trade_duration'] = result['close_time'] - result['open_time']
result['trade_duration'] = result['trade_duration'].map(
lambda x: int(x.total_seconds() / 60))
# Spends, Takes, Profit, Absolute Profit
# Buy Price
result['buy_vol'] = stake / result['open_rate']  # How much of the target asset we are buying
result['buy_fee'] = stake * open_fee
result['buy_spend'] = stake + result['buy_fee'] # How much we're spending
# Sell price
result['sell_sum'] = result['buy_vol'] * result['close_rate']
result['sell_fee'] = result['sell_sum'] * close_fee
result['sell_take'] = result['sell_sum'] - result['sell_fee']
# profit_percent
result['profit_percent'] = (result['sell_take'] - result['buy_spend']) / result['buy_spend']
# Absolute profit
result['profit_abs'] = result['sell_take'] - result['buy_spend']
return result
def _process_expectancy(self, results: DataFrame) -> Dict[str, Any]:
"""
This calculates WinRate, Required Risk Reward, Risk Reward and Expectancy of all pairs
The calculation will be done per pair and per strategy.
"""
# Removing pairs having less than min_trades_number
min_trades_number = self.edge_config.get('min_trade_number', 10)
results = results.groupby(['pair', 'stoploss']).filter(lambda x: len(x) > min_trades_number)
###################################
# Removing outliers (Only Pumps) from the dataset
# The method to detect outliers is to calculate standard deviation
# Then every value more than (2 * standard deviation + average) is out (pump)
#
# Removing Pumps
if self.edge_config.get('remove_pumps', False):
results = results.groupby(['pair', 'stoploss']).apply(
lambda x: x[x['profit_abs'] < 2 * x['profit_abs'].std() + x['profit_abs'].mean()])
##########################################################################
# Removing trades having a duration more than X minutes (set in config)
max_trade_duration = self.edge_config.get('max_trade_duration_minute', 1440)
results = results[results.trade_duration < max_trade_duration]
#######################################################################
if results.empty:
return {}
groupby_aggregator = {
'profit_abs': [
('nb_trades', 'count'), # number of all trades
('profit_sum', lambda x: x[x > 0].sum()), # cumulative profit of all winning trades
('loss_sum', lambda x: abs(x[x < 0].sum())), # cumulative loss of all losing trades
('nb_win_trades', lambda x: x[x > 0].count()) # number of winning trades
],
'trade_duration': [('avg_trade_duration', 'mean')]
}
# Group by (pair and stoploss) by applying above aggregator
df = results.groupby(['pair', 'stoploss'])['profit_abs', 'trade_duration'].agg(
groupby_aggregator).reset_index(col_level=1)
# Dropping level 0 as we don't need it
df.columns = df.columns.droplevel(0)
# Calculating number of losing trades, average win and average loss
df['nb_loss_trades'] = df['nb_trades'] - df['nb_win_trades']
df['average_win'] = df['profit_sum'] / df['nb_win_trades']
df['average_loss'] = df['loss_sum'] / df['nb_loss_trades']
# Win rate = number of profitable trades / number of trades
df['winrate'] = df['nb_win_trades'] / df['nb_trades']
# risk_reward_ratio = average win / average loss
df['risk_reward_ratio'] = df['average_win'] / df['average_loss']
# required_risk_reward = (1 / winrate) - 1
df['required_risk_reward'] = (1 / df['winrate']) - 1
# expectancy = (risk_reward_ratio * winrate) - (lossrate)
df['expectancy'] = (df['risk_reward_ratio'] * df['winrate']) - (1 - df['winrate'])
# sort by expectancy and stoploss
df = df.sort_values(by=['expectancy', 'stoploss'], ascending=False).groupby(
'pair').first().sort_values(by=['expectancy'], ascending=False).reset_index()
final = {}
for x in df.itertuples():
final[x.pair] = PairInfo(
x.stoploss,
x.winrate,
x.risk_reward_ratio,
x.required_risk_reward,
x.expectancy,
x.nb_trades,
x.avg_trade_duration
)
# Returning a list of pairs in order of "expectancy"
return final
def _find_trades_for_stoploss_range(self, ticker_data, pair, stoploss_range):
buy_column = ticker_data['buy'].values
sell_column = ticker_data['sell'].values
date_column = ticker_data['date'].values
ohlc_columns = ticker_data[['open', 'high', 'low', 'close']].values
result: list = []
for stoploss in stoploss_range:
result += self._detect_next_stop_or_sell_point(
buy_column, sell_column, date_column, ohlc_columns, round(stoploss, 6), pair
)
return result
def _detect_next_stop_or_sell_point(self, buy_column, sell_column, date_column,
ohlc_columns, stoploss, pair):
"""
Iterate through ohlc_columns in order to find the next trade.
The next trade runs from the first buy signal noticed to
the sell or stoploss signal after it.
It then cuts OHLC, buy_column, sell_column and date_column.
Cut from (the exit trade index) + 1.
Author: https://github.com/mishaker
"""
result: list = []
start_point = 0
while True:
open_trade_index = utf1st.find_1st(buy_column, 1, utf1st.cmp_equal)
# Return empty if we don't find trade entry (i.e. buy==1) or
# we find a buy but at the end of array
if open_trade_index == -1 or open_trade_index == len(buy_column) - 1:
break
else:
# When a buy signal is seen,
# trade opens in reality on the next candle
open_trade_index += 1
stop_price_percentage = stoploss + 1
open_price = ohlc_columns[open_trade_index, 0]
stop_price = (open_price * stop_price_percentage)
# Searching for the index where stoploss is hit
stop_index = utf1st.find_1st(
ohlc_columns[open_trade_index:, 2], stop_price, utf1st.cmp_smaller)
# If we don't find it then we assume stop_index will be far in future (infinite number)
if stop_index == -1:
stop_index = float('inf')
# Searching for the index where sell is hit
sell_index = utf1st.find_1st(sell_column[open_trade_index:], 1, utf1st.cmp_equal)
# If we don't find it then we assume sell_index will be far in future (infinite number)
if sell_index == -1:
sell_index = float('inf')
# Check if we don't find any stop or sell point (in that case trade remains open)
# It is not interesting for Edge to consider it so we simply ignore the trade
# And stop iterating, as there is no more entry
if stop_index == sell_index == float('inf'):
break
if stop_index <= sell_index:
exit_index = open_trade_index + stop_index
exit_type = SellType.STOP_LOSS
exit_price = stop_price
elif stop_index > sell_index:
# If exit is SELL then we exit at the next candle
exit_index = open_trade_index + sell_index + 1
# Check if we have the next candle
if len(ohlc_columns) - 1 < exit_index:
break
exit_type = SellType.SELL_SIGNAL
exit_price = ohlc_columns[exit_index, 0]
trade = {'pair': pair,
'stoploss': stoploss,
'profit_percent': '',
'profit_abs': '',
'open_time': date_column[open_trade_index],
'close_time': date_column[exit_index],
'open_index': start_point + open_trade_index,
'close_index': start_point + exit_index,
'trade_duration': '',
'open_rate': round(open_price, 15),
'close_rate': round(exit_price, 15),
'exit_type': exit_type
}
result.append(trade)
# Giving a view of exit_index till the end of array
buy_column = buy_column[exit_index:]
sell_column = sell_column[exit_index:]
date_column = date_column[exit_index:]
ohlc_columns = ohlc_columns[exit_index:]
start_point += exit_index
return result
from .edge_positioning import Edge, PairInfo # noqa: F401

View File

@ -0,0 +1,454 @@
# pragma pylint: disable=W0603
""" Edge positioning package """
import logging
from typing import Any, Dict, List, NamedTuple
import arrow
import numpy as np
import utils_find_1st as utf1st
from pandas import DataFrame
from freqtrade.configuration import TimeRange
from freqtrade.constants import UNLIMITED_STAKE_AMOUNT
from freqtrade.exceptions import OperationalException
from freqtrade.data.history import get_timerange, load_data, refresh_data
from freqtrade.strategy.interface import SellType
logger = logging.getLogger(__name__)
class PairInfo(NamedTuple):
stoploss: float
winrate: float
risk_reward_ratio: float
required_risk_reward: float
expectancy: float
nb_trades: int
avg_trade_duration: float
class Edge:
"""
Calculates Win Rate, Risk Reward Ratio, Expectancy
against historical data for a given set of markets and a strategy;
it then adjusts stoploss and position size accordingly
and forces them into the strategy.
Author: https://github.com/mishaker
"""
config: Dict = {}
_cached_pairs: Dict[str, Any] = {} # Keeps a list of pairs
def __init__(self, config: Dict[str, Any], exchange, strategy) -> None:
self.config = config
self.exchange = exchange
self.strategy = strategy
self.edge_config = self.config.get('edge', {})
self._cached_pairs: Dict[str, Any] = {} # Keeps a list of pairs
self._final_pairs: list = []
# checking max_open_trades. it should be -1 as with Edge
# the number of trades is determined by position size
if self.config['max_open_trades'] != float('inf'):
logger.critical('max_open_trades should be -1 in config !')
if self.config['stake_amount'] != UNLIMITED_STAKE_AMOUNT:
raise OperationalException('Edge works only with unlimited stake amount')
# Deprecated capital_available_percentage. Will use tradable_balance_ratio in the future.
self._capital_percentage: float = self.edge_config.get(
'capital_available_percentage', self.config['tradable_balance_ratio'])
self._allowed_risk: float = self.edge_config.get('allowed_risk')
self._since_number_of_days: int = self.edge_config.get('calculate_since_number_of_days', 14)
self._last_updated: int = 0 # Timestamp of pairs last updated time
self._refresh_pairs = True
self._stoploss_range_min = float(self.edge_config.get('stoploss_range_min', -0.01))
self._stoploss_range_max = float(self.edge_config.get('stoploss_range_max', -0.05))
self._stoploss_range_step = float(self.edge_config.get('stoploss_range_step', -0.001))
# calculating stoploss range
self._stoploss_range = np.arange(
self._stoploss_range_min,
self._stoploss_range_max,
self._stoploss_range_step
)
self._timerange: TimeRange = TimeRange.parse_timerange("%s-" % arrow.now().shift(
days=-1 * self._since_number_of_days).format('YYYYMMDD'))
if config.get('fee'):
self.fee = config['fee']
else:
self.fee = self.exchange.get_fee(symbol=self.config['exchange']['pair_whitelist'][0])
def calculate(self) -> bool:
pairs = self.config['exchange']['pair_whitelist']
heartbeat = self.edge_config.get('process_throttle_secs')
if (self._last_updated > 0) and (
self._last_updated + heartbeat > arrow.utcnow().timestamp):
return False
data: Dict[str, Any] = {}
logger.info('Using stake_currency: %s ...', self.config['stake_currency'])
logger.info('Using local backtesting data (using whitelist in given config) ...')
if self._refresh_pairs:
refresh_data(
datadir=self.config['datadir'],
pairs=pairs,
exchange=self.exchange,
timeframe=self.strategy.ticker_interval,
timerange=self._timerange,
)
data = load_data(
datadir=self.config['datadir'],
pairs=pairs,
timeframe=self.strategy.ticker_interval,
timerange=self._timerange,
startup_candles=self.strategy.startup_candle_count,
data_format=self.config.get('dataformat_ohlcv', 'json'),
)
if not data:
# Reinitializing cached pairs
self._cached_pairs = {}
logger.critical("No data found. Edge is stopped ...")
return False
preprocessed = self.strategy.ohlcvdata_to_dataframe(data)
# Print timeframe
min_date, max_date = get_timerange(preprocessed)
logger.info(
'Measuring data from %s up to %s (%s days) ...',
min_date.isoformat(),
max_date.isoformat(),
(max_date - min_date).days
)
headers = ['date', 'buy', 'open', 'close', 'sell', 'high', 'low']
trades: list = []
for pair, pair_data in preprocessed.items():
# Sorting dataframe by date and reset index
pair_data = pair_data.sort_values(by=['date'])
pair_data = pair_data.reset_index(drop=True)
df_analyzed = self.strategy.advise_sell(
self.strategy.advise_buy(pair_data, {'pair': pair}), {'pair': pair})[headers].copy()
trades += self._find_trades_for_stoploss_range(df_analyzed, pair, self._stoploss_range)
# If no trade found then exit
if len(trades) == 0:
logger.info("No trades found.")
return False
# Fill missing, calculable columns, profit, duration , abs etc.
trades_df = self._fill_calculable_fields(DataFrame(trades))
self._cached_pairs = self._process_expectancy(trades_df)
self._last_updated = arrow.utcnow().timestamp
return True
def stake_amount(self, pair: str, free_capital: float,
total_capital: float, capital_in_trade: float) -> float:
stoploss = self.stoploss(pair)
available_capital = (total_capital + capital_in_trade) * self._capital_percentage
allowed_capital_at_risk = available_capital * self._allowed_risk
max_position_size = abs(allowed_capital_at_risk / stoploss)
position_size = min(max_position_size, free_capital)
if pair in self._cached_pairs:
logger.info(
'winrate: %s, expectancy: %s, position size: %s, pair: %s,'
' capital in trade: %s, free capital: %s, total capital: %s,'
' stoploss: %s, available capital: %s.',
self._cached_pairs[pair].winrate,
self._cached_pairs[pair].expectancy,
position_size, pair,
capital_in_trade, free_capital, total_capital,
stoploss, available_capital
)
return round(position_size, 15)
def stoploss(self, pair: str) -> float:
if pair in self._cached_pairs:
return self._cached_pairs[pair].stoploss
else:
logger.warning('tried to access stoploss of a non-existing pair, '
'strategy stoploss is returned instead.')
return self.strategy.stoploss
def adjust(self, pairs: List[str]) -> list:
"""
Filters out and sorts "pairs" according to Edge calculated pairs
"""
final = []
for pair, info in self._cached_pairs.items():
if info.expectancy > float(self.edge_config.get('minimum_expectancy', 0.2)) and \
info.winrate > float(self.edge_config.get('minimum_winrate', 0.60)) and \
pair in pairs:
final.append(pair)
if self._final_pairs != final:
self._final_pairs = final
if self._final_pairs:
logger.info(
'Minimum expectancy and minimum winrate are met only for %s,'
' so other pairs are filtered out.',
self._final_pairs
)
else:
logger.info(
'Edge removed all pairs as no pair with minimum expectancy '
'and minimum winrate was found !'
)
return self._final_pairs
def accepted_pairs(self) -> list:
"""
return a list of accepted pairs along with their winrate, expectancy and stoploss
"""
final = []
for pair, info in self._cached_pairs.items():
if info.expectancy > float(self.edge_config.get('minimum_expectancy', 0.2)) and \
info.winrate > float(self.edge_config.get('minimum_winrate', 0.60)):
final.append({
'Pair': pair,
'Winrate': info.winrate,
'Expectancy': info.expectancy,
'Stoploss': info.stoploss,
})
return final
def _fill_calculable_fields(self, result: DataFrame) -> DataFrame:
"""
The result frame contains a number of columns that are calculable
from other columns. These are left blank till all rows are added,
to be populated in single vector calls.
Columns to be populated are:
- Profit
- trade duration
- profit abs
:param result Dataframe
:return: result Dataframe
"""
# We set stake amount to an arbitrary amount, as it doesn't change the calculation.
# All returned values are relative, they are defined as ratios.
stake = 0.015
result['trade_duration'] = result['close_time'] - result['open_time']
result['trade_duration'] = result['trade_duration'].map(
lambda x: int(x.total_seconds() / 60))
# Spends, Takes, Profit, Absolute Profit
# Buy Price
result['buy_vol'] = stake / result['open_rate']  # How much of the target asset we are buying
result['buy_fee'] = stake * self.fee
result['buy_spend'] = stake + result['buy_fee'] # How much we're spending
# Sell price
result['sell_sum'] = result['buy_vol'] * result['close_rate']
result['sell_fee'] = result['sell_sum'] * self.fee
result['sell_take'] = result['sell_sum'] - result['sell_fee']
# profit_ratio
result['profit_ratio'] = (result['sell_take'] - result['buy_spend']) / result['buy_spend']
# Absolute profit
result['profit_abs'] = result['sell_take'] - result['buy_spend']
return result
def _process_expectancy(self, results: DataFrame) -> Dict[str, Any]:
"""
This calculates WinRate, Required Risk Reward, Risk Reward and Expectancy of all pairs
The calculation will be done per pair and per strategy.
"""
# Removing pairs having less than min_trades_number
min_trades_number = self.edge_config.get('min_trade_number', 10)
results = results.groupby(['pair', 'stoploss']).filter(lambda x: len(x) > min_trades_number)
###################################
# Removing outliers (Only Pumps) from the dataset
# The method to detect outliers is to calculate standard deviation
# Then every value more than (2 * standard deviation + average) is out (pump)
#
# Removing Pumps
if self.edge_config.get('remove_pumps', False):
results = results.groupby(['pair', 'stoploss']).apply(
lambda x: x[x['profit_abs'] < 2 * x['profit_abs'].std() + x['profit_abs'].mean()])
##########################################################################
# Removing trades having a duration more than X minutes (set in config)
max_trade_duration = self.edge_config.get('max_trade_duration_minute', 1440)
results = results[results.trade_duration < max_trade_duration]
#######################################################################
if results.empty:
return {}
groupby_aggregator = {
'profit_abs': [
('nb_trades', 'count'), # number of all trades
('profit_sum', lambda x: x[x > 0].sum()), # cumulative profit of all winning trades
('loss_sum', lambda x: abs(x[x < 0].sum())), # cumulative loss of all losing trades
('nb_win_trades', lambda x: x[x > 0].count()) # number of winning trades
],
'trade_duration': [('avg_trade_duration', 'mean')]
}
# Group by (pair and stoploss) by applying above aggregator
df = results.groupby(['pair', 'stoploss'])[['profit_abs', 'trade_duration']].agg(
groupby_aggregator).reset_index(col_level=1)
# Dropping level 0 as we don't need it
df.columns = df.columns.droplevel(0)
# Calculating number of losing trades, average win and average loss
df['nb_loss_trades'] = df['nb_trades'] - df['nb_win_trades']
df['average_win'] = df['profit_sum'] / df['nb_win_trades']
df['average_loss'] = df['loss_sum'] / df['nb_loss_trades']
# Win rate = number of profitable trades / number of trades
df['winrate'] = df['nb_win_trades'] / df['nb_trades']
# risk_reward_ratio = average win / average loss
df['risk_reward_ratio'] = df['average_win'] / df['average_loss']
# required_risk_reward = (1 / winrate) - 1
df['required_risk_reward'] = (1 / df['winrate']) - 1
# expectancy = (risk_reward_ratio * winrate) - (lossrate)
df['expectancy'] = (df['risk_reward_ratio'] * df['winrate']) - (1 - df['winrate'])
# sort by expectancy and stoploss
df = df.sort_values(by=['expectancy', 'stoploss'], ascending=False).groupby(
'pair').first().sort_values(by=['expectancy'], ascending=False).reset_index()
final = {}
for x in df.itertuples():
final[x.pair] = PairInfo(
x.stoploss,
x.winrate,
x.risk_reward_ratio,
x.required_risk_reward,
x.expectancy,
x.nb_trades,
x.avg_trade_duration
)
# Returning a list of pairs in order of "expectancy"
return final
def _find_trades_for_stoploss_range(self, df, pair, stoploss_range):
buy_column = df['buy'].values
sell_column = df['sell'].values
date_column = df['date'].values
ohlc_columns = df[['open', 'high', 'low', 'close']].values
result: list = []
for stoploss in stoploss_range:
result += self._detect_next_stop_or_sell_point(
buy_column, sell_column, date_column, ohlc_columns, round(stoploss, 6), pair
)
return result
def _detect_next_stop_or_sell_point(self, buy_column, sell_column, date_column,
ohlc_columns, stoploss, pair):
"""
Iterate through ohlc_columns in order to find the next trade.
The next trade runs from the first buy signal noticed to
the sell or stoploss signal after it.
It then cuts OHLC, buy_column, sell_column and date_column.
Cut from (the exit trade index) + 1.
Author: https://github.com/mishaker
"""
result: list = []
start_point = 0
while True:
open_trade_index = utf1st.find_1st(buy_column, 1, utf1st.cmp_equal)
# Return empty if we don't find trade entry (i.e. buy==1) or
# we find a buy but at the end of array
if open_trade_index == -1 or open_trade_index == len(buy_column) - 1:
break
else:
# When a buy signal is seen,
# trade opens in reality on the next candle
open_trade_index += 1
open_price = ohlc_columns[open_trade_index, 0]
stop_price = (open_price * (stoploss + 1))
# Searching for the index where stoploss is hit
stop_index = utf1st.find_1st(
ohlc_columns[open_trade_index:, 2], stop_price, utf1st.cmp_smaller)
# If we don't find it then we assume stop_index will be far in future (infinite number)
if stop_index == -1:
stop_index = float('inf')
# Searching for the index where sell is hit
sell_index = utf1st.find_1st(sell_column[open_trade_index:], 1, utf1st.cmp_equal)
# If we don't find it then we assume sell_index will be far in future (infinite number)
if sell_index == -1:
sell_index = float('inf')
# Check if we don't find any stop or sell point (in that case trade remains open)
# It is not interesting for Edge to consider it so we simply ignore the trade
# And stop iterating, as there is no more entry
if stop_index == sell_index == float('inf'):
break
if stop_index <= sell_index:
exit_index = open_trade_index + stop_index
exit_type = SellType.STOP_LOSS
exit_price = stop_price
elif stop_index > sell_index:
# If exit is SELL then we exit at the next candle
exit_index = open_trade_index + sell_index + 1
# Check if we have the next candle
if len(ohlc_columns) - 1 < exit_index:
break
exit_type = SellType.SELL_SIGNAL
exit_price = ohlc_columns[exit_index, 0]
trade = {'pair': pair,
'stoploss': stoploss,
'profit_ratio': '',
'profit_abs': '',
'open_time': date_column[open_trade_index],
'close_time': date_column[exit_index],
'open_index': start_point + open_trade_index,
'close_index': start_point + exit_index,
'trade_duration': '',
'open_rate': round(open_price, 15),
'close_rate': round(exit_price, 15),
'exit_type': exit_type
}
result.append(trade)
# Giving a view of exit_index till the end of array
buy_column = buy_column[exit_index:]
sell_column = sell_column[exit_index:]
date_column = date_column[exit_index:]
ohlc_columns = ohlc_columns[exit_index:]
start_point += exit_index
return result
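An illustrative sketch of the stop-price arithmetic used by _detect_next_stop_or_sell_point (prices and stoploss are assumed values): with an open price of 100 and a stoploss of -0.03, the trade counts as stopped out on the first candle whose low drops below 97.

open_price, stoploss = 100.0, -0.03
stop_price = open_price * (stoploss + 1)   # 97.0
lows = [99.5, 98.2, 96.8, 97.5]            # stand-in for ohlc_columns[open_trade_index:, 2]
stop_index = next((i for i, low in enumerate(lows) if low < stop_price), -1)
print(stop_price, stop_index)              # 97.0 2 -> stoploss hit on the third candle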

44
freqtrade/exceptions.py Normal file
View File

@ -0,0 +1,44 @@
class FreqtradeException(Exception):
"""
Freqtrade base exception. Handled at the outermost level.
All other exception types are subclasses of this exception type.
"""
class OperationalException(FreqtradeException):
"""
Requires manual intervention and will stop the bot.
Most of the time, this is caused by an invalid Configuration.
"""
class DependencyException(FreqtradeException):
"""
Indicates that an assumed dependency is not met.
This could happen when there is currently not enough money on the account.
"""
class InvalidOrderException(FreqtradeException):
"""
This is raised when the order is not valid. Example:
if a stoploss-on-exchange order has been hit, then trying to cancel that order
should raise this exception.
"""
class TemporaryError(FreqtradeException):
"""
Temporary network or exchange related error.
This could happen when an exchange is congested, unavailable, or the user
has networking problems. Usually resolves itself after a time.
"""
class StrategyError(FreqtradeException):
"""
Errors with custom user-code detected.
Usually caused by errors in the strategy.
"""

View File

@ -1,18 +1,20 @@
from freqtrade.exchange.common import MAP_EXCHANGE_CHILDCLASS # noqa: F401
from freqtrade.exchange.exchange import Exchange # noqa: F401
from freqtrade.exchange.exchange import (get_exchange_bad_reason, # noqa: F401
# flake8: noqa: F401
from freqtrade.exchange.common import MAP_EXCHANGE_CHILDCLASS
from freqtrade.exchange.exchange import Exchange
from freqtrade.exchange.exchange import (get_exchange_bad_reason,
is_exchange_bad,
is_exchange_known_ccxt,
is_exchange_officially_supported,
ccxt_exchanges,
available_exchanges)
from freqtrade.exchange.exchange import (timeframe_to_seconds, # noqa: F401
from freqtrade.exchange.exchange import (timeframe_to_seconds,
timeframe_to_minutes,
timeframe_to_msecs,
timeframe_to_next_date,
timeframe_to_prev_date)
from freqtrade.exchange.exchange import (market_is_active, # noqa: F401
from freqtrade.exchange.exchange import (market_is_active,
symbol_is_pair)
from freqtrade.exchange.kraken import Kraken # noqa: F401
from freqtrade.exchange.binance import Binance # noqa: F401
from freqtrade.exchange.bibox import Bibox # noqa: F401
from freqtrade.exchange.kraken import Kraken
from freqtrade.exchange.binance import Binance
from freqtrade.exchange.bibox import Bibox
from freqtrade.exchange.ftx import Ftx

View File

@ -4,8 +4,8 @@ from typing import Dict
import ccxt
from freqtrade import (DependencyException, InvalidOrderException,
OperationalException, TemporaryError)
from freqtrade.exceptions import (DependencyException, InvalidOrderException,
OperationalException, TemporaryError)
from freqtrade.exchange import Exchange
logger = logging.getLogger(__name__)
@ -32,16 +32,26 @@ class Binance(Exchange):
return super().get_order_book(pair, limit)
def stoploss_limit(self, pair: str, amount: float, stop_price: float, rate: float) -> Dict:
def stoploss_adjust(self, stop_loss: float, order: Dict) -> bool:
"""
Verify stop_loss against stoploss-order value (limit or price)
Returns True if adjustment is necessary.
"""
return order['type'] == 'stop_loss_limit' and stop_loss > float(order['info']['stopPrice'])
def stoploss(self, pair: str, amount: float, stop_price: float, order_types: Dict) -> Dict:
"""
creates a stoploss limit order.
this stoploss-limit is binance-specific.
It may work with a limited number of other exchanges, but this has not been tested yet.
"""
# Limit price threshold: As limit price should always be below stop-price
limit_price_pct = order_types.get('stoploss_on_exchange_limit_ratio', 0.99)
rate = stop_price * limit_price_pct
ordertype = "stop_loss_limit"
stop_price = self.symbol_price_prec(pair, stop_price)
stop_price = self.price_to_precision(pair, stop_price)
# Ensure rate is less than stop price
if stop_price <= rate:
@ -57,12 +67,12 @@ class Binance(Exchange):
params = self._params.copy()
params.update({'stopPrice': stop_price})
amount = self.symbol_amount_prec(pair, amount)
amount = self.amount_to_precision(pair, amount)
rate = self.symbol_price_prec(pair, rate)
rate = self.price_to_precision(pair, rate)
order = self._api.create_order(pair, ordertype, 'sell',
amount, rate, params)
order = self._api.create_order(symbol=pair, type=ordertype, side='sell',
amount=amount, price=rate, params=params)
logger.info('stoploss limit order added for %s. '
'stop price: %s. limit: %s', pair, stop_price, rate)
return order

View File

@ -1,6 +1,6 @@
import logging
from freqtrade import DependencyException, TemporaryError
from freqtrade.exceptions import DependencyException, TemporaryError
logger = logging.getLogger(__name__)

View File

@ -7,21 +7,25 @@ import inspect
import logging
from copy import deepcopy
from datetime import datetime, timezone
from math import ceil, floor
from math import ceil
from random import randint
from typing import Any, Dict, List, Optional, Tuple
import arrow
import ccxt
import ccxt.async_support as ccxt_async
from ccxt.base.decimal_to_precision import ROUND_DOWN, ROUND_UP
from ccxt.base.decimal_to_precision import (ROUND_DOWN, ROUND_UP, TICK_SIZE,
TRUNCATE, decimal_to_precision)
from pandas import DataFrame
from freqtrade import (DependencyException, InvalidOrderException,
OperationalException, TemporaryError)
from freqtrade.data.converter import parse_ticker_dataframe
from freqtrade.data.converter import ohlcv_to_dataframe, trades_dict_to_list
from freqtrade.exceptions import (DependencyException, InvalidOrderException,
OperationalException, TemporaryError)
from freqtrade.exchange.common import BAD_EXCHANGES, retrier, retrier_async
from freqtrade.misc import deep_merge_dicts
from freqtrade.misc import deep_merge_dicts, safe_value_fallback
CcxtModuleType = Any
logger = logging.getLogger(__name__)
@ -50,7 +54,7 @@ class Exchange:
}
_ft_has: Dict = {}
def __init__(self, config: dict, validate: bool = True) -> None:
def __init__(self, config: Dict[str, Any], validate: bool = True) -> None:
"""
Initializes this module with the given config,
it does basic validation whether the specified exchange and pairs are valid.
@ -61,8 +65,6 @@ class Exchange:
self._config.update(config)
self._cached_ticker: Dict[str, Any] = {}
# Holds last candle refreshed time of each pair
self._pairs_last_refresh_time: Dict[Tuple[str, str], int] = {}
# Timestamp of last markets refresh
@ -116,6 +118,7 @@ class Exchange:
self._load_markets()
# Check if all pairs are available
self.validate_stakecurrency(config['stake_currency'])
self.validate_pairs(config['exchange']['pair_whitelist'])
self.validate_ordertypes(config.get('order_types', {}))
self.validate_order_time_in_force(config.get('order_time_in_force', {}))
@ -133,7 +136,7 @@ class Exchange:
if self._api_async and inspect.iscoroutinefunction(self._api_async.close):
asyncio.get_event_loop().run_until_complete(self._api_async.close())
def _init_ccxt(self, exchange_config: dict, ccxt_module=ccxt,
def _init_ccxt(self, exchange_config: Dict[str, Any], ccxt_module: CcxtModuleType = ccxt,
ccxt_kwargs: dict = None) -> ccxt.Exchange:
"""
Initialize ccxt with given config and return valid
@ -188,6 +191,11 @@ class Exchange:
self._load_markets()
return self._api.markets
@property
def precisionMode(self) -> str:
"""exchange ccxt precisionMode"""
return self._api.precisionMode
def get_markets(self, base_currencies: List[str] = None, quote_currencies: List[str] = None,
pairs_only: bool = False, active_only: bool = False) -> Dict:
"""
@ -210,13 +218,32 @@ class Exchange:
markets = {k: v for k, v in markets.items() if market_is_active(v)}
return markets
def klines(self, pair_interval: Tuple[str, str], copy=True) -> DataFrame:
def get_quote_currencies(self) -> List[str]:
"""
Return a list of supported quote currencies
"""
markets = self.markets
return sorted(set([x['quote'] for _, x in markets.items()]))
def get_pair_quote_currency(self, pair: str) -> str:
"""
Return a pair's quote currency
"""
return self.markets.get(pair, {}).get('quote', '')
def get_pair_base_currency(self, pair: str) -> str:
"""
Return a pair's base currency
"""
return self.markets.get(pair, {}).get('base', '')
def klines(self, pair_interval: Tuple[str, str], copy: bool = True) -> DataFrame:
if pair_interval in self._klines:
return self._klines[pair_interval].copy() if copy else self._klines[pair_interval]
else:
return DataFrame()
def set_sandbox(self, api, exchange_config: dict, name: str):
def set_sandbox(self, api: ccxt.Exchange, exchange_config: dict, name: str) -> None:
if exchange_config.get('sandbox'):
if api.urls.get('test'):
api.urls['api'] = api.urls['test']
@ -226,7 +253,7 @@ class Exchange:
"Please check your config.json")
raise OperationalException(f'Exchange {name} does not provide a sandbox api')
def _load_async_markets(self, reload=False) -> None:
def _load_async_markets(self, reload: bool = False) -> None:
try:
if self._api_async:
asyncio.get_event_loop().run_until_complete(
@ -259,18 +286,30 @@ class Exchange:
except ccxt.BaseError:
logger.exception("Could not reload markets.")
def validate_stakecurrency(self, stake_currency: str) -> None:
"""
Checks stake-currency against available currencies on the exchange.
:param stake_currency: Stake-currency to validate
:raise: OperationalException if stake-currency is not available.
"""
quote_currencies = self.get_quote_currencies()
if stake_currency not in quote_currencies:
raise OperationalException(
f"{stake_currency} is not available as stake on {self.name}. "
f"Available currencies are: {', '.join(quote_currencies)}")
def validate_pairs(self, pairs: List[str]) -> None:
"""
Checks if all given pairs are tradable on the current exchange.
Raises OperationalException if one pair is not available.
:param pairs: list of pairs
:raise: OperationalException if one pair is not available
:return: None
"""
if not self.markets:
logger.warning('Unable to validate pairs (assuming they are correct).')
return
invalid_pairs = []
for pair in pairs:
# Note: ccxt has BaseCurrency/QuoteCurrency format for pairs
# TODO: add a support for having coins in BTC/USDT format
@ -278,14 +317,29 @@ class Exchange:
raise OperationalException(
f'Pair {pair} is not available on {self.name}. '
f'Please remove {pair} from your whitelist.')
elif self.markets[pair].get('info', {}).get('IsRestricted', False):
# From ccxt Documentation:
# markets.info: An associative array of non-common market properties,
# including fees, rates, limits and other general market information.
# The internal info array is different for each particular market,
# its contents depend on the exchange.
# It can also be a string or similar ... so we need to verify that first.
elif (isinstance(self.markets[pair].get('info', None), dict)
and self.markets[pair].get('info', {}).get('IsRestricted', False)):
# Warn users about restricted pairs in whitelist.
# We cannot determine reliably if Users are affected.
logger.warning(f"Pair {pair} is restricted for some users on this exchange."
f"Please check if you are impacted by this restriction "
f"on the exchange and eventually remove {pair} from your whitelist.")
if (self._config['stake_currency'] and
self.get_pair_quote_currency(pair) != self._config['stake_currency']):
invalid_pairs.append(pair)
if invalid_pairs:
raise OperationalException(
f"Stake-currency '{self._config['stake_currency']}' not compatible with "
f"pair-whitelist. Please remove the following pairs: {invalid_pairs}")
def get_valid_pair_combination(self, curr_1, curr_2) -> str:
def get_valid_pair_combination(self, curr_1: str, curr_2: str) -> str:
"""
Get valid pair combination of curr_1 and curr_2 by trying both combinations.
"""
@ -296,7 +350,7 @@ class Exchange:
def validate_timeframes(self, timeframe: Optional[str]) -> None:
"""
Checks if ticker interval from config is a supported timeframe on the exchange
Check if timeframe from config is a supported timeframe on the exchange
"""
if not hasattr(self._api, "timeframes") or self._api.timeframes is None:
# If timeframes attribute is missing (or is None), the exchange probably
@ -309,7 +363,10 @@ class Exchange:
if timeframe and (timeframe not in self.timeframes):
raise OperationalException(
f"Invalid ticker interval '{timeframe}'. This exchange supports: {self.timeframes}")
f"Invalid timeframe '{timeframe}'. This exchange supports: {self.timeframes}")
if timeframe and timeframe_to_minutes(timeframe) < 1:
raise OperationalException("Timeframes < 1m are currently not supported by Freqtrade.")
def validate_ordertypes(self, order_types: Dict) -> None:
"""
@ -335,7 +392,7 @@ class Exchange:
raise OperationalException(
f'Time in force policies are not supported for {self.name} yet.')
def validate_required_startup_candles(self, startup_candles) -> None:
def validate_required_startup_candles(self, startup_candles: int) -> None:
"""
Checks if required startup_candles is more than ohlcv_candle_limit.
Requires a grace-period of 5 candles - so a startup-period up to 494 is allowed by default.
@ -354,58 +411,91 @@ class Exchange:
"""
return endpoint in self._api.has and self._api.has[endpoint]
def symbol_amount_prec(self, pair, amount: float):
def amount_to_precision(self, pair: str, amount: float) -> float:
'''
Returns the amount to buy or sell to a precision the Exchange accepts
Rounded down
Reimplementation of ccxt internal methods - ensuring we can test the result is correct
based on our definitions.
'''
if self.markets[pair]['precision']['amount']:
symbol_prec = self.markets[pair]['precision']['amount']
big_amount = amount * pow(10, symbol_prec)
amount = floor(big_amount) / pow(10, symbol_prec)
amount = float(decimal_to_precision(amount, rounding_mode=TRUNCATE,
precision=self.markets[pair]['precision']['amount'],
counting_mode=self.precisionMode,
))
return amount
def symbol_price_prec(self, pair, price: float):
def price_to_precision(self, pair: str, price: float) -> float:
'''
Returns the price buying or selling with to the precision the Exchange accepts
Returns the price rounded up to the precision the Exchange accepts.
Partial Reimplementation of ccxt internal method decimal_to_precision(),
which does not support rounding up
TODO: If ccxt supports ROUND_UP for decimal_to_precision(), we could remove this and
align with amount_to_precision().
Rounds up
'''
if self.markets[pair]['precision']['price']:
symbol_prec = self.markets[pair]['precision']['price']
big_price = price * pow(10, symbol_prec)
price = ceil(big_price) / pow(10, symbol_prec)
# price = float(decimal_to_precision(price, rounding_mode=ROUND,
# precision=self.markets[pair]['precision']['price'],
# counting_mode=self.precisionMode,
# ))
if self.precisionMode == TICK_SIZE:
precision = self.markets[pair]['precision']['price']
missing = price % precision
if missing != 0:
price = price - missing + precision
else:
symbol_prec = self.markets[pair]['precision']['price']
big_price = price * pow(10, symbol_prec)
price = ceil(big_price) / pow(10, symbol_prec)
return price
def price_get_one_pip(self, pair: str, price: float) -> float:
"""
Gets the "1 pip" value for this pair.
Used in PriceFilter to calculate the 1pip movements.
"""
precision = self.markets[pair]['precision']['price']
if self.precisionMode == TICK_SIZE:
return precision
else:
return 1 / pow(10, precision)
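An illustrative sketch of the TICK_SIZE branch of price_to_precision above (tick size and price are assumptions): a price of 1.2347 with a 0.001 tick is rounded up to the next tick.

price, precision = 1.2347, 0.001   # in TICK_SIZE mode, 'precision' is the tick size itself
missing = price % precision
if missing != 0:
    price = price - missing + precision
print(round(price, 10))            # 1.235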
def dry_run_order(self, pair: str, ordertype: str, side: str, amount: float,
rate: float, params: Dict = {}) -> Dict[str, Any]:
order_id = f'dry_run_{side}_{randint(0, 10**6)}'
_amount = self.symbol_amount_prec(pair, amount)
_amount = self.amount_to_precision(pair, amount)
dry_order = {
"id": order_id,
'pair': pair,
'price': rate,
'amount': _amount,
"cost": _amount * rate,
'cost': _amount * rate,
'type': ordertype,
'side': side,
'remaining': _amount,
'datetime': arrow.utcnow().isoformat(),
'status': "closed" if ordertype == "market" else "open",
'fee': None,
"info": {}
'info': {}
}
self._store_dry_order(dry_order)
self._store_dry_order(dry_order, pair)
# Copy order and close it - so the returned order is open unless it's a market order
return dry_order
def _store_dry_order(self, dry_order: Dict) -> None:
def _store_dry_order(self, dry_order: Dict, pair: str) -> None:
closed_order = dry_order.copy()
if closed_order["type"] in ["market", "limit"]:
if closed_order['type'] in ["market", "limit"]:
closed_order.update({
"status": "closed",
"filled": closed_order["amount"],
"remaining": 0
})
'status': 'closed',
'filled': closed_order['amount'],
'remaining': 0,
'fee': {
'currency': self.get_pair_quote_currency(pair),
'cost': dry_order['cost'] * self.get_fee(pair),
'rate': self.get_fee(pair)
}
})
if closed_order["type"] in ["stop_loss_limit"]:
closed_order["info"].update({"stopPrice": closed_order["price"]})
self._dry_run_open_orders[closed_order["id"]] = closed_order
@ -414,13 +504,13 @@ class Exchange:
rate: float, params: Dict = {}) -> Dict:
try:
# Set the precision for amount and price(rate) as accepted by the exchange
amount = self.symbol_amount_prec(pair, amount)
amount = self.amount_to_precision(pair, amount)
needs_price = (ordertype != 'market'
or self._api.options.get("createMarketBuyOrderRequiresPrice", False))
rate = self.symbol_price_prec(pair, rate) if needs_price else None
rate_for_order = self.price_to_precision(pair, rate) if needs_price else None
return self._api.create_order(pair, ordertype, side,
amount, rate, params)
amount, rate_for_order, params)
except ccxt.InsufficientFunds as e:
raise DependencyException(
@ -439,7 +529,7 @@ class Exchange:
raise OperationalException(e) from e
def buy(self, pair: str, ordertype: str, amount: float,
rate: float, time_in_force) -> Dict:
rate: float, time_in_force: str) -> Dict:
if self._config['dry_run']:
dry_order = self.dry_run_order(pair, ordertype, "buy", amount, rate)
@ -452,7 +542,7 @@ class Exchange:
return self.create_order(pair, ordertype, 'buy', amount, rate, params)
def sell(self, pair: str, ordertype: str, amount: float,
rate: float, time_in_force='gtc') -> Dict:
rate: float, time_in_force: str = 'gtc') -> Dict:
if self._config['dry_run']:
dry_order = self.dry_run_order(pair, ordertype, "sell", amount, rate)
@ -464,9 +554,17 @@ class Exchange:
return self.create_order(pair, ordertype, 'sell', amount, rate, params)
def stoploss_limit(self, pair: str, amount: float, stop_price: float, rate: float) -> Dict:
def stoploss_adjust(self, stop_loss: float, order: Dict) -> bool:
"""
creates a stoploss limit order.
Verify stop_loss against stoploss-order value (limit or price)
Returns True if adjustment is necessary.
"""
raise OperationalException(f"stoploss is not implemented for {self.name}.")
def stoploss(self, pair: str, amount: float, stop_price: float, order_types: Dict) -> Dict:
"""
creates a stoploss order.
The precise ordertype is determined by the order_types dict or exchange default.
Since ccxt does not unify stoploss-limit orders yet, this needs to be implemented in each
exchange's subclass.
The exception below should never raise, since we disallow
@ -474,7 +572,7 @@ class Exchange:
Note: Changes to this interface need to be applied to all sub-classes too.
"""
raise OperationalException(f"stoploss_limit is not implemented for {self.name}.")
raise OperationalException(f"stoploss is not implemented for {self.name}.")
@retrier
def get_balance(self, currency: str) -> float:
@ -515,7 +613,7 @@ class Exchange:
return self._api.fetch_tickers()
except ccxt.NotSupported as e:
raise OperationalException(
f'Exchange {self._api.name} does not support fetching tickers in batch.'
f'Exchange {self._api.name} does not support fetching tickers in batch. '
f'Message: {e}') from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
@ -524,39 +622,28 @@ class Exchange:
raise OperationalException(e) from e
@retrier
def get_ticker(self, pair: str, refresh: Optional[bool] = True) -> dict:
if refresh or pair not in self._cached_ticker.keys():
try:
if pair not in self._api.markets or not self._api.markets[pair].get('active'):
raise DependencyException(f"Pair {pair} not available")
data = self._api.fetch_ticker(pair)
try:
self._cached_ticker[pair] = {
'bid': float(data['bid']),
'ask': float(data['ask']),
}
except KeyError:
logger.debug("Could not cache ticker data for %s", pair)
return data
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not load ticker due to {e.__class__.__name__}. Message: {e}') from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
else:
logger.info("returning cached ticker-data for %s", pair)
return self._cached_ticker[pair]
def fetch_ticker(self, pair: str) -> dict:
try:
if pair not in self._api.markets or not self._api.markets[pair].get('active'):
raise DependencyException(f"Pair {pair} not available")
data = self._api.fetch_ticker(pair)
return data
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not load ticker due to {e.__class__.__name__}. Message: {e}') from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
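The wrapper above is a thin guard around the underlying ccxt call; a bare ccxt sketch of that call (network access required, the exchange and pair are illustrative only):
import ccxt

exchange = ccxt.binance()        # public endpoints, no API keys needed for tickers
exchange.load_markets()          # provides the 'active' flag that the wrapper checks above
ticker = exchange.fetch_ticker('ETH/BTC')
print(ticker['bid'], ticker['ask'], ticker['last'])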
def get_historic_ohlcv(self, pair: str, timeframe: str,
since_ms: int) -> List:
"""
Gets candle history using asyncio and returns the list of candles.
Handles all async doing.
Async over one pair, assuming we get `_ohlcv_candle_limit` candles per call.
Get candle history using asyncio and returns the list of candles.
Handles all async work for this.
Async over one pair, assuming we get `self._ohlcv_candle_limit` candles per call.
:param pair: Pair to download
:param timeframe: Ticker Timeframe to get
:param timeframe: Timeframe to get data for
:param since_ms: Timestamp in milliseconds to get history from
:returns List of tickers
:returns List with candle (OHLCV) data
"""
return asyncio.get_event_loop().run_until_complete(
self._async_get_historic_ohlcv(pair=pair, timeframe=timeframe,
@ -576,26 +663,27 @@ class Exchange:
pair, timeframe, since) for since in
range(since_ms, arrow.utcnow().timestamp * 1000, one_call)]
tickers = await asyncio.gather(*input_coroutines, return_exceptions=True)
results = await asyncio.gather(*input_coroutines, return_exceptions=True)
# Combine tickers
# Combine gathered results
data: List = []
for p, timeframe, ticker in tickers:
for p, timeframe, res in results:
if p == pair:
data.extend(ticker)
data.extend(res)
# Sort data again after extending the result - above calls return in "async order"
data = sorted(data, key=lambda x: x[0])
logger.info("downloaded %s with length %s.", pair, len(data))
logger.info("Downloaded data for %s with length %s.", pair, len(data))
return data
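The gather above fires one coroutine per candle-limit window; a small arithmetic sketch of how those windows are laid out (limit, timeframe and timestamps are illustrative):
# Illustrative numbers: 5m candles, 500 candles per call, 3 days of history.
timeframe_ms = 5 * 60 * 1000
candle_limit = 500
one_call = timeframe_ms * candle_limit          # ms of history covered by one fetch_ohlcv call

now_ms = 1_589_500_800_000                      # any "now" timestamp in ms
since_ms = now_ms - 3 * 24 * 60 * 60 * 1000
windows = list(range(since_ms, now_ms, one_call))
print(len(windows))                             # 2 calls cover the 864 five-minute candles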
def refresh_latest_ohlcv(self, pair_list: List[Tuple[str, str]]) -> List[Tuple[str, List]]:
"""
Refresh in-memory ohlcv asynchronously and set `_klines` with the result
Refresh in-memory OHLCV asynchronously and set `_klines` with the result
Loops asynchronously over pair_list and downloads all pairs async (semi-parallel).
Only used in the dataprovider.refresh() method.
:param pair_list: List of 2 element tuples containing pair, interval to refresh
:return: Returns a List of ticker-dataframes.
:return: TODO: return value is only used in the tests, get rid of it
"""
logger.debug("Refreshing ohlcv data for %d pairs", len(pair_list))
logger.debug("Refreshing candle (OHLCV) data for %d pairs", len(pair_list))
input_coroutines = []
@ -606,15 +694,15 @@ class Exchange:
input_coroutines.append(self._async_get_candle_history(pair, timeframe))
else:
logger.debug(
"Using cached ohlcv data for pair %s, timeframe %s ...",
"Using cached candle (OHLCV) data for pair %s, timeframe %s ...",
pair, timeframe
)
tickers = asyncio.get_event_loop().run_until_complete(
results = asyncio.get_event_loop().run_until_complete(
asyncio.gather(*input_coroutines, return_exceptions=True))
# handle caching
for res in tickers:
for res in results:
if isinstance(res, Exception):
logger.warning("Async code raised an exception: %s", res.__class__.__name__)
continue
@ -625,13 +713,14 @@ class Exchange:
if ticks:
self._pairs_last_refresh_time[(pair, timeframe)] = ticks[-1][0] // 1000
# keeping parsed dataframe in cache
self._klines[(pair, timeframe)] = parse_ticker_dataframe(
self._klines[(pair, timeframe)] = ohlcv_to_dataframe(
ticks, timeframe, pair=pair, fill_missing=True,
drop_incomplete=self._ohlcv_partial_candle)
return tickers
return results
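The cache written above is keyed by (pair, timeframe) tuples; a minimal sketch of that layout with a made-up candle (column names follow the OHLCV convention used throughout freqtrade):
from typing import Dict, Tuple
from pandas import DataFrame

_klines: Dict[Tuple[str, str], DataFrame] = {}
_klines[('ETH/BTC', '5m')] = DataFrame(
    [[1589500800000, 0.0250, 0.0260, 0.0240, 0.0255, 12.3]],
    columns=['date', 'open', 'high', 'low', 'close', 'volume'])
print(_klines[('ETH/BTC', '5m')].iloc[-1]['close'])   # 0.0255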
def _now_is_time_to_refresh(self, pair: str, timeframe: str) -> bool:
# Calculating ticker interval in seconds
# Timeframe in seconds
interval_in_sec = timeframe_to_seconds(timeframe)
return not ((self._pairs_last_refresh_time.get((pair, timeframe), 0)
@ -641,11 +730,11 @@ class Exchange:
async def _async_get_candle_history(self, pair: str, timeframe: str,
since_ms: Optional[int] = None) -> Tuple[str, str, List]:
"""
Asynchronously gets candle histories using fetch_ohlcv
Asynchronously get candle history data using fetch_ohlcv
returns tuple: (pair, timeframe, ohlcv_list)
"""
try:
# fetch ohlcv asynchronously
# Fetch OHLCV asynchronously
s = '(' + arrow.get(since_ms // 1000).isoformat() + ') ' if since_ms is not None else ''
logger.debug(
"Fetching pair %s, interval %s, since %s %s...",
@ -655,9 +744,9 @@ class Exchange:
data = await self._api_async.fetch_ohlcv(pair, timeframe=timeframe,
since=since_ms)
# Because some exchange sort Tickers ASC and other DESC.
# Ex: Bittrex returns a list of tickers ASC (oldest first, newest last)
# when GDAX returns a list of tickers DESC (newest first, oldest last)
# Some exchanges sort OHLCV in ASC order and others in DESC.
# Ex: Bittrex returns the list of OHLCV in ASC order (oldest first, newest last)
# while GDAX returns the list of OHLCV in DESC order (newest first, oldest last)
# Only sort if necessary to save computing time
try:
if data and data[0][0] > data[-1][0]:
@ -670,18 +759,20 @@ class Exchange:
except ccxt.NotSupported as e:
raise OperationalException(
f'Exchange {self._api.name} does not support fetching historical candlestick data.'
f'Message: {e}') from e
f'Exchange {self._api.name} does not support fetching historical '
f'candle (OHLCV) data. Message: {e}') from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(f'Could not load ticker history due to {e.__class__.__name__}. '
raise TemporaryError(f'Could not fetch historical candle (OHLCV) data '
f'for pair {pair} due to {e.__class__.__name__}. '
f'Message: {e}') from e
except ccxt.BaseError as e:
raise OperationalException(f'Could not fetch ticker data. Msg: {e}') from e
raise OperationalException(f'Could not fetch historical candle (OHLCV) data '
f'for pair {pair}. Message: {e}') from e
@retrier_async
async def _async_fetch_trades(self, pair: str,
since: Optional[int] = None,
params: Optional[dict] = None) -> List[Dict]:
params: Optional[dict] = None) -> List[List]:
"""
Asynchronously gets trade history using fetch_trades.
Handles exchange errors, does one call to the exchange.
@ -701,7 +792,7 @@ class Exchange:
'(' + arrow.get(since // 1000).isoformat() + ') ' if since is not None else ''
)
trades = await self._api_async.fetch_trades(pair, since=since, limit=1000)
return trades
return trades_dict_to_list(trades)
except ccxt.NotSupported as e:
raise OperationalException(
f'Exchange {self._api.name} does not support fetching historical trade data.'
@ -715,7 +806,7 @@ class Exchange:
async def _async_get_trade_history_id(self, pair: str,
until: int,
since: Optional[int] = None,
from_id: Optional[str] = None) -> Tuple[str, List[Dict]]:
from_id: Optional[str] = None) -> Tuple[str, List[List]]:
"""
Asynchronously gets trade history using fetch_trades
use this when exchange uses id-based iteration (check `self._trades_pagination`)
@ -726,7 +817,7 @@ class Exchange:
returns tuple: (pair, trades-list)
"""
trades: List[Dict] = []
trades: List[List] = []
if not from_id:
# Fetch first elements using timebased method to get an ID to paginate on
@ -735,7 +826,9 @@ class Exchange:
# e.g. Binance returns the "last 1000" candles within a 1h time interval
# - so we will miss the first trades.
t = await self._async_fetch_trades(pair, since=since)
from_id = t[-1]['id']
# DEFAULT_TRADES_COLUMNS: 0 -> timestamp
# DEFAULT_TRADES_COLUMNS: 1 -> id
from_id = t[-1][1]
trades.extend(t[:-1])
while True:
t = await self._async_fetch_trades(pair,
@ -743,21 +836,21 @@ class Exchange:
if len(t):
# Skip last id since its the key for the next call
trades.extend(t[:-1])
if from_id == t[-1]['id'] or t[-1]['timestamp'] > until:
if from_id == t[-1][1] or t[-1][0] > until:
logger.debug(f"Stopping because from_id did not change. "
f"Reached {t[-1]['timestamp']} > {until}")
f"Reached {t[-1][0]} > {until}")
# Reached the end of the defined-download period - add last trade as well.
trades.extend(t[-1:])
break
from_id = t[-1]['id']
from_id = t[-1][1]
else:
break
return (pair, trades)
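A standalone sketch of the cursor bookkeeping above, using fake trade rows laid out like DEFAULT_TRADES_COLUMNS (index 0 is the timestamp, index 1 the trade id; the remaining columns are illustrative):
page = [
    [1589500800000, 'a1', 'limit', 'buy', 0.0250, 1.0, 0.0250],
    [1589500860000, 'a2', 'limit', 'sell', 0.0260, 2.0, 0.0520],
]
from_id = page[-1][1]    # 'a2' becomes the cursor for the next fetch_trades call
trades = page[:-1]       # drop the last row: it comes back as the first row of the next page
print(from_id, len(trades))   # a2 1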
async def _async_get_trade_history_time(self, pair: str, until: int,
since: Optional[int] = None) -> Tuple[str, List]:
since: Optional[int] = None) -> Tuple[str, List[List]]:
"""
Asynchronously gets trade history using fetch_trades,
when the exchange uses time-based iteration (check `self._trades_pagination`)
@ -767,16 +860,18 @@ class Exchange:
returns tuple: (pair, trades-list)
"""
trades: List[Dict] = []
trades: List[List] = []
# DEFAULT_TRADES_COLUMNS: 0 -> timestamp
# DEFAULT_TRADES_COLUMNS: 1 -> id
while True:
t = await self._async_fetch_trades(pair, since=since)
if len(t):
since = t[-1]['timestamp']
since = t[-1][0]
trades.extend(t)
# Reached the end of the defined-download period
if until and t[-1]['timestamp'] > until:
if until and t[-1][0] > until:
logger.debug(
f"Stopping because until was reached. {t[-1]['timestamp']} > {until}")
f"Stopping because until was reached. {t[-1][0]} > {until}")
break
else:
break
@ -786,7 +881,7 @@ class Exchange:
async def _async_get_trade_history(self, pair: str,
since: Optional[int] = None,
until: Optional[int] = None,
from_id: Optional[str] = None) -> Tuple[str, List[Dict]]:
from_id: Optional[str] = None) -> Tuple[str, List[List]]:
"""
Async wrapper handling downloading trades using either time or id based methods.
"""
@ -809,14 +904,14 @@ class Exchange:
until: Optional[int] = None,
from_id: Optional[str] = None) -> Tuple[str, List]:
"""
Gets candle history using asyncio and returns the list of candles.
Handles all async doing.
Async over one pair, assuming we get `_ohlcv_candle_limit` candles per call.
Get trade history data using asyncio.
Handles all async work and returns the list of trades.
Async over one pair.
:param pair: Pair to download
:param since: Timestamp in milliseconds to get history from
:param until: Timestamp in milliseconds. Defaults to current timestamp if not defined.
:param from_id: Download data starting with ID (if id is known)
:returns List of tickers
:returns List of trade data
"""
if not self.exchange_has("fetchTrades"):
raise OperationalException("This exchange does not suport downloading Trades.")
@ -825,10 +920,18 @@ class Exchange:
self._async_get_trade_history(pair=pair, since=since,
until=until, from_id=from_id))
def check_order_canceled_empty(self, order: Dict) -> bool:
"""
Verify if an order has been cancelled without being partially filled
:param order: Order dict as returned from get_order()
:return: True if order has been cancelled without being filled, False otherwise.
"""
return order.get('status') in ('closed', 'canceled') and order.get('filled') == 0.0
@retrier
def cancel_order(self, order_id: str, pair: str) -> None:
def cancel_order(self, order_id: str, pair: str) -> Dict:
if self._config['dry_run']:
return
return {}
try:
return self._api.cancel_order(order_id, pair)
@ -841,6 +944,37 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def is_cancel_order_result_suitable(self, corder) -> bool:
if not isinstance(corder, dict):
return False
required = ('fee', 'status', 'amount')
return all(k in corder for k in required)
def cancel_order_with_result(self, order_id: str, pair: str, amount: float) -> Dict:
"""
Cancel order returning a result.
Creates a fake result if cancel order returns a non-usable result
and get_order does not work (certain exchanges don't return cancelled orders)
:param order_id: Orderid to cancel
:param pair: Pair corresponding to order_id
:param amount: Amount to use for fake response
:return: Result from either cancel_order if usable, or fetch_order
"""
try:
corder = self.cancel_order(order_id, pair)
if self.is_cancel_order_result_suitable(corder):
return corder
except InvalidOrderException:
logger.warning(f"Could not cancel order {order_id}.")
try:
order = self.get_order(order_id, pair)
except InvalidOrderException:
logger.warning(f"Could not fetch cancelled order {order_id}.")
order = {'fee': {}, 'status': 'canceled', 'amount': amount, 'info': {}}
return order
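The suitability check above boils down to three required keys; a quick standalone illustration of what passes and what triggers the get_order fallback:
def is_cancel_order_result_suitable(corder) -> bool:
    if not isinstance(corder, dict):
        return False
    required = ('fee', 'status', 'amount')
    return all(k in corder for k in required)

print(is_cancel_order_result_suitable({'fee': {}, 'status': 'canceled', 'amount': 1.0}))  # True
print(is_cancel_order_result_suitable({'status': 'canceled'}))                            # False -> fall back to get_order
print(is_cancel_order_result_suitable({}))                                                # False (dry-run cancel returns an empty dict)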
@retrier
def get_order(self, order_id: str, pair: str) -> Dict:
if self._config['dry_run']:
@ -914,15 +1048,15 @@ class Exchange:
return matched_trades
except ccxt.NetworkError as e:
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not get trades due to networking error. Message: {e}') from e
f'Could not get trades due to {e.__class__.__name__}. Message: {e}') from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
@retrier
def get_fee(self, symbol, type='', side='', amount=1,
price=1, taker_or_maker='maker') -> float:
def get_fee(self, symbol: str, type: str = '', side: str = '', amount: float = 1,
price: float = 1, taker_or_maker: str = 'maker') -> float:
try:
# validate that markets are loaded before trying to get fee
if self._api.markets is None or len(self._api.markets) == 0:
@ -936,6 +1070,61 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
@staticmethod
def order_has_fee(order: Dict) -> bool:
"""
Verifies if the passed in order dict has the needed keys to extract fees,
and that these keys (currency, cost) are not empty.
:param order: Order or trade (one trade) dict
:return: True if the fee substructure contains currency and cost, false otherwise
"""
if not isinstance(order, dict):
return False
return ('fee' in order and order['fee'] is not None
and (order['fee'].keys() >= {'currency', 'cost'})
and order['fee']['currency'] is not None
and order['fee']['cost'] is not None
)
def calculate_fee_rate(self, order: Dict) -> Optional[float]:
"""
Calculate fee rate if it's not given by the exchange.
:param order: Order or trade (one trade) dict
"""
if order['fee'].get('rate') is not None:
return order['fee'].get('rate')
fee_curr = order['fee']['currency']
# Calculate fee based on order details
if fee_curr in self.get_pair_base_currency(order['symbol']):
# Base currency - divide by amount
return round(
order['fee']['cost'] / safe_value_fallback(order, order, 'filled', 'amount'), 8)
elif fee_curr in self.get_pair_quote_currency(order['symbol']):
# Quote currency - divide by cost
return round(order['fee']['cost'] / order['cost'], 8)
else:
# If Fee currency is a different currency
try:
comb = self.get_valid_pair_combination(fee_curr, self._config['stake_currency'])
tick = self.fetch_ticker(comb)
fee_to_quote_rate = safe_value_fallback(tick, tick, 'last', 'ask')
return round((order['fee']['cost'] * fee_to_quote_rate) / order['cost'], 8)
except DependencyException:
return None
def extract_cost_curr_rate(self, order: Dict) -> Tuple[float, str, Optional[float]]:
"""
Extract tuple of cost, currency, rate.
Requires order_has_fee to run first!
:param order: Order or trade (one trade) dict
:return: Tuple with cost, currency, rate of the given fee dict
"""
return (order['fee']['cost'],
order['fee']['currency'],
self.calculate_fee_rate(order))
# calculate rate ? (order['fee']['cost'] / (order['amount'] * order['price']))
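A worked example of the quote-currency branch above; all numbers are made up:
order = {
    'symbol': 'ETH/BTC',
    'amount': 2.0,
    'filled': 2.0,
    'cost': 0.05,                       # 2.0 ETH * 0.025 BTC
    'fee': {'currency': 'BTC', 'cost': 0.000125, 'rate': None},
}
fee_rate = round(order['fee']['cost'] / order['cost'], 8)
print(fee_rate)                          # 0.0025, i.e. a 0.25% fee paid in the quote currency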
def is_exchange_bad(exchange_name: str) -> bool:
return exchange_name in BAD_EXCHANGES
@ -945,22 +1134,22 @@ def get_exchange_bad_reason(exchange_name: str) -> str:
return BAD_EXCHANGES.get(exchange_name, "")
def is_exchange_known_ccxt(exchange_name: str, ccxt_module=None) -> bool:
def is_exchange_known_ccxt(exchange_name: str, ccxt_module: CcxtModuleType = None) -> bool:
return exchange_name in ccxt_exchanges(ccxt_module)
def is_exchange_officially_supported(exchange_name: str) -> bool:
return exchange_name in ['bittrex', 'binance']
return exchange_name in ['bittrex', 'binance', 'kraken']
def ccxt_exchanges(ccxt_module=None) -> List[str]:
def ccxt_exchanges(ccxt_module: CcxtModuleType = None) -> List[str]:
"""
Return the list of all exchanges known to ccxt
"""
return ccxt_module.exchanges if ccxt_module is not None else ccxt.exchanges
def available_exchanges(ccxt_module=None) -> List[str]:
def available_exchanges(ccxt_module: CcxtModuleType = None) -> List[str]:
"""
Return exchanges available to the bot, i.e. non-bad exchanges in the ccxt list
"""
@ -1020,7 +1209,8 @@ def timeframe_to_next_date(timeframe: str, date: datetime = None) -> datetime:
return datetime.fromtimestamp(new_timestamp, tz=timezone.utc)
def symbol_is_pair(market_symbol: str, base_currency: str = None, quote_currency: str = None):
def symbol_is_pair(market_symbol: str, base_currency: str = None,
quote_currency: str = None) -> bool:
"""
Check if the market symbol is a pair, i.e. that its symbol consists of the base currency and the
quote currency separated by '/' character. If base_currency and/or quote_currency is passed,
@ -1033,7 +1223,7 @@ def symbol_is_pair(market_symbol: str, base_currency: str = None, quote_currency
(symbol_parts[1] == quote_currency if quote_currency else len(symbol_parts[1]) > 0))
def market_is_active(market):
def market_is_active(market: Dict) -> bool:
"""
Return True if the market is active.
"""

14
freqtrade/exchange/ftx.py Normal file
View File

@ -0,0 +1,14 @@
""" FTX exchange subclass """
import logging
from typing import Dict
from freqtrade.exchange import Exchange
logger = logging.getLogger(__name__)
class Ftx(Exchange):
_ft_has: Dict = {
"ohlcv_candle_limit": 1500,
}

freqtrade/exchange/kraken.py
View File

@ -4,7 +4,8 @@ from typing import Dict
import ccxt
from freqtrade import OperationalException, TemporaryError
from freqtrade.exceptions import (DependencyException, InvalidOrderException,
OperationalException, TemporaryError)
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange import retrier
@ -15,6 +16,7 @@ class Kraken(Exchange):
_params: Dict = {"trading_agreement": "agree"}
_ft_has: Dict = {
"stoploss_on_exchange": True,
"trades_pagination": "id",
"trades_pagination_arg": "since",
}
@ -48,3 +50,51 @@ class Kraken(Exchange):
f'Could not get balance due to {e.__class__.__name__}. Message: {e}') from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
def stoploss_adjust(self, stop_loss: float, order: Dict) -> bool:
"""
Verify stop_loss against stoploss-order value (limit or price)
Returns True if adjustment is necessary.
"""
return order['type'] == 'stop-loss' and stop_loss > float(order['price'])
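A worked example of the check above, with the order dict trimmed to the two fields used:
existing = {'type': 'stop-loss', 'price': '0.0240'}
print(existing['type'] == 'stop-loss' and 0.0245 > float(existing['price']))  # True  -> replace the stoploss order
print(existing['type'] == 'stop-loss' and 0.0235 > float(existing['price']))  # False -> keep the existing order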
def stoploss(self, pair: str, amount: float, stop_price: float, order_types: Dict) -> Dict:
"""
Creates a stoploss market order.
Stoploss market orders are the only stoploss type supported by Kraken.
"""
ordertype = "stop-loss"
stop_price = self.price_to_precision(pair, stop_price)
if self._config['dry_run']:
dry_order = self.dry_run_order(
pair, ordertype, "sell", amount, stop_price)
return dry_order
try:
params = self._params.copy()
amount = self.amount_to_precision(pair, amount)
order = self._api.create_order(symbol=pair, type=ordertype, side='sell',
amount=amount, price=stop_price, params=params)
logger.info('stoploss order added for %s. '
'stop price: %s.', pair, stop_price)
return order
except ccxt.InsufficientFunds as e:
raise DependencyException(
f'Insufficient funds to create {ordertype} sell order on market {pair}. '
f'Tried to create stoploss with amount {amount} at stoploss {stop_price}. '
f'Message: {e}') from e
except ccxt.InvalidOrder as e:
raise InvalidOrderException(
f'Could not create {ordertype} sell order on market {pair}. '
f'Tried to create stoploss with amount {amount} at stoploss {stop_price}. '
f'Message: {e}') from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not place sell order due to {e.__class__.__name__}. Message: {e}') from e
except ccxt.BaseError as e:
raise OperationalException(e) from e

File diff suppressed because it is too large

freqtrade/loggers.py
View File

@ -5,7 +5,7 @@ from logging import Formatter
from logging.handlers import RotatingFileHandler, SysLogHandler
from typing import Any, Dict, List
from freqtrade import OperationalException
from freqtrade.exceptions import OperationalException
logger = logging.getLogger(__name__)
@ -18,13 +18,13 @@ def _set_loggers(verbosity: int = 0) -> None:
"""
logging.getLogger('requests').setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger("urllib3").setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger('ccxt.base.exchange').setLevel(
logging.INFO if verbosity <= 2 else logging.DEBUG
logging.INFO if verbosity <= 2 else logging.DEBUG
)
logging.getLogger('telegram').setLevel(logging.INFO)

freqtrade/main.py
View File

@ -4,6 +4,7 @@ Main Freqtrade bot script.
Read the documentation to know what cli arguments you need.
"""
from freqtrade.exceptions import FreqtradeException, OperationalException
import sys
# check min. python version
if sys.version_info < (3, 6):
@ -13,8 +14,7 @@ if sys.version_info < (3, 6):
import logging
from typing import Any, List
from freqtrade import OperationalException
from freqtrade.configuration import Arguments
from freqtrade.commands import Arguments
logger = logging.getLogger('freqtrade')
@ -38,8 +38,8 @@ def main(sysargv: List[str] = None) -> None:
# No subcommand was issued.
raise OperationalException(
"Usage of Freqtrade requires a subcommand to be specified.\n"
"To have the previous behavior (bot executing trades in live/dry-run modes, "
"depending on the value of the `dry_run` setting in the config), run freqtrade "
"To have the bot executing trades in live/dry-run modes, "
"depending on the value of the `dry_run` setting in the config, run Freqtrade "
"as `freqtrade trade [options...]`.\n"
"To see the full list of options available, please use "
"`freqtrade --help` or `freqtrade <command> --help`."
@ -50,7 +50,7 @@ def main(sysargv: List[str] = None) -> None:
except KeyboardInterrupt:
logger.info('SIGINT received, aborting ...')
return_code = 0
except OperationalException as e:
except FreqtradeException as e:
logger.error(str(e))
return_code = 2
except Exception:

freqtrade/misc.py
View File

@ -6,6 +6,7 @@ import logging
import re
from datetime import datetime
from pathlib import Path
from typing import Any
from typing.io import IO
import numpy as np
@ -40,28 +41,30 @@ def datesarray_to_datetimearray(dates: np.ndarray) -> np.ndarray:
return dates.dt.to_pydatetime()
def file_dump_json(filename: Path, data, is_zip=False) -> None:
def file_dump_json(filename: Path, data: Any, is_zip: bool = False) -> None:
"""
Dump JSON data into a file
:param filename: file to create
:param data: JSON Data to save
:return:
"""
logger.info(f'dumping json to "{filename}"')
if is_zip:
if filename.suffix != '.gz':
filename = filename.with_suffix('.gz')
logger.info(f'dumping json to "{filename}"')
with gzip.open(filename, 'w') as fp:
rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)
else:
logger.info(f'dumping json to "{filename}"')
with open(filename, 'w') as fp:
rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)
logger.debug(f'done json to "{filename}"')
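Illustrative usage (paths are arbitrary); with is_zip=True the '.json' suffix is swapped for '.gz' before writing:
from pathlib import Path
from freqtrade.misc import file_dump_json

file_dump_json(Path('/tmp/pairs.json'), {'pair': 'ETH/BTC', 'trades': 2})
file_dump_json(Path('/tmp/pairs.json'), {'pair': 'ETH/BTC', 'trades': 2}, is_zip=True)  # writes /tmp/pairs.gz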
def json_load(datafile: IO):
def json_load(datafile: IO) -> Any:
"""
load data with rapidjson
Use this to have a consistent experience,
@ -78,18 +81,24 @@ def file_load_json(file):
gzipfile = file
# Try gzip file first, otherwise regular json file.
if gzipfile.is_file():
logger.debug('Loading ticker data from file %s', gzipfile)
with gzip.open(gzipfile) as tickerdata:
pairdata = json_load(tickerdata)
logger.debug(f"Loading historical data from file {gzipfile}")
with gzip.open(gzipfile) as datafile:
pairdata = json_load(datafile)
elif file.is_file():
logger.debug('Loading ticker data from file %s', file)
with open(file) as tickerdata:
pairdata = json_load(tickerdata)
logger.debug(f"Loading historical data from file {file}")
with open(file) as datafile:
pairdata = json_load(datafile)
else:
return None
return pairdata
def pair_to_filename(pair: str) -> str:
for ch in ['/', '-', ' ', '.', '@', '$', '+', ':']:
pair = pair.replace(ch, '_')
return pair
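Worked examples of the sanitiser above (assuming it is importable from freqtrade.misc):
from freqtrade.misc import pair_to_filename

print(pair_to_filename('ETH/BTC'))    # ETH_BTC
print(pair_to_filename('ETH-PERP'))   # ETH_PERP  (FTX-style symbols are covered by the '-' rule)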
def format_ms_time(date: int) -> str:
"""
convert MS date to readable format.
@ -125,11 +134,26 @@ def round_dict(d, n):
return {k: (round(v, n) if isinstance(v, float) else v) for k, v in d.items()}
def plural(num, singular: str, plural: str = None) -> str:
def safe_value_fallback(dict1: dict, dict2: dict, key1: str, key2: str, default_value=None):
"""
Search a value in dict1, return this if it's not None.
Fall back to dict2 - return key2 from dict2 if it's not None.
Otherwise return default_value (which defaults to None).
"""
if key1 in dict1 and dict1[key1] is not None:
return dict1[key1]
else:
if key2 in dict2 and dict2[key2] is not None:
return dict2[key2]
return default_value
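Worked examples mirroring the call sites in exchange.py above; all values are made up:
from freqtrade.misc import safe_value_fallback

order = {'filled': None, 'amount': 2.0}
print(safe_value_fallback(order, order, 'filled', 'amount'))        # 2.0 (filled is None, so fall back)
tick = {'last': 0.0251, 'ask': 0.0252}
print(safe_value_fallback(tick, tick, 'last', 'ask'))               # 0.0251 (first key wins)
print(safe_value_fallback({}, {}, 'last', 'ask', default_value=0))  # 0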
def plural(num: float, singular: str, plural: str = None) -> str:
return singular if (num == 1 or num == -1) else plural or singular + 's'
def render_template(templatefile: str, arguments: dict = {}):
def render_template(templatefile: str, arguments: dict = {}) -> str:
from jinja2 import Environment, PackageLoader, select_autoescape
@ -138,5 +162,16 @@ def render_template(templatefile: str, arguments: dict = {}):
autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template(templatefile)
return template.render(**arguments)
def render_template_with_fallback(templatefile: str, templatefallbackfile: str,
arguments: dict = {}) -> str:
"""
Use templatefile if possible, otherwise fall back to templatefallbackfile
"""
from jinja2.exceptions import TemplateNotFound
try:
return render_template(templatefile, arguments)
except TemplateNotFound:
return render_template(templatefallbackfile, arguments)
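Illustrative usage; the template names and argument are hypothetical and only show the fallback mechanism:
from freqtrade.misc import render_template_with_fallback

config_text = render_template_with_fallback(
    templatefile='subtemplates/exchange_kraken.j2',           # hypothetical exchange-specific template
    templatefallbackfile='subtemplates/exchange_generic.j2',  # hypothetical generic fallback
    arguments={'exchange_name': 'kraken'},
)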

freqtrade/optimize/__init__.py
View File

@ -1,102 +0,0 @@
import logging
from typing import Any, Dict
from freqtrade import DependencyException, constants, OperationalException
from freqtrade.state import RunMode
from freqtrade.utils import setup_utils_configuration
logger = logging.getLogger(__name__)
def setup_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:
"""
Prepare the configuration for the Hyperopt module
:param args: Cli args from Arguments()
:return: Configuration
"""
config = setup_utils_configuration(args, method)
if method == RunMode.BACKTEST:
if config['stake_amount'] == constants.UNLIMITED_STAKE_AMOUNT:
raise DependencyException('stake amount could not be "%s" for backtesting' %
constants.UNLIMITED_STAKE_AMOUNT)
return config
def start_backtesting(args: Dict[str, Any]) -> None:
"""
Start Backtesting script
:param args: Cli args from Arguments()
:return: None
"""
# Import here to avoid loading backtesting module when it's not used
from freqtrade.optimize.backtesting import Backtesting
# Initialize configuration
config = setup_configuration(args, RunMode.BACKTEST)
logger.info('Starting freqtrade in Backtesting mode')
# Initialize backtesting object
backtesting = Backtesting(config)
backtesting.start()
def start_hyperopt(args: Dict[str, Any]) -> None:
"""
Start hyperopt script
:param args: Cli args from Arguments()
:return: None
"""
# Import here to avoid loading hyperopt module when it's not used
try:
from filelock import FileLock, Timeout
from freqtrade.optimize.hyperopt import Hyperopt
except ImportError as e:
raise OperationalException(
f"{e}. Please ensure that the hyperopt dependencies are installed.") from e
# Initialize configuration
config = setup_configuration(args, RunMode.HYPEROPT)
logger.info('Starting freqtrade in Hyperopt mode')
lock = FileLock(Hyperopt.get_lock_filename(config))
try:
with lock.acquire(timeout=1):
# Remove noisy log messages
logging.getLogger('hyperopt.tpe').setLevel(logging.WARNING)
logging.getLogger('filelock').setLevel(logging.WARNING)
# Initialize backtesting object
hyperopt = Hyperopt(config)
hyperopt.start()
except Timeout:
logger.info("Another running instance of freqtrade Hyperopt detected.")
logger.info("Simultaneous execution of multiple Hyperopt commands is not supported. "
"Hyperopt module is resource hungry. Please run your Hyperopt sequentially "
"or on separate machines.")
logger.info("Quitting now.")
# TODO: return False here in order to help freqtrade to exit
# with non-zero exit code...
# Same in Edge and Backtesting start() functions.
def start_edge(args: Dict[str, Any]) -> None:
"""
Start Edge script
:param args: Cli args from Arguments()
:return: None
"""
from freqtrade.optimize.edge_cli import EdgeCli
# Initialize configuration
config = setup_configuration(args, RunMode.EDGE)
logger.info('Starting freqtrade in Edge mode')
# Initialize Edge object
edge_cli = EdgeCli(config)
edge_cli.start()

freqtrade/optimize/backtesting.py
View File

@ -6,23 +6,25 @@ This module contains the backtesting logic
import logging
from copy import deepcopy
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional
from typing import Any, Dict, List, NamedTuple, Optional, Tuple
import arrow
from pandas import DataFrame
from tabulate import tabulate
from freqtrade import OperationalException
from freqtrade.configuration import (TimeRange, remove_credentials,
validate_config_consistency)
from freqtrade.data import history
from freqtrade.data.converter import trim_dataframe
from freqtrade.data.dataprovider import DataProvider
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_minutes, timeframe_to_seconds
from freqtrade.misc import file_dump_json
from freqtrade.optimize.optimize_reports import (show_backtest_results,
store_backtest_result)
from freqtrade.pairlist.pairlistmanager import PairListManager
from freqtrade.persistence import Trade
from freqtrade.resolvers import ExchangeResolver, StrategyResolver
from freqtrade.state import RunMode
from freqtrade.strategy.interface import IStrategy, SellType
from freqtrade.strategy.interface import IStrategy, SellCheckTuple, SellType
logger = logging.getLogger(__name__)
@ -60,12 +62,21 @@ class Backtesting:
# Reset keys for backtesting
remove_credentials(self.config)
self.strategylist: List[IStrategy] = []
self.exchange = ExchangeResolver(self.config['exchange']['name'], self.config).exchange
self.exchange = ExchangeResolver.load_exchange(self.config['exchange']['name'], self.config)
self.pairlists = PairListManager(self.exchange, self.config)
if 'VolumePairList' in self.pairlists.name_list:
raise OperationalException("VolumePairList not allowed for backtesting.")
self.pairlists.refresh_pairlist()
if len(self.pairlists.whitelist) == 0:
raise OperationalException("No pair in whitelist.")
if config.get('fee'):
self.fee = config['fee']
else:
self.fee = self.exchange.get_fee(symbol=self.config['exchange']['pair_whitelist'][0])
self.fee = self.exchange.get_fee(symbol=self.pairlists.whitelist[0])
if self.config.get('runmode') != RunMode.HYPEROPT:
self.dataprovider = DataProvider(self.config, self.exchange)
@ -75,17 +86,17 @@ class Backtesting:
for strat in list(self.config['strategy_list']):
stratconf = deepcopy(self.config)
stratconf['strategy'] = strat
self.strategylist.append(StrategyResolver(stratconf).strategy)
self.strategylist.append(StrategyResolver.load_strategy(stratconf))
validate_config_consistency(stratconf)
else:
# No strategy list specified, only one strategy
self.strategylist.append(StrategyResolver(self.config).strategy)
self.strategylist.append(StrategyResolver.load_strategy(self.config))
validate_config_consistency(self.config)
if "ticker_interval" not in self.config:
raise OperationalException("Ticker-interval needs to be set in either configuration "
"or as cli argument `--ticker-interval 5m`")
raise OperationalException("Timeframe (ticker interval) needs to be set in either "
"configuration or as cli argument `--ticker-interval 5m`")
self.timeframe = str(self.config.get('ticker_interval'))
self.timeframe_min = timeframe_to_minutes(self.timeframe)
@ -104,17 +115,18 @@ class Backtesting:
# And the regular "stoploss" function would not apply to that case
self.strategy.order_types['stoploss_on_exchange'] = False
def load_bt_data(self):
def load_bt_data(self) -> Tuple[Dict[str, DataFrame], TimeRange]:
timerange = TimeRange.parse_timerange(None if self.config.get(
'timerange') is None else str(self.config.get('timerange')))
data = history.load_data(
datadir=Path(self.config['datadir']),
pairs=self.config['exchange']['pair_whitelist'],
datadir=self.config['datadir'],
pairs=self.pairlists.whitelist,
timeframe=self.timeframe,
timerange=timerange,
startup_candles=self.required_startup,
fail_without_data=True,
data_format=self.config.get('dataformat_ohlcv', 'json'),
)
min_date, max_date = history.get_timerange(data)
@ -129,139 +141,36 @@ class Backtesting:
return data, timerange
def _generate_text_table(self, data: Dict[str, Dict], results: DataFrame,
skip_nan: bool = False) -> str:
def _get_ohlcv_as_lists(self, processed: Dict) -> Dict[str, DataFrame]:
"""
Generates and returns a text table for the given backtest data and the results dataframe
:return: pretty printed table with tabulate as str
"""
stake_currency = str(self.config.get('stake_currency'))
max_open_trades = self.config.get('max_open_trades')
floatfmt = ('s', 'd', '.2f', '.2f', '.8f', '.2f', 'd', '.1f', '.1f')
tabular_data = []
headers = ['pair', 'buy count', 'avg profit %', 'cum profit %',
'tot profit ' + stake_currency, 'tot profit %', 'avg duration',
'profit', 'loss']
for pair in data:
result = results[results.pair == pair]
if skip_nan and result.profit_abs.isnull().all():
continue
tabular_data.append([
pair,
len(result.index),
result.profit_percent.mean() * 100.0,
result.profit_percent.sum() * 100.0,
result.profit_abs.sum(),
result.profit_percent.sum() * 100.0 / max_open_trades,
str(timedelta(
minutes=round(result.trade_duration.mean()))) if not result.empty else '0:00',
len(result[result.profit_abs > 0]),
len(result[result.profit_abs < 0])
])
# Append Total
tabular_data.append([
'TOTAL',
len(results.index),
results.profit_percent.mean() * 100.0,
results.profit_percent.sum() * 100.0,
results.profit_abs.sum(),
results.profit_percent.sum() * 100.0 / max_open_trades,
str(timedelta(
minutes=round(results.trade_duration.mean()))) if not results.empty else '0:00',
len(results[results.profit_abs > 0]),
len(results[results.profit_abs < 0])
])
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(tabular_data, headers=headers,
floatfmt=floatfmt, tablefmt="pipe") # type: ignore
def _generate_text_table_sell_reason(self, data: Dict[str, Dict], results: DataFrame) -> str:
"""
Generate small table outlining Backtest results
"""
tabular_data = []
headers = ['Sell Reason', 'Count']
for reason, count in results['sell_reason'].value_counts().iteritems():
tabular_data.append([reason.value, count])
return tabulate(tabular_data, headers=headers, tablefmt="pipe")
def _generate_text_table_strategy(self, all_results: dict) -> str:
"""
Generate summary table per strategy
"""
stake_currency = str(self.config.get('stake_currency'))
max_open_trades = self.config.get('max_open_trades')
floatfmt = ('s', 'd', '.2f', '.2f', '.8f', '.2f', 'd', '.1f', '.1f')
tabular_data = []
headers = ['Strategy', 'buy count', 'avg profit %', 'cum profit %',
'tot profit ' + stake_currency, 'tot profit %', 'avg duration',
'profit', 'loss']
for strategy, results in all_results.items():
tabular_data.append([
strategy,
len(results.index),
results.profit_percent.mean() * 100.0,
results.profit_percent.sum() * 100.0,
results.profit_abs.sum(),
results.profit_percent.sum() * 100.0 / max_open_trades,
str(timedelta(
minutes=round(results.trade_duration.mean()))) if not results.empty else '0:00',
len(results[results.profit_abs > 0]),
len(results[results.profit_abs < 0])
])
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(tabular_data, headers=headers,
floatfmt=floatfmt, tablefmt="pipe") # type: ignore
def _store_backtest_result(self, recordfilename: Path, results: DataFrame,
strategyname: Optional[str] = None) -> None:
records = [(t.pair, t.profit_percent, t.open_time.timestamp(),
t.close_time.timestamp(), t.open_index - 1, t.trade_duration,
t.open_rate, t.close_rate, t.open_at_end, t.sell_reason.value)
for index, t in results.iterrows()]
if records:
if strategyname:
# Inject strategyname to filename
recordfilename = Path.joinpath(
recordfilename.parent,
f'{recordfilename.stem}-{strategyname}').with_suffix(recordfilename.suffix)
logger.info(f'Dumping backtest results to {recordfilename}')
file_dump_json(recordfilename, records)
def _get_ticker_list(self, processed) -> Dict[str, DataFrame]:
"""
Helper function to convert a processed tickerlist into a list for performance reasons.
Helper function to convert processed dataframes into lists for performance reasons.
Used by backtest() - so keep this optimized for performance.
"""
headers = ['date', 'buy', 'open', 'close', 'sell', 'low', 'high']
ticker: Dict = {}
# Create ticker dict
data: Dict = {}
# Create dict with data
for pair, pair_data in processed.items():
pair_data.loc[:, 'buy'] = 0 # cleanup from previous run
pair_data.loc[:, 'sell'] = 0 # cleanup from previous run
ticker_data = self.strategy.advise_sell(
df_analyzed = self.strategy.advise_sell(
self.strategy.advise_buy(pair_data, {'pair': pair}), {'pair': pair})[headers].copy()
# to avoid using data from future, we buy/sell with signal from previous candle
ticker_data.loc[:, 'buy'] = ticker_data['buy'].shift(1)
ticker_data.loc[:, 'sell'] = ticker_data['sell'].shift(1)
# To avoid using data from future, we use buy/sell signals shifted
# from the previous candle
df_analyzed.loc[:, 'buy'] = df_analyzed.loc[:, 'buy'].shift(1)
df_analyzed.loc[:, 'sell'] = df_analyzed.loc[:, 'sell'].shift(1)
ticker_data.drop(ticker_data.head(1).index, inplace=True)
df_analyzed.drop(df_analyzed.head(1).index, inplace=True)
# Convert from Pandas to list for performance reasons
# (Looping Pandas is slow.)
ticker[pair] = [x for x in ticker_data.itertuples()]
return ticker
data[pair] = [x for x in df_analyzed.itertuples()]
return data
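A minimal sketch of the shift(1) guard above: a signal computed on one candle only becomes actionable on the next one, so the backtest cannot act on future data.
from pandas import DataFrame

df = DataFrame({'date': [1, 2, 3], 'buy': [0, 1, 0]})
df['buy'] = df['buy'].shift(1)    # the candle-2 signal is only visible at candle 3
print(df)
#    date  buy
# 0     1  NaN
# 1     2  0.0
# 2     3  1.0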
def _get_close_rate(self, sell_row, trade: Trade, sell, trade_dur) -> float:
def _get_close_rate(self, sell_row, trade: Trade, sell: SellCheckTuple,
trade_dur: int) -> float:
"""
Get close rate for backtesting result
"""
@ -302,7 +211,7 @@ class Backtesting:
def _get_sell_trade_entry(
self, pair: str, buy_row: DataFrame,
partial_ticker: List, trade_count_lock: Dict,
partial_ohlcv: List, trade_count_lock: Dict,
stake_amount: float, max_open_trades: int) -> Optional[BacktestResult]:
trade = Trade(
@ -317,7 +226,7 @@ class Backtesting:
)
logger.debug(f"{pair} - Backtesting emulates creation of new trade: {trade}.")
# calculate win/lose forwards from buy point
for sell_row in partial_ticker:
for sell_row in partial_ohlcv:
if max_open_trades > 0:
# Increase trade_count_lock for every iteration
trade_count_lock[sell_row.date] = trade_count_lock.get(sell_row.date, 0) + 1
@ -341,9 +250,9 @@ class Backtesting:
close_rate=closerate,
sell_reason=sell.sell_type
)
if partial_ticker:
if partial_ohlcv:
# no sell condition found - trade still open at end of backtest period
sell_row = partial_ticker[-1]
sell_row = partial_ohlcv[-1]
bt_res = BacktestResult(pair=pair,
profit_percent=trade.calc_profit_ratio(rate=sell_row.open),
profit_abs=trade.calc_profit(rate=sell_row.open),
@ -365,35 +274,34 @@ class Backtesting:
return bt_res
return None
def backtest(self, args: Dict) -> DataFrame:
def backtest(self, processed: Dict, stake_amount: float,
start_date: arrow.Arrow, end_date: arrow.Arrow,
max_open_trades: int = 0, position_stacking: bool = False) -> DataFrame:
"""
Implements backtesting functionality
Implement backtesting functionality
NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized.
Of course, try not to have ugly code. Note that some accessors are sometimes slower than plain functions.
Avoid, logging on this method
Avoid extensive logging in this method and functions it calls.
:param args: a dict containing:
stake_amount: btc amount to use for each trade
processed: a processed dictionary with format {pair, data}
max_open_trades: maximum number of concurrent trades (default: 0, disabled)
position_stacking: do we allow position stacking? (default: False)
:return: DataFrame
:param processed: a processed dictionary with format {pair, data}
:param stake_amount: amount to use for each trade
:param start_date: backtesting timerange start datetime
:param end_date: backtesting timerange end datetime
:param max_open_trades: maximum number of concurrent trades, <= 0 means unlimited
:param position_stacking: do we allow position stacking?
:return: DataFrame with trades (results of backtesting)
"""
# Arguments are long and noisy, so this is commented out.
# Uncomment if you need to debug the backtest() method.
# logger.debug(f"Start backtest, args: {args}")
processed = args['processed']
stake_amount = args['stake_amount']
max_open_trades = args.get('max_open_trades', 0)
position_stacking = args.get('position_stacking', False)
start_date = args['start_date']
end_date = args['end_date']
logger.debug(f"Run backtest, stake_amount: {stake_amount}, "
f"start_date: {start_date}, end_date: {end_date}, "
f"max_open_trades: {max_open_trades}, position_stacking: {position_stacking}"
)
trades = []
trade_count_lock: Dict = {}
# Dict of ticker-lists for performance (looping lists is a lot faster than dataframes)
ticker: Dict = self._get_ticker_list(processed)
# Use dict of lists with data for performance
# (looping lists is a lot faster than pandas DataFrames)
data: Dict = self._get_ohlcv_as_lists(processed)
lock_pair_until: Dict = {}
# Indexes per pair, so some pairs are allowed to have a missing start.
@ -403,12 +311,12 @@ class Backtesting:
# Loop timerange and get candle for each pair at that point in time
while tmp < end_date:
for i, pair in enumerate(ticker):
for i, pair in enumerate(data):
if pair not in indexes:
indexes[pair] = 0
try:
row = ticker[pair][indexes[pair]]
row = data[pair][indexes[pair]]
except IndexError:
# missing Data for one pair at the end.
# Warnings for this are shown during data loading
@ -436,7 +344,7 @@ class Backtesting:
# since indexes has been incremented before, we need to go one step back to
# also check the buying candle for sell conditions.
trade_entry = self._get_sell_trade_entry(pair, row, ticker[pair][indexes[pair]-1:],
trade_entry = self._get_sell_trade_entry(pair, row, data[pair][indexes[pair]-1:],
trade_count_lock, stake_amount,
max_open_trades)
@ -455,18 +363,21 @@ class Backtesting:
def start(self) -> None:
"""
Run a backtesting end-to-end
Run backtesting end-to-end
:return: None
"""
data: Dict[str, Any] = {}
logger.info('Using stake_currency: %s ...', self.config['stake_currency'])
logger.info('Using stake_amount: %s ...', self.config['stake_amount'])
# Use max_open_trades in backtesting, except --disable-max-market-positions is set
if self.config.get('use_max_market_positions', True):
max_open_trades = self.config['max_open_trades']
else:
logger.info('Ignoring max_open_trades (--disable-max-market-positions was used) ...')
max_open_trades = 0
position_stacking = self.config.get('position_stacking', False)
data, timerange = self.load_bt_data()
@ -476,11 +387,11 @@ class Backtesting:
self._set_strategy(strat)
# need to reprocess data every time to populate signals
preprocessed = self.strategy.tickerdata_to_dataframe(data)
preprocessed = self.strategy.ohlcvdata_to_dataframe(data)
# Trim startup period from analyzed dataframe
for pair, df in preprocessed.items():
preprocessed[pair] = history.trim_dataframe(df, timerange)
preprocessed[pair] = trim_dataframe(df, timerange)
min_date, max_date = history.get_timerange(preprocessed)
logger.info(
@ -489,34 +400,15 @@ class Backtesting:
)
# Execute backtest and print results
all_results[self.strategy.get_strategy_name()] = self.backtest(
{
'stake_amount': self.config.get('stake_amount'),
'processed': preprocessed,
'max_open_trades': max_open_trades,
'position_stacking': self.config.get('position_stacking', False),
'start_date': min_date,
'end_date': max_date,
}
processed=preprocessed,
stake_amount=self.config['stake_amount'],
start_date=min_date,
end_date=max_date,
max_open_trades=max_open_trades,
position_stacking=position_stacking,
)
for strategy, results in all_results.items():
if self.config.get('export', False):
self._store_backtest_result(Path(self.config['exportfilename']), results,
strategy if len(self.strategylist) > 1 else None)
print(f"Result for strategy {strategy}")
print(' BACKTESTING REPORT '.center(133, '='))
print(self._generate_text_table(data, results))
print(' SELL REASON STATS '.center(133, '='))
print(self._generate_text_table_sell_reason(data, results))
print(' LEFT OPEN TRADES REPORT '.center(133, '='))
print(self._generate_text_table(data, results.loc[results.open_at_end], True))
print()
if len(all_results) > 1:
# Print Strategy summary table
print(' Strategy Summary '.center(133, '='))
print(self._generate_text_table_strategy(all_results))
print('\nFor more details, please look at the detail tables above')
if self.config.get('export', False):
store_backtest_result(self.config['exportfilename'], all_results)
# Show backtest results
show_backtest_results(self.config, data, all_results)

freqtrade/optimize/edge_cli.py
View File

@ -6,14 +6,12 @@ This module contains the edge backtesting interface
import logging
from typing import Any, Dict
from tabulate import tabulate
from freqtrade import constants
from freqtrade.configuration import (TimeRange, remove_credentials,
validate_config_consistency)
from freqtrade.edge import Edge
from freqtrade.exchange import Exchange
from freqtrade.resolvers import StrategyResolver
from freqtrade.optimize.optimize_reports import generate_edge_table
from freqtrade.resolvers import ExchangeResolver, StrategyResolver
logger = logging.getLogger(__name__)
@ -33,8 +31,8 @@ class EdgeCli:
# Reset keys for edge
remove_credentials(self.config)
self.config['stake_amount'] = constants.UNLIMITED_STAKE_AMOUNT
self.exchange = Exchange(self.config)
self.strategy = StrategyResolver(self.config).strategy
self.exchange = ExchangeResolver.load_exchange(self.config['exchange']['name'], self.config)
self.strategy = StrategyResolver.load_strategy(self.config)
validate_config_consistency(self.config)
@ -42,38 +40,11 @@ class EdgeCli:
# Set refresh_pairs to false for edge-cli (it must be true for edge)
self.edge._refresh_pairs = False
self.timerange = TimeRange.parse_timerange(None if self.config.get(
self.edge._timerange = TimeRange.parse_timerange(None if self.config.get(
'timerange') is None else str(self.config.get('timerange')))
self.edge._timerange = self.timerange
def _generate_edge_table(self, results: dict) -> str:
floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', '.d')
tabular_data = []
headers = ['pair', 'stoploss', 'win rate', 'risk reward ratio',
'required risk reward', 'expectancy', 'total number of trades',
'average duration (min)']
for result in results.items():
if result[1].nb_trades > 0:
tabular_data.append([
result[0],
result[1].stoploss,
result[1].winrate,
result[1].risk_reward_ratio,
result[1].required_risk_reward,
result[1].expectancy,
result[1].nb_trades,
round(result[1].avg_trade_duration)
])
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(tabular_data, headers=headers,
floatfmt=floatfmt, tablefmt="pipe") # type: ignore
def start(self) -> None:
result = self.edge.calculate()
if result:
print('') # blank line for readability
print(self._generate_edge_table(self.edge._cached_pairs))
print(generate_edge_table(self.edge._cached_pairs))

freqtrade/optimize/hyperopt.py
View File

@ -7,8 +7,8 @@ This module contains the hyperopt logic
import locale
import logging
import random
import sys
import warnings
from math import ceil
from collections import OrderedDict
from operator import itemgetter
from pathlib import Path
@ -17,18 +17,22 @@ from typing import Any, Dict, List, Optional
import rapidjson
from colorama import Fore, Style
from colorama import init as colorama_init
from joblib import (Parallel, cpu_count, delayed, dump, load,
wrap_non_picklable_objects)
from pandas import DataFrame
from pandas import DataFrame, json_normalize, isna
import progressbar
import tabulate
from os import path
import io
from freqtrade import OperationalException
from freqtrade.data.history import get_timerange, trim_dataframe
from freqtrade.data.converter import trim_dataframe
from freqtrade.data.history import get_timerange
from freqtrade.exceptions import OperationalException
from freqtrade.misc import plural, round_dict
from freqtrade.optimize.backtesting import Backtesting
# Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt_interface import IHyperOpt # noqa: F4
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss # noqa: F4
from freqtrade.optimize.hyperopt_interface import IHyperOpt # noqa: F401
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss # noqa: F401
from freqtrade.resolvers.hyperopt_resolver import (HyperOptLossResolver,
HyperOptResolver)
@ -38,15 +42,16 @@ with warnings.catch_warnings():
from skopt import Optimizer
from skopt.space import Dimension
progressbar.streams.wrap_stderr()
progressbar.streams.wrap_stdout()
logger = logging.getLogger(__name__)
INITIAL_POINTS = 30
# Keep no more than 2*SKOPT_MODELS_MAX_NUM models
# in the skopt models list
SKOPT_MODELS_MAX_NUM = 10
# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10
MAX_LOSS = 100000 # just a big enough number to be bad result in loss optimization
@ -59,20 +64,21 @@ class Hyperopt:
hyperopt = Hyperopt(config)
hyperopt.start()
"""
def __init__(self, config: Dict[str, Any]) -> None:
self.config = config
self.backtesting = Backtesting(self.config)
self.custom_hyperopt = HyperOptResolver(self.config).hyperopt
self.custom_hyperopt = HyperOptResolver.load_hyperopt(self.config)
self.custom_hyperoptloss = HyperOptLossResolver(self.config).hyperoptloss
self.custom_hyperoptloss = HyperOptLossResolver.load_hyperoptloss(self.config)
self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function
self.trials_file = (self.config['user_data_dir'] /
'hyperopt_results' / 'hyperopt_results.pickle')
self.tickerdata_pickle = (self.config['user_data_dir'] /
'hyperopt_results' / 'hyperopt_tickerdata.pkl')
self.results_file = (self.config['user_data_dir'] /
'hyperopt_results' / 'hyperopt_results.pickle')
self.data_pickle_file = (self.config['user_data_dir'] /
'hyperopt_results' / 'hyperopt_tickerdata.pkl')
self.total_epochs = config.get('epochs', 0)
self.current_best_loss = 100
@ -82,21 +88,21 @@ class Hyperopt:
else:
logger.info("Continuing on previous hyperopt results.")
self.num_trials_saved = 0
self.num_epochs_saved = 0
# Previous evaluations
self.trials: List = []
self.epochs: List = []
# Populate functions here (hasattr is slow so should not be run during "regular" operations)
if hasattr(self.custom_hyperopt, 'populate_indicators'):
self.backtesting.strategy.advise_indicators = \
self.custom_hyperopt.populate_indicators # type: ignore
self.custom_hyperopt.populate_indicators # type: ignore
if hasattr(self.custom_hyperopt, 'populate_buy_trend'):
self.backtesting.strategy.advise_buy = \
self.custom_hyperopt.populate_buy_trend # type: ignore
self.custom_hyperopt.populate_buy_trend # type: ignore
if hasattr(self.custom_hyperopt, 'populate_sell_trend'):
self.backtesting.strategy.advise_sell = \
self.custom_hyperopt.populate_sell_trend # type: ignore
self.custom_hyperopt.populate_sell_trend # type: ignore
# Use max_open_trades for hyperopt as well, except --disable-max-market-positions is set
if self.config.get('use_max_market_positions', True):
@ -113,19 +119,20 @@ class Hyperopt:
self.config['ask_strategy']['use_sell_signal'] = True
self.print_all = self.config.get('print_all', False)
self.hyperopt_table_header = 0
self.print_colorized = self.config.get('print_colorized', False)
self.print_json = self.config.get('print_json', False)
@staticmethod
def get_lock_filename(config) -> str:
def get_lock_filename(config: Dict[str, Any]) -> str:
return str(config['user_data_dir'] / 'hyperopt.lock')
def clean_hyperopt(self):
def clean_hyperopt(self) -> None:
"""
Remove hyperopt pickle files to restart hyperopt.
"""
for f in [self.tickerdata_pickle, self.trials_file]:
for f in [self.data_pickle_file, self.results_file]:
p = Path(f)
if p.is_file():
logger.info(f"Removing `{p}`.")
@ -144,27 +151,26 @@ class Hyperopt:
# and the values are taken from the list of parameters.
return {d.name: v for d, v in zip(dimensions, raw_params)}
def save_trials(self, final: bool = False) -> None:
def _save_results(self) -> None:
"""
Save hyperopt trials to file
Save hyperopt results to file
"""
num_trials = len(self.trials)
if num_trials > self.num_trials_saved:
logger.info(f"Saving {num_trials} {plural(num_trials, 'epoch')}.")
dump(self.trials, self.trials_file)
self.num_trials_saved = num_trials
if final:
logger.info(f"{num_trials} {plural(num_trials, 'epoch')} "
f"saved to '{self.trials_file}'.")
num_epochs = len(self.epochs)
if num_epochs > self.num_epochs_saved:
logger.debug(f"Saving {num_epochs} {plural(num_epochs, 'epoch')}.")
dump(self.epochs, self.results_file)
self.num_epochs_saved = num_epochs
logger.debug(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'.")
@staticmethod
def _read_trials(trials_file) -> List:
def _read_results(results_file: Path) -> List:
"""
Read hyperopt trials file
Read hyperopt results from file
"""
logger.info("Reading Trials from '%s'", trials_file)
trials = load(trials_file)
return trials
logger.info("Reading epochs from '%s'", results_file)
data = load(results_file)
return data
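The results file written by _save_results is a plain pickle of the epochs list, so it can be inspected outside of a hyperopt run. A minimal sketch, assuming the dump/load helpers used above are joblib's (as imported in this module) and that a results file already exists; the path below is a hypothetical default user_data layout:

from pathlib import Path
from joblib import load

# Hypothetical default location of saved hyperopt results
results_file = Path("user_data/hyperopt_results/hyperopt_results.pickle")

epochs = load(results_file)                      # list of per-epoch result dicts
best = min(epochs, key=lambda e: e["loss"])      # each dict carries a 'loss' value
print(f"{len(epochs)} epochs loaded, best objective: {best['loss']:.5f}")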
def _get_params_details(self, params: Dict) -> Dict:
"""
@ -189,7 +195,7 @@ class Hyperopt:
return result
@staticmethod
def print_epoch_details(results, total_epochs, print_json: bool,
def print_epoch_details(results, total_epochs: int, print_json: bool,
no_header: bool = False, header_str: str = None) -> None:
"""
Display details of the hyperopt result
@ -218,7 +224,7 @@ class Hyperopt:
Hyperopt._params_pretty_print(params, 'trailing', "Trailing stop:")
@staticmethod
def _params_update_for_json(result_dict, params, space: str):
def _params_update_for_json(result_dict, params, space: str) -> None:
if space in params:
space_params = Hyperopt._space_params(params, space)
if space in ['buy', 'sell']:
@ -235,7 +241,7 @@ class Hyperopt:
result_dict.update(space_params)
@staticmethod
def _params_pretty_print(params, space: str, header: str):
def _params_pretty_print(params, space: str, header: str) -> None:
if space in params:
space_params = Hyperopt._space_params(params, space, 5)
if space == 'stoploss':
@ -251,7 +257,7 @@ class Hyperopt:
return round_dict(d, r) if r else d
@staticmethod
def is_best_loss(results, current_best_loss) -> bool:
def is_best_loss(results, current_best_loss: float) -> bool:
return results['loss'] < current_best_loss
def print_results(self, results) -> None:
@ -259,33 +265,16 @@ class Hyperopt:
Log results if it is better than any previous evaluation
"""
is_best = results['is_best']
if not self.print_all:
# Print '\n' after each 100th epoch to separate dots from the log messages.
# Otherwise output is messy on a terminal.
print('.', end='' if results['current_epoch'] % 100 != 0 else None) # type: ignore
sys.stdout.flush()
if self.print_all or is_best:
if not self.print_all:
# Separate the results explanation string from dots
print("\n")
self.print_results_explanation(results, self.total_epochs, self.print_all,
self.print_colorized)
@staticmethod
def print_results_explanation(results, total_epochs, highlight_best: bool,
print_colorized: bool) -> None:
"""
Log results explanation string
"""
explanation_str = Hyperopt._format_explanation_string(results, total_epochs)
# Colorize output
if print_colorized:
if results['total_profit'] > 0:
explanation_str = Fore.GREEN + explanation_str
if highlight_best and results['is_best']:
explanation_str = Style.BRIGHT + explanation_str
print(explanation_str)
print(
self.get_result_table(
self.config, results, self.total_epochs,
self.print_all, self.print_colorized,
self.hyperopt_table_header
)
)
self.hyperopt_table_header = 2
@staticmethod
def _format_explanation_string(results, total_epochs) -> str:
@ -294,6 +283,151 @@ class Hyperopt:
f"{results['results_explanation']} " +
f"Objective: {results['loss']:.5f}")
@staticmethod
def get_result_table(config: dict, results: list, total_epochs: int, highlight_best: bool,
print_colorized: bool, remove_header: int) -> str:
"""
Log result table
"""
if not results:
return ''
tabulate.PRESERVE_WHITESPACE = True
trials = json_normalize(results, max_level=1)
trials['Best'] = ''
trials = trials[['Best', 'current_epoch', 'results_metrics.trade_count',
'results_metrics.avg_profit', 'results_metrics.total_profit',
'results_metrics.profit', 'results_metrics.duration',
'loss', 'is_initial_point', 'is_best']]
trials.columns = ['Best', 'Epoch', 'Trades', 'Avg profit', 'Total profit',
'Profit', 'Avg duration', 'Objective', 'is_initial_point', 'is_best']
trials['is_profit'] = False
trials.loc[trials['is_initial_point'], 'Best'] = '* '
trials.loc[trials['is_best'], 'Best'] = 'Best'
trials.loc[trials['is_initial_point'] & trials['is_best'], 'Best'] = '* Best'
trials.loc[trials['Total profit'] > 0, 'is_profit'] = True
trials['Trades'] = trials['Trades'].astype(str)
trials['Epoch'] = trials['Epoch'].apply(
lambda x: '{}/{}'.format(str(x).rjust(len(str(total_epochs)), ' '), total_epochs)
)
trials['Avg profit'] = trials['Avg profit'].apply(
lambda x: '{:,.2f}%'.format(x).rjust(7, ' ') if not isna(x) else "--".rjust(7, ' ')
)
trials['Avg duration'] = trials['Avg duration'].apply(
lambda x: '{:,.1f} m'.format(x).rjust(7, ' ') if not isna(x) else "--".rjust(7, ' ')
)
trials['Objective'] = trials['Objective'].apply(
lambda x: '{:,.5f}'.format(x).rjust(8, ' ') if x != 100000 else "N/A".rjust(8, ' ')
)
trials['Profit'] = trials.apply(
lambda x: '{:,.8f} {} {}'.format(
x['Total profit'], config['stake_currency'],
'({:,.2f}%)'.format(x['Profit']).rjust(10, ' ')
).rjust(25+len(config['stake_currency']))
if x['Total profit'] != 0.0 else '--'.rjust(25+len(config['stake_currency'])),
axis=1
)
trials = trials.drop(columns=['Total profit'])
if print_colorized:
for i in range(len(trials)):
if trials.loc[i]['is_profit']:
for j in range(len(trials.loc[i])-3):
trials.iat[i, j] = "{}{}{}".format(Fore.GREEN,
str(trials.loc[i][j]), Fore.RESET)
if trials.loc[i]['is_best'] and highlight_best:
for j in range(len(trials.loc[i])-3):
trials.iat[i, j] = "{}{}{}".format(Style.BRIGHT,
str(trials.loc[i][j]), Style.RESET_ALL)
trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit'])
if remove_header > 0:
table = tabulate.tabulate(
trials.to_dict(orient='list'), tablefmt='orgtbl',
headers='keys', stralign="right"
)
table = table.split("\n", remove_header)[remove_header]
elif remove_header < 0:
table = tabulate.tabulate(
trials.to_dict(orient='list'), tablefmt='psql',
headers='keys', stralign="right"
)
table = "\n".join(table.split("\n")[0:remove_header])
else:
table = tabulate.tabulate(
trials.to_dict(orient='list'), tablefmt='psql',
headers='keys', stralign="right"
)
return table
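A usage sketch for the table builder above; it is a sketch only, assuming a previously saved results file (hypothetical path) and that only 'stake_currency' is needed from the config by this method:

from pathlib import Path
from freqtrade.optimize.hyperopt import Hyperopt

# Reload epochs from an earlier run (path is hypothetical)
epochs = Hyperopt.load_previous_results(
    Path("user_data/hyperopt_results/hyperopt_results.pickle"))

table = Hyperopt.get_result_table(
    config={'stake_currency': 'BTC'},   # only 'stake_currency' is read here
    results=epochs,
    total_epochs=len(epochs),
    highlight_best=True,
    print_colorized=False,
    remove_header=0,                    # 0 -> full 'psql'-style table including header
)
print(table)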
@staticmethod
def export_csv_file(config: dict, results: list, total_epochs: int, highlight_best: bool,
csv_file: str) -> None:
"""
Log result to csv-file
"""
if not results:
return
# Refuse to overwrite an existing file
if path.isfile(csv_file):
logger.error(f"CSV file already exists: {csv_file}")
return
try:
io.open(csv_file, 'w+').close()
except IOError:
logger.error(f"Failed to create CSV file: {csv_file}")
return
trials = json_normalize(results, max_level=1)
trials['Best'] = ''
trials['Stake currency'] = config['stake_currency']
base_metrics = ['Best', 'current_epoch', 'results_metrics.trade_count',
'results_metrics.avg_profit', 'results_metrics.total_profit',
'Stake currency', 'results_metrics.profit', 'results_metrics.duration',
'loss', 'is_initial_point', 'is_best']
param_metrics = [("params_dict."+param) for param in results[0]['params_dict'].keys()]
trials = trials[base_metrics + param_metrics]
base_columns = ['Best', 'Epoch', 'Trades', 'Avg profit', 'Total profit', 'Stake currency',
'Profit', 'Avg duration', 'Objective', 'is_initial_point', 'is_best']
param_columns = list(results[0]['params_dict'].keys())
trials.columns = base_columns + param_columns
trials['is_profit'] = False
trials.loc[trials['is_initial_point'], 'Best'] = '*'
trials.loc[trials['is_best'], 'Best'] = 'Best'
trials.loc[trials['is_initial_point'] & trials['is_best'], 'Best'] = '* Best'
trials.loc[trials['Total profit'] > 0, 'is_profit'] = True
trials['Epoch'] = trials['Epoch'].astype(str)
trials['Trades'] = trials['Trades'].astype(str)
trials['Total profit'] = trials['Total profit'].apply(
lambda x: '{:,.8f}'.format(x) if x != 0.0 else ""
)
trials['Profit'] = trials['Profit'].apply(
lambda x: '{:,.2f}'.format(x) if not isna(x) else ""
)
trials['Avg profit'] = trials['Avg profit'].apply(
lambda x: '{:,.2f}%'.format(x) if not isna(x) else ""
)
trials['Avg duration'] = trials['Avg duration'].apply(
lambda x: '{:,.1f} m'.format(x) if not isna(x) else ""
)
trials['Objective'] = trials['Objective'].apply(
lambda x: '{:,.5f}'.format(x) if x != 100000 else ""
)
trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit'])
trials.to_csv(csv_file, index=False, header=True, mode='w', encoding='UTF-8')
logger.info(f"CSV file created: {csv_file}")
def has_space(self, space: str) -> bool:
"""
Tell if the space value is contained in the configuration
@ -345,15 +479,15 @@ class Hyperopt:
if self.has_space('roi'):
self.backtesting.strategy.minimal_roi = \
self.custom_hyperopt.generate_roi_table(params_dict)
self.custom_hyperopt.generate_roi_table(params_dict)
if self.has_space('buy'):
self.backtesting.strategy.advise_buy = \
self.custom_hyperopt.buy_strategy_generator(params_dict)
self.custom_hyperopt.buy_strategy_generator(params_dict)
if self.has_space('sell'):
self.backtesting.strategy.advise_sell = \
self.custom_hyperopt.sell_strategy_generator(params_dict)
self.custom_hyperopt.sell_strategy_generator(params_dict)
if self.has_space('stoploss'):
self.backtesting.strategy.stoploss = params_dict['stoploss']
@ -367,19 +501,17 @@ class Hyperopt:
self.backtesting.strategy.trailing_only_offset_is_reached = \
d['trailing_only_offset_is_reached']
processed = load(self.tickerdata_pickle)
processed = load(self.data_pickle_file)
min_date, max_date = get_timerange(processed)
backtesting_results = self.backtesting.backtest(
{
'stake_amount': self.config['stake_amount'],
'processed': processed,
'max_open_trades': self.max_open_trades,
'position_stacking': self.position_stacking,
'start_date': min_date,
'end_date': max_date,
}
processed=processed,
stake_amount=self.config['stake_amount'],
start_date=min_date,
end_date=max_date,
max_open_trades=self.max_open_trades,
position_stacking=self.position_stacking,
)
return self._get_results_dict(backtesting_results, min_date, max_date,
params_dict, params_details)
@ -438,43 +570,28 @@ class Hyperopt:
n_initial_points=INITIAL_POINTS,
acq_optimizer_kwargs={'n_jobs': cpu_count},
random_state=self.random_state,
model_queue_size=SKOPT_MODEL_QUEUE_SIZE,
)
def fix_optimizer_models_list(self):
"""
WORKAROUND: Since skopt is not actively supported, this resolves problems with skopt
memory usage, see also: https://github.com/scikit-optimize/scikit-optimize/pull/746
This may cease working when skopt updates if implementation of this intrinsic
part changes.
"""
n = len(self.opt.models) - SKOPT_MODELS_MAX_NUM
# Keep no more than 2*SKOPT_MODELS_MAX_NUM models in the skopt models list,
# remove the old ones. These are actually of no use, the current model
# from the estimator is the only one used in the skopt optimizer.
# Freqtrade code also does not inspect details of the models.
if n >= SKOPT_MODELS_MAX_NUM:
logger.debug(f"Fixing skopt models list, removing {n} old items...")
del self.opt.models[0:n]
def run_optimizer_parallel(self, parallel, asked, i) -> List:
return parallel(delayed(
wrap_non_picklable_objects(self.generate_optimizer))(v, i) for v in asked)
@staticmethod
def load_previous_results(trials_file) -> List:
def load_previous_results(results_file: Path) -> List:
"""
Load data for epochs from the file if we have one
"""
trials: List = []
if trials_file.is_file() and trials_file.stat().st_size > 0:
trials = Hyperopt._read_trials(trials_file)
if trials[0].get('is_best') is None:
epochs: List = []
if results_file.is_file() and results_file.stat().st_size > 0:
epochs = Hyperopt._read_results(results_file)
# Detect an old results format that lacks the 'is_best' field
if epochs[0].get('is_best') is None:
raise OperationalException(
"The file with Hyperopt results is incompatible with this version "
"of Freqtrade and cannot be loaded.")
logger.info(f"Loaded {len(trials)} previous evaluations from disk.")
return trials
"The file with Hyperopt results is incompatible with this version "
"of Freqtrade and cannot be loaded.")
logger.info(f"Loaded {len(epochs)} previous evaluations from disk.")
return epochs
def _set_random_state(self, random_state: Optional[int]) -> int:
return random_state or random.randint(1, 2**16 - 1)
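The model_queue_size argument added to the optimizer above is what makes the removed fix_optimizer_models_list workaround unnecessary: skopt itself now bounds how many surrogate models it keeps. A standalone sketch of that behaviour with a toy objective and hypothetical bounds (assuming a skopt version where model_queue_size is available):

from skopt import Optimizer
from skopt.space import Real

opt = Optimizer(
    dimensions=[Real(-2.0, 2.0, name='x')],
    base_estimator='ET',
    n_initial_points=5,
    model_queue_size=10,   # skopt drops surrogate models beyond this count
)

for _ in range(30):
    x = opt.ask()
    opt.tell(x, (x[0] - 0.3) ** 2)   # toy objective with a minimum near x = 0.3

print(len(opt.models))   # stays bounded by model_queue_size (<= 10)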
@ -482,10 +599,10 @@ class Hyperopt:
def start(self) -> None:
self.random_state = self._set_random_state(self.config.get('hyperopt_random_state', None))
logger.info(f"Using optimizer random state: {self.random_state}")
self.hyperopt_table_header = -1
data, timerange = self.backtesting.load_bt_data()
preprocessed = self.backtesting.strategy.tickerdata_to_dataframe(data)
preprocessed = self.backtesting.strategy.ohlcvdata_to_dataframe(data)
# Trim startup period from analyzed dataframe
for pair, df in preprocessed.items():
@ -496,12 +613,13 @@ class Hyperopt:
'Hyperopting with data from %s up to %s (%s days)..',
min_date.isoformat(), max_date.isoformat(), (max_date - min_date).days
)
dump(preprocessed, self.tickerdata_pickle)
dump(preprocessed, self.data_pickle_file)
# We don't need exchange instance anymore while running hyperopt
self.backtesting.exchange = None # type: ignore
self.backtesting.pairlists = None # type: ignore
self.trials = self.load_previous_results(self.trials_file)
self.epochs = self.load_previous_results(self.results_file)
cpus = cpu_count()
logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
@ -510,52 +628,85 @@ class Hyperopt:
self.dimensions: List[Dimension] = self.hyperopt_space()
self.opt = self.get_optimizer(self.dimensions, config_jobs)
if self.print_colorized:
colorama_init(autoreset=True)
try:
with Parallel(n_jobs=config_jobs) as parallel:
jobs = parallel._effective_n_jobs()
logger.info(f'Effective number of parallel workers used: {jobs}')
EVALS = max(self.total_epochs // jobs, 1)
for i in range(EVALS):
asked = self.opt.ask(n_points=jobs)
f_val = self.run_optimizer_parallel(parallel, asked, i)
self.opt.tell(asked, [v['loss'] for v in f_val])
self.fix_optimizer_models_list()
for j in range(jobs):
# Use human-friendly indexes here (starting from 1)
current = i * jobs + j + 1
val = f_val[j]
val['current_epoch'] = current
val['is_initial_point'] = current <= INITIAL_POINTS
logger.debug(f"Optimizer epoch evaluated: {val}")
is_best = self.is_best_loss(val, self.current_best_loss)
# This value is assigned here and not in the optimization method
# to keep proper order in the list of results. That's because
# evaluations can take different time. Here they are aligned in the
# order they will be shown to the user.
val['is_best'] = is_best
# Define progressbar
if self.print_colorized:
widgets = [
' [Epoch ', progressbar.Counter(), ' of ', str(self.total_epochs),
' (', progressbar.Percentage(), ')] ',
progressbar.Bar(marker=progressbar.AnimatedMarker(
fill='\N{FULL BLOCK}',
fill_wrap=Fore.GREEN + '{}' + Fore.RESET,
marker_wrap=Style.BRIGHT + '{}' + Style.RESET_ALL,
)),
' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
]
else:
widgets = [
' [Epoch ', progressbar.Counter(), ' of ', str(self.total_epochs),
' (', progressbar.Percentage(), ')] ',
progressbar.Bar(marker=progressbar.AnimatedMarker(
fill='\N{FULL BLOCK}',
)),
' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
]
with progressbar.ProgressBar(
max_value=self.total_epochs, redirect_stdout=False, redirect_stderr=False,
widgets=widgets
) as pbar:
EVALS = ceil(self.total_epochs / jobs)
for i in range(EVALS):
# Correct the number of epochs to be processed for the last
# iteration (should not exceed self.total_epochs in total)
n_rest = (i + 1) * jobs - self.total_epochs
current_jobs = jobs - n_rest if n_rest > 0 else jobs
self.print_results(val)
asked = self.opt.ask(n_points=current_jobs)
f_val = self.run_optimizer_parallel(parallel, asked, i)
self.opt.tell(asked, [v['loss'] for v in f_val])
# Calculate progressbar outputs
for j, val in enumerate(f_val):
# Use human-friendly indexes here (starting from 1)
current = i * jobs + j + 1
val['current_epoch'] = current
val['is_initial_point'] = current <= INITIAL_POINTS
logger.debug(f"Optimizer epoch evaluated: {val}")
is_best = self.is_best_loss(val, self.current_best_loss)
# This value is assigned here and not in the optimization method
# to keep proper order in the list of results. That's because
# evaluations can take different time. Here they are aligned in the
# order they will be shown to the user.
val['is_best'] = is_best
self.print_results(val)
if is_best:
self.current_best_loss = val['loss']
self.epochs.append(val)
# Save results after each best epoch and every 100 epochs
if is_best or current % 100 == 0:
self._save_results()
pbar.update(current)
if is_best:
self.current_best_loss = val['loss']
self.trials.append(val)
# Save results after each best epoch and every 100 epochs
if is_best or current % 100 == 0:
self.save_trials()
except KeyboardInterrupt:
print('User interrupted..')
self.save_trials(final=True)
self._save_results()
logger.info(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'.")
if self.trials:
sorted_trials = sorted(self.trials, key=itemgetter('loss'))
results = sorted_trials[0]
self.print_epoch_details(results, self.total_epochs, self.print_json)
if self.epochs:
sorted_epochs = sorted(self.epochs, key=itemgetter('loss'))
best_epoch = sorted_epochs[0]
self.print_epoch_details(best_epoch, self.total_epochs, self.print_json)
else:
# This is printed when Ctrl+C is pressed quickly, before first epochs have
# a chance to be evaluated.
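The EVALS / current_jobs arithmetic in the loop above caps the size of the last batch so that exactly total_epochs points are asked from the optimizer. A small illustration of that calculation with hypothetical numbers:

from math import ceil

def batch_sizes(total_epochs: int, jobs: int):
    """Yield the number of epochs evaluated per optimizer iteration,
    mirroring the EVALS / current_jobs arithmetic in the loop above."""
    evals = ceil(total_epochs / jobs)
    for i in range(evals):
        n_rest = (i + 1) * jobs - total_epochs
        yield jobs - n_rest if n_rest > 0 else jobs

# e.g. 100 epochs across 8 parallel jobs: 12 full batches of 8, then a final batch of 4
assert list(batch_sizes(100, 8)) == [8] * 12 + [4]
assert sum(batch_sizes(100, 8)) == 100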

View File

@ -4,17 +4,15 @@ This module defines the interface to apply for hyperopt
"""
import logging
import math
from abc import ABC
from typing import Dict, Any, Callable, List
from typing import Any, Callable, Dict, List
from skopt.space import Categorical, Dimension, Integer, Real
from freqtrade import OperationalException
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_minutes
from freqtrade.misc import round_dict
logger = logging.getLogger(__name__)
@ -209,7 +207,7 @@ class IHyperOpt(ABC):
# so this intermediate parameter is used as the value of the difference between
# them. The value of the 'trailing_stop_positive_offset' is constructed in the
# generate_trailing_params() method.
# # This is similar to the hyperspace dimensions used for constructing the ROI tables.
# This is similar to the hyperspace dimensions used for constructing the ROI tables.
Real(0.001, 0.1, name='trailing_stop_positive_offset_p1'),
Categorical([True, False], name='trailing_only_offset_is_reached'),

View File

@ -28,18 +28,19 @@ class SharpeHyperOptLoss(IHyperOptLoss):
Uses Sharpe Ratio calculation.
"""
total_profit = results.profit_percent
total_profit = results["profit_percent"]
days_period = (max_date - min_date).days
# adding slippage of 0.1% per trade
total_profit = total_profit - 0.0005
expected_yearly_return = total_profit.sum() / days_period
expected_returns_mean = total_profit.sum() / days_period
up_stdev = np.std(total_profit)
if (np.std(total_profit) != 0.):
sharp_ratio = expected_yearly_return / np.std(total_profit) * np.sqrt(365)
if up_stdev != 0:
sharp_ratio = expected_returns_mean / up_stdev * np.sqrt(365)
else:
# Define high (negative) sharpe ratio to be clear that this is NOT optimal.
sharp_ratio = -20.
# print(expected_yearly_return, np.std(total_profit), sharp_ratio)
# print(expected_returns_mean, up_stdev, sharp_ratio)
return -sharp_ratio
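For reference, the same calculation pulled out into a standalone function; the per-trade return Series and the day count below are hypothetical examples:

import numpy as np
import pandas as pd

def sharpe_loss(profit_percent: pd.Series, days_period: int) -> float:
    # Flat slippage deduction per trade, then annualized Sharpe ratio of
    # per-trade returns; returned negated so that lower is better.
    returns = profit_percent - 0.0005
    expected_returns_mean = returns.sum() / days_period
    up_stdev = np.std(returns)
    if up_stdev == 0:
        return 20.0   # no variance: heavily penalized, mirroring the -20 branch above
    return -(expected_returns_mean / up_stdev * np.sqrt(365))

# hypothetical per-trade results over a 30-day backtest window
print(sharpe_loss(pd.Series([0.012, -0.004, 0.020, 0.006]), days_period=30))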

View File

@ -0,0 +1,62 @@
"""
SharpeHyperOptLossDaily
This module defines the alternative HyperOptLoss class which can be used for
Hyperoptimization.
"""
import math
from datetime import datetime
from pandas import DataFrame, date_range
from freqtrade.optimize.hyperopt import IHyperOptLoss
class SharpeHyperOptLossDaily(IHyperOptLoss):
"""
Defines the loss function for hyperopt.
This implementation uses the Sharpe Ratio calculation.
"""
@staticmethod
def hyperopt_loss_function(results: DataFrame, trade_count: int,
min_date: datetime, max_date: datetime,
*args, **kwargs) -> float:
"""
Objective function, returns smaller number for more optimal results.
Uses Sharpe Ratio calculation.
"""
resample_freq = '1D'
slippage_per_trade_ratio = 0.0005
days_in_year = 365
annual_risk_free_rate = 0.0
risk_free_rate = annual_risk_free_rate / days_in_year
# apply slippage per trade to profit_percent
results.loc[:, 'profit_percent_after_slippage'] = \
results['profit_percent'] - slippage_per_trade_ratio
# create the index within the min_date and end max_date
t_index = date_range(start=min_date, end=max_date, freq=resample_freq,
normalize=True)
sum_daily = (
results.resample(resample_freq, on='close_time').agg(
{"profit_percent_after_slippage": sum}).reindex(t_index).fillna(0)
)
total_profit = sum_daily["profit_percent_after_slippage"] - risk_free_rate
expected_returns_mean = total_profit.mean()
up_stdev = total_profit.std()
if up_stdev != 0:
sharp_ratio = expected_returns_mean / up_stdev * math.sqrt(days_in_year)
else:
# Define high (negative) sharpe ratio to be clear that this is NOT optimal.
sharp_ratio = -20.
# print(t_index, sum_daily, total_profit)
# print(risk_free_rate, expected_returns_mean, up_stdev, sharp_ratio)
return -sharp_ratio
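The resample/reindex step above turns an irregular series of trade exits into a fixed daily return series, with trade-free days counted as zero. A small illustration with hypothetical trades:

import pandas as pd

trades = pd.DataFrame({
    'close_time': pd.to_datetime(['2020-01-01 10:00', '2020-01-01 15:00', '2020-01-03 09:00']),
    'profit_percent_after_slippage': [0.010, -0.002, 0.004],
})
t_index = pd.date_range('2020-01-01', '2020-01-04', freq='1D', normalize=True)
sum_daily = (
    trades.resample('1D', on='close_time')
          .agg({'profit_percent_after_slippage': sum})
          .reindex(t_index)
          .fillna(0)
)
print(sum_daily)   # 0.008 on Jan 1, 0 on Jan 2, 0.004 on Jan 3, 0 on Jan 4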

View File

@ -0,0 +1,49 @@
"""
SortinoHyperOptLoss
This module defines the alternative HyperOptLoss class which can be used for
Hyperoptimization.
"""
from datetime import datetime
from pandas import DataFrame
import numpy as np
from freqtrade.optimize.hyperopt import IHyperOptLoss
class SortinoHyperOptLoss(IHyperOptLoss):
"""
Defines the loss function for hyperopt.
This implementation uses the Sortino Ratio calculation.
"""
@staticmethod
def hyperopt_loss_function(results: DataFrame, trade_count: int,
min_date: datetime, max_date: datetime,
*args, **kwargs) -> float:
"""
Objective function, returns smaller number for more optimal results.
Uses Sortino Ratio calculation.
"""
total_profit = results["profit_percent"]
days_period = (max_date - min_date).days
# adding slippage of 0.1% per trade
total_profit = total_profit - 0.0005
expected_returns_mean = total_profit.sum() / days_period
results['downside_returns'] = 0
results.loc[total_profit < 0, 'downside_returns'] = results['profit_percent']
down_stdev = np.std(results['downside_returns'])
if down_stdev != 0:
sortino_ratio = expected_returns_mean / down_stdev * np.sqrt(365)
else:
# Define high (negative) sortino ratio to be clear that this is NOT optimal.
sortino_ratio = -20.
# print(expected_returns_mean, down_stdev, sortino_ratio)
return -sortino_ratio
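The only difference from the Sharpe variant is the denominator: instead of the standard deviation of all returns, only losing trades contribute (downside deviation). A compact sketch of that piece with hypothetical returns:

import numpy as np
import pandas as pd

# Hypothetical slippage-adjusted per-trade returns
returns = pd.Series([0.012, -0.004, 0.020, -0.010, 0.006])

downside = returns.where(returns < 0, 0.0)   # keep losses, zero out gains
down_stdev = np.std(downside)                # downside deviation
expected_returns_mean = returns.sum() / 30   # over a hypothetical 30-day period
sortino = expected_returns_mean / down_stdev * np.sqrt(365)
print(round(sortino, 3))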

Some files were not shown because too many files have changed in this diff.