Merge branch 'develop' into pairlocks_direction
Commit: 995c48b642

.github/PULL_REQUEST_TEMPLATE.md (vendored, 8 changes)
@@ -1,9 +1,9 @@
-Thank you for sending your pull request. But first, have you included
+<!-- Thank you for sending your pull request. But first, have you included
 unit tests, and is your code PEP8 conformant? [More details](https://github.com/freqtrade/freqtrade/blob/develop/CONTRIBUTING.md)
+-->
 ## Summary

-Explain in one sentence the goal of this PR
+<!-- Explain in one sentence the goal of this PR -->

 Solve the issue: #___

@@ -14,4 +14,4 @@ Solve the issue: #___

 ## What's new?

-*Explain in details what this PR solve or improve. You can include visuals.*
+<!-- Explain in details what this PR solve or improve. You can include visuals. -->
.github/workflows/ci.yml (vendored, 32 changes)
@@ -100,7 +100,7 @@ jobs:

    - name: Mypy
      run: |
-        mypy freqtrade scripts
+        mypy freqtrade scripts tests

    - name: Discord notification
      uses: rjstone/discord-webhook-notify@v1
@@ -255,7 +255,7 @@ jobs:

    - name: Mypy
      run: |
-        mypy freqtrade scripts
+        mypy freqtrade scripts tests

    - name: Discord notification
      uses: rjstone/discord-webhook-notify@v1
@@ -265,6 +265,21 @@ jobs:
        details: Test Failed
        webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

+  mypy_version_check:
+    runs-on: ubuntu-20.04
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Set up Python
+      uses: actions/setup-python@v3
+      with:
+        python-version: 3.9
+
+    - name: pre-commit dependencies
+      run: |
+        pip install pyaml
+        python build_helpers/pre_commit_update.py
+
   docs_check:
     runs-on: ubuntu-20.04
     steps:
@@ -277,7 +292,7 @@ jobs:
    - name: Set up Python
      uses: actions/setup-python@v3
      with:
-        python-version: 3.8
+        python-version: 3.9

    - name: Documentation build
      run: |
@@ -294,6 +309,9 @@ jobs:
        webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

   cleanup-prior-runs:
+    permissions:
+      actions: write # for rokroskar/workflow-run-cleanup-action to obtain workflow name & cancel it
+      contents: read # for rokroskar/workflow-run-cleanup-action to obtain branch
     runs-on: ubuntu-20.04
     steps:
     - name: Cleanup previous runs on this branch
@@ -304,8 +322,12 @@ jobs:

   # Notify only once - when CI completes (and after deploy) in case it's successfull
   notify-complete:
-    needs: [ build_linux, build_macos, build_windows, docs_check ]
+    needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check ]
     runs-on: ubuntu-20.04
+    # Discord notification can't handle schedule events
+    if: (github.event_name != 'schedule')
+    permissions:
+      repository-projects: read
     steps:

    - name: Check user permission
@@ -325,7 +347,7 @@ jobs:
        webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

   deploy:
-    needs: [ build_linux, build_macos, build_windows, docs_check ]
+    needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check ]
     runs-on: ubuntu-20.04

     if: (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'release') && github.repository == 'freqtrade/freqtrade'
@@ -11,6 +11,13 @@ repos:
    rev: "v0.942"
    hooks:
      - id: mypy
+        exclude: build_helpers
+        additional_dependencies:
+          - types-cachetools==5.0.1
+          - types-filelock==3.2.5
+          - types-requests==2.27.20
+          - types-tabulate==0.8.7
+          - types-python-dateutil==2.8.12
        # stages: [push]

  - repo: https://github.com/pycqa/isort
@@ -39,6 +39,14 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even
 - [X] [OKX](https://okx.com/) (Former OKEX)
 - [ ] [potentially many others](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_

+### Experimentally, freqtrade also supports futures on the following exchanges
+
+- [X] [Binance](https://www.binance.com/)
+- [X] [Gate.io](https://www.gate.io/ref/6266643)
+- [X] [OKX](https://okx.com/).
+
+Please make sure to read the [exchange specific notes](docs/exchanges.md), as well as the [trading with leverage](docs/leverage.md) documentation before diving in.
+
 ### Community tested

 Exchanges confirmed working by the community:
build_helpers/pre_commit_update.py (new file, 42 lines)
@@ -0,0 +1,42 @@
+# File used in CI to ensure pre-commit dependencies are kept uptodate.
+
+import sys
+from pathlib import Path
+
+import yaml
+
+
+pre_commit_file = Path('.pre-commit-config.yaml')
+require_dev = Path('requirements-dev.txt')
+
+with require_dev.open('r') as rfile:
+    requirements = rfile.readlines()
+
+# Extract types only
+type_reqs = [r.strip('\n') for r in requirements if r.startswith('types-')]
+
+with pre_commit_file.open('r') as file:
+    f = yaml.load(file, Loader=yaml.FullLoader)
+
+
+mypy_repo = [repo for repo in f['repos'] if repo['repo']
+             == 'https://github.com/pre-commit/mirrors-mypy']
+
+hooks = mypy_repo[0]['hooks'][0]['additional_dependencies']
+
+errors = []
+for hook in hooks:
+    if hook not in type_reqs:
+        errors.append(f"{hook} is missing in requirements-dev.txt.")
+
+for req in type_reqs:
+    if req not in hooks:
+        errors.append(f"{req} is missing in pre-config file.")
+
+
+if errors:
+    for e in errors:
+        print(e)
+    sys.exit(1)
+
+sys.exit(0)
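For illustration, the consistency rule this script enforces can be sketched in isolation: every `types-*` pin in requirements-dev.txt must also appear in the mypy hook's additional_dependencies, and vice versa. The package pins below are made-up sample values; the real run reads the two files shown above.

```python
# Illustrative sketch of the check above, using made-up sample pins.
type_reqs = ['types-requests==2.27.20', 'types-tabulate==0.8.7']   # as read from requirements-dev.txt
hooks = ['types-requests==2.27.20', 'types-cachetools==5.0.1']     # as read from the mypy pre-commit hook

# Any pin present on one side but not the other is reported, mirroring the two loops above.
errors = [f"{hook} is missing in requirements-dev.txt." for hook in hooks if hook not in type_reqs]
errors += [f"{req} is missing in pre-config file." for req in type_reqs if req not in hooks]
print(errors)
# ['types-cachetools==5.0.1 is missing in requirements-dev.txt.',
#  'types-tabulate==0.8.7 is missing in pre-config file.']
```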
@@ -90,7 +90,7 @@
    },
    "bot_name": "freqtrade",
    "initial_state": "running",
-    "force_enter_enable": false,
+    "force_entry_enable": false,
    "internals": {
        "process_throttle_secs": 5
    }
@@ -299,6 +299,7 @@ A backtesting result will look like that:
 | Final balance | 0.01762792 BTC |
 | Absolute profit | 0.00762792 BTC |
 | Total profit % | 76.2% |
+| CAGR % | 460.87% |
 | Trades per day | 3.575 |
 | Avg. stake amount | 0.001 BTC |
 | Total trade volume | 0.429 BTC |
@@ -388,6 +389,7 @@ It contains some useful key metrics about performance of your strategy on backte
 | Final balance | 0.01762792 BTC |
 | Absolute profit | 0.00762792 BTC |
 | Total profit % | 76.2% |
+| CAGR % | 460.87% |
 | Avg. stake amount | 0.001 BTC |
 | Total trade volume | 0.429 BTC |
 | | |
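The new "CAGR %" row in both tables comes from the `calculate_cagr()` helper added in freqtrade/data/metrics.py later in this diff. A minimal sketch of that formula follows; the 120-day window is an assumed example value, used only to show how a roughly 76% absolute gain can annualise to a figure near 460%.

```python
# Sketch of the CAGR formula behind the new "CAGR %" metric (see freqtrade/data/metrics.py below).
def calculate_cagr(days_passed: int, starting_balance: float, final_balance: float) -> float:
    return (final_balance / starting_balance) ** (1 / (days_passed / 365)) - 1

# Balances taken from the table above; the 120-day backtest window is an assumption for illustration.
print(f"{calculate_cagr(120, 0.01, 0.01762792):.2%}")  # roughly 461%
```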
@@ -51,6 +51,14 @@ Please read the [exchange specific notes](exchanges.md) to learn about eventual,
 - [X] [OKX](https://okx.com/) (Former OKEX)
 - [ ] [potentially many others through <img alt="ccxt" width="30px" src="assets/ccxt-logo.svg" />](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_

+### Experimentally, freqtrade also supports futures on the following exchanges:
+
+- [X] [Binance](https://www.binance.com/)
+- [X] [Gate.io](https://www.gate.io/ref/6266643)
+- [X] [OKX](https://okx.com/).
+
+Please make sure to read the [exchange specific notes](exchanges.md), as well as the [trading with leverage](leverage.md) documentation before diving in.
+
 ### Community tested

 Exchanges confirmed working by the community:
@@ -1,5 +1,5 @@
 mkdocs==1.3.0
-mkdocs-material==8.2.9
+mkdocs-material==8.2.10
 mdx_truly_sane_lists==1.2
-pymdown-extensions==9.3
+pymdown-extensions==9.4
 jinja2==3.1.1
@@ -7,6 +7,7 @@ Depending on the callback used, they may be called when entering / exiting a tra

 Currently available callbacks:

+* [`bot_start()`](#bot-start)
 * [`bot_loop_start()`](#bot-loop-start)
 * [`custom_stake_amount()`](#stake-size-management)
 * [`custom_exit()`](#custom-exit-signal)
@@ -21,6 +22,29 @@ Currently available callbacks:
 !!! Tip "Callback calling sequence"
     You can find the callback calling sequence in [bot-basics](bot-basics.md#bot-execution-logic)

+## Bot start
+
+A simple callback which is called once when the strategy is loaded.
+This can be used to perform actions that must only be performed once and runs after dataprovider and wallet are set
+
+``` python
+import requests
+
+class AwesomeStrategy(IStrategy):
+
+    # ... populate_* methods
+
+    def bot_start(self, **kwargs) -> None:
+        """
+        Called only once after bot instantiation.
+        :param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
+        """
+        if self.config['runmode'].value in ('live', 'dry_run'):
+            # Assign this to the class by using self.*
+            # can then be used by populate_* methods
+            self.cust_remote_data = requests.get('https://some_remote_source.example.com')
+
+```
 ## Bot loop start

 A simple callback which is called once at the start of every bot throttling iteration (roughly every 5 seconds, unless configured differently).
@@ -122,11 +146,11 @@ See [Dataframe access](strategy-advanced.md#dataframe-access) for more informati

 ## Custom stoploss

-Called for open trade every throttling iteration (roughly every 5 seconds) until a trade is closed.
+Called for open trade every iteration (roughly every 5 seconds) until a trade is closed.

 The usage of the custom stoploss method must be enabled by setting `use_custom_stoploss=True` on the strategy object.

-The stoploss price can only ever move upwards - if the stoploss value returned from `custom_stoploss` would result in a lower stoploss price than was previously set, it will be ignored. The traditional `stoploss` value serves as an absolute lower level and will be instated as the initial stoploss (before this method is called for the first time for a trade).
+The stoploss price can only ever move upwards - if the stoploss value returned from `custom_stoploss` would result in a lower stoploss price than was previously set, it will be ignored. The traditional `stoploss` value serves as an absolute lower level and will be instated as the initial stoploss (before this method is called for the first time for a trade), and is still mandatory.

 The method must return a stoploss value (float / number) as a percentage of the current price.
 E.g. If the `current_rate` is 200 USD, then returning `0.02` will set the stoploss price 2% lower, at 196 USD.
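A rough sketch of a strategy using the convention described above, keeping the stop 2% below the current rate. The callback signature is taken from freqtrade's documented strategy interface of this period; treat the details as an assumption rather than part of this diff.

```python
from datetime import datetime

from freqtrade.persistence import Trade
from freqtrade.strategy import IStrategy


class AwesomeStrategy(IStrategy):
    use_custom_stoploss = True
    stoploss = -0.10  # the traditional stoploss stays mandatory and acts as the absolute floor

    def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
                        current_rate: float, current_profit: float, **kwargs) -> float:
        # Returning 0.02 places the stop 2% below current_rate,
        # e.g. a current_rate of 200 USD gives a stoploss price of 196 USD.
        return 0.02
```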
@@ -376,7 +400,7 @@ class AwesomeStrategy(IStrategy):

     def custom_exit_price(self, pair: str, trade: Trade,
                           current_time: datetime, proposed_rate: float,
-                          current_profit: float, **kwargs) -> float:
+                          current_profit: float, exit_tag: Optional[str], **kwargs) -> float:

         dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=pair,
                                                                  timeframe=self.timeframe)
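Because the commit threads the exit reason into this callback (see the FreqtradeBot hunk further down, which passes `exit_tag=exit_check.exit_reason`), strategies can now branch on it. A hedged sketch; the 'roi' tag value and the price tweak are illustrative assumptions.

```python
from datetime import datetime
from typing import Optional

from freqtrade.persistence import Trade
from freqtrade.strategy import IStrategy


class AwesomeStrategy(IStrategy):

    def custom_exit_price(self, pair: str, trade: Trade,
                          current_time: datetime, proposed_rate: float,
                          current_profit: float, exit_tag: Optional[str], **kwargs) -> float:
        # exit_tag carries the exit reason the bot determined for this exit.
        if exit_tag == 'roi':
            # Example only: ask slightly above the proposed rate for ROI exits.
            return proposed_rate * 1.001
        return proposed_rate
```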
@@ -22,6 +22,6 @@ def setup_utils_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str

    # Ensure these modes are using Dry-run
    config['dry_run'] = True
-    validate_config_consistency(config)
+    validate_config_consistency(config, preliminary=True)

    return config
@@ -39,7 +39,7 @@ def _extend_validator(validator_class):
 FreqtradeValidator = _extend_validator(Draft4Validator)


-def validate_config_schema(conf: Dict[str, Any]) -> Dict[str, Any]:
+def validate_config_schema(conf: Dict[str, Any], preliminary: bool = False) -> Dict[str, Any]:
    """
    Validate the configuration follow the Config Schema
    :param conf: Config in JSON format
@@ -49,7 +49,10 @@ def validate_config_schema(conf: Dict[str, Any]) -> Dict[str, Any]:
    if conf.get('runmode', RunMode.OTHER) in (RunMode.DRY_RUN, RunMode.LIVE):
        conf_schema['required'] = constants.SCHEMA_TRADE_REQUIRED
    elif conf.get('runmode', RunMode.OTHER) in (RunMode.BACKTEST, RunMode.HYPEROPT):
+        if preliminary:
            conf_schema['required'] = constants.SCHEMA_BACKTEST_REQUIRED
+        else:
+            conf_schema['required'] = constants.SCHEMA_BACKTEST_REQUIRED_FINAL
    else:
        conf_schema['required'] = constants.SCHEMA_MINIMAL_REQUIRED
    try:
@@ -64,7 +67,7 @@ def validate_config_schema(conf: Dict[str, Any]) -> Dict[str, Any]:
        )


-def validate_config_consistency(conf: Dict[str, Any]) -> None:
+def validate_config_consistency(conf: Dict[str, Any], preliminary: bool = False) -> None:
    """
    Validate the configuration consistency.
    Should be ran after loading both configuration and strategy,
@@ -85,7 +88,7 @@ def validate_config_consistency(conf: Dict[str, Any]) -> None:

    # validate configuration before returning
    logger.info('Validating configuration ...')
-    validate_config_schema(conf)
+    validate_config_schema(conf, preliminary=preliminary)


 def _validate_unlimited_amount(conf: Dict[str, Any]) -> None:
@@ -462,6 +462,10 @@ SCHEMA_BACKTEST_REQUIRED = [
     'dataformat_ohlcv',
     'dataformat_trades',
 ]
+SCHEMA_BACKTEST_REQUIRED_FINAL = SCHEMA_BACKTEST_REQUIRED + [
+    'stoploss',
+    'minimal_roi',
+]

 SCHEMA_MINIMAL_REQUIRED = [
     'exchange',
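Taken together with the `preliminary` flag added to the validation functions above, the effect is that `stoploss` and `minimal_roi` are only required once the strategy has been merged into the configuration. A minimal sketch; the first list is abbreviated because only its tail is visible in this diff.

```python
# Abbreviated: only the last two entries of SCHEMA_BACKTEST_REQUIRED are visible in this diff.
SCHEMA_BACKTEST_REQUIRED = ['dataformat_ohlcv', 'dataformat_trades']
SCHEMA_BACKTEST_REQUIRED_FINAL = SCHEMA_BACKTEST_REQUIRED + ['stoploss', 'minimal_roi']

def required_keys(preliminary: bool) -> list:
    # Mirrors the BACKTEST/HYPEROPT branch of validate_config_schema() above.
    return SCHEMA_BACKTEST_REQUIRED if preliminary else SCHEMA_BACKTEST_REQUIRED_FINAL

# setup_utils_configuration() validates with preliminary=True, before the strategy is loaded:
assert 'stoploss' not in required_keys(preliminary=True)
assert 'stoploss' in required_keys(preliminary=False)
```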
@@ -5,14 +5,15 @@ import logging
 from copy import copy
 from datetime import datetime, timezone
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Union

 import numpy as np
 import pandas as pd

 from freqtrade.constants import LAST_BT_RESULT_FN
 from freqtrade.exceptions import OperationalException
-from freqtrade.misc import get_backtest_metadata_filename, json_load
+from freqtrade.misc import json_load
+from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
 from freqtrade.persistence import LocalTrade, Trade, init_db


@@ -399,157 +400,3 @@ def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame,
     trades = trades.loc[(trades['open_date'] >= trades_start) &
                         (trades['close_date'] <= trades_stop)]
     return trades
-
-
-def calculate_market_change(data: Dict[str, pd.DataFrame], column: str = "close") -> float:
-    """
-    Calculate market change based on "column".
-    Calculation is done by taking the first non-null and the last non-null element of each column
-    and calculating the pctchange as "(last - first) / first".
-    Then the results per pair are combined as mean.
-
-    :param data: Dict of Dataframes, dict key should be pair.
-    :param column: Column in the original dataframes to use
-    :return:
-    """
-    tmp_means = []
-    for pair, df in data.items():
-        start = df[column].dropna().iloc[0]
-        end = df[column].dropna().iloc[-1]
-        tmp_means.append((end - start) / start)
-
-    return float(np.mean(tmp_means))
-
-
-def combine_dataframes_with_mean(data: Dict[str, pd.DataFrame],
-                                 column: str = "close") -> pd.DataFrame:
-    """
-    Combine multiple dataframes "column"
-    :param data: Dict of Dataframes, dict key should be pair.
-    :param column: Column in the original dataframes to use
-    :return: DataFrame with the column renamed to the dict key, and a column
-        named mean, containing the mean of all pairs.
-    :raise: ValueError if no data is provided.
-    """
-    df_comb = pd.concat([data[pair].set_index('date').rename(
-        {column: pair}, axis=1)[pair] for pair in data], axis=1)
-
-    df_comb['mean'] = df_comb.mean(axis=1)
-
-    return df_comb
-
-
-def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
-                      timeframe: str) -> pd.DataFrame:
-    """
-    Adds a column `col_name` with the cumulative profit for the given trades array.
-    :param df: DataFrame with date index
-    :param trades: DataFrame containing trades (requires columns close_date and profit_abs)
-    :param col_name: Column name that will be assigned the results
-    :param timeframe: Timeframe used during the operations
-    :return: Returns df with one additional column, col_name, containing the cumulative profit.
-    :raise: ValueError if trade-dataframe was found empty.
-    """
-    if len(trades) == 0:
-        raise ValueError("Trade dataframe empty.")
-    from freqtrade.exchange import timeframe_to_minutes
-    timeframe_minutes = timeframe_to_minutes(timeframe)
-    # Resample to timeframe to make sure trades match candles
-    _trades_sum = trades.resample(f'{timeframe_minutes}min', on='close_date'
-                                  )[['profit_abs']].sum()
-    df.loc[:, col_name] = _trades_sum['profit_abs'].cumsum()
-    # Set first value to 0
-    df.loc[df.iloc[0].name, col_name] = 0
-    # FFill to get continuous
-    df[col_name] = df[col_name].ffill()
-    return df
-
-
-def _calc_drawdown_series(profit_results: pd.DataFrame, *, date_col: str, value_col: str
-                          ) -> pd.DataFrame:
-    max_drawdown_df = pd.DataFrame()
-    max_drawdown_df['cumulative'] = profit_results[value_col].cumsum()
-    max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax()
-    max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value']
-    max_drawdown_df['date'] = profit_results.loc[:, date_col]
-    return max_drawdown_df
-
-
-def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date',
-                         value_col: str = 'profit_ratio'
-                         ):
-    """
-    Calculate max drawdown and the corresponding close dates
-    :param trades: DataFrame containing trades (requires columns close_date and profit_ratio)
-    :param date_col: Column in DataFrame to use for dates (defaults to 'close_date')
-    :param value_col: Column in DataFrame to use for values (defaults to 'profit_ratio')
-    :return: Tuple (float, highdate, lowdate, highvalue, lowvalue) with absolute max drawdown,
-        high and low time and high and low value.
-    :raise: ValueError if trade-dataframe was found empty.
-    """
-    if len(trades) == 0:
-        raise ValueError("Trade dataframe empty.")
-    profit_results = trades.sort_values(date_col).reset_index(drop=True)
-    max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col)
-
-    return max_drawdown_df
-
-
-def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date',
-                           value_col: str = 'profit_abs', starting_balance: float = 0
-                           ) -> Tuple[float, pd.Timestamp, pd.Timestamp, float, float, float]:
-    """
-    Calculate max drawdown and the corresponding close dates
-    :param trades: DataFrame containing trades (requires columns close_date and profit_ratio)
-    :param date_col: Column in DataFrame to use for dates (defaults to 'close_date')
-    :param value_col: Column in DataFrame to use for values (defaults to 'profit_abs')
-    :param starting_balance: Portfolio starting balance - properly calculate relative drawdown.
-    :return: Tuple (float, highdate, lowdate, highvalue, lowvalue, relative_drawdown)
-        with absolute max drawdown, high and low time and high and low value,
-        and the relative account drawdown
-    :raise: ValueError if trade-dataframe was found empty.
-    """
-    if len(trades) == 0:
-        raise ValueError("Trade dataframe empty.")
-    profit_results = trades.sort_values(date_col).reset_index(drop=True)
-    max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col)
-
-    idxmin = max_drawdown_df['drawdown'].idxmin()
-    if idxmin == 0:
-        raise ValueError("No losing trade, therefore no drawdown.")
-    high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]['high_value'].idxmax(), date_col]
-    low_date = profit_results.loc[idxmin, date_col]
-    high_val = max_drawdown_df.loc[max_drawdown_df.iloc[:idxmin]
-                                   ['high_value'].idxmax(), 'cumulative']
-    low_val = max_drawdown_df.loc[idxmin, 'cumulative']
-    max_drawdown_rel = 0.0
-    if high_val + starting_balance != 0:
-        max_drawdown_rel = (high_val - low_val) / (high_val + starting_balance)
-
-    return (
-        abs(min(max_drawdown_df['drawdown'])),
-        high_date,
-        low_date,
-        high_val,
-        low_val,
-        max_drawdown_rel
-    )
-
-
-def calculate_csum(trades: pd.DataFrame, starting_balance: float = 0) -> Tuple[float, float]:
-    """
-    Calculate min/max cumsum of trades, to show if the wallet/stake amount ratio is sane
-    :param trades: DataFrame containing trades (requires columns close_date and profit_percent)
-    :param starting_balance: Add starting balance to results, to show the wallets high / low points
-    :return: Tuple (float, float) with cumsum of profit_abs
-    :raise: ValueError if trade-dataframe was found empty.
-    """
-    if len(trades) == 0:
-        raise ValueError("Trade dataframe empty.")
-
-    csum_df = pd.DataFrame()
-    csum_df['sum'] = trades['profit_abs'].cumsum()
-    csum_min = csum_df['sum'].min() + starting_balance
-    csum_max = csum_df['sum'].max() + starting_balance
-
-    return csum_min, csum_max
freqtrade/data/metrics.py (new file, 173 lines)
@@ -0,0 +1,173 @@
+import logging
+from typing import Dict, Tuple
+
+import numpy as np
+import pandas as pd
+
+
+logger = logging.getLogger(__name__)
+
+
+def calculate_market_change(data: Dict[str, pd.DataFrame], column: str = "close") -> float:
+    """
+    Calculate market change based on "column".
+    Calculation is done by taking the first non-null and the last non-null element of each column
+    and calculating the pctchange as "(last - first) / first".
+    Then the results per pair are combined as mean.
+
+    :param data: Dict of Dataframes, dict key should be pair.
+    :param column: Column in the original dataframes to use
+    :return:
+    """
+    tmp_means = []
+    for pair, df in data.items():
+        start = df[column].dropna().iloc[0]
+        end = df[column].dropna().iloc[-1]
+        tmp_means.append((end - start) / start)
+
+    return float(np.mean(tmp_means))
+
+
+def combine_dataframes_with_mean(data: Dict[str, pd.DataFrame],
+                                 column: str = "close") -> pd.DataFrame:
+    """
+    Combine multiple dataframes "column"
+    :param data: Dict of Dataframes, dict key should be pair.
+    :param column: Column in the original dataframes to use
+    :return: DataFrame with the column renamed to the dict key, and a column
+        named mean, containing the mean of all pairs.
+    :raise: ValueError if no data is provided.
+    """
+    df_comb = pd.concat([data[pair].set_index('date').rename(
+        {column: pair}, axis=1)[pair] for pair in data], axis=1)
+
+    df_comb['mean'] = df_comb.mean(axis=1)
+
+    return df_comb
+
+
+def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
+                      timeframe: str) -> pd.DataFrame:
+    """
+    Adds a column `col_name` with the cumulative profit for the given trades array.
+    :param df: DataFrame with date index
+    :param trades: DataFrame containing trades (requires columns close_date and profit_abs)
+    :param col_name: Column name that will be assigned the results
+    :param timeframe: Timeframe used during the operations
+    :return: Returns df with one additional column, col_name, containing the cumulative profit.
+    :raise: ValueError if trade-dataframe was found empty.
+    """
+    if len(trades) == 0:
+        raise ValueError("Trade dataframe empty.")
+    from freqtrade.exchange import timeframe_to_minutes
+    timeframe_minutes = timeframe_to_minutes(timeframe)
+    # Resample to timeframe to make sure trades match candles
+    _trades_sum = trades.resample(f'{timeframe_minutes}min', on='close_date'
+                                  )[['profit_abs']].sum()
+    df.loc[:, col_name] = _trades_sum['profit_abs'].cumsum()
+    # Set first value to 0
+    df.loc[df.iloc[0].name, col_name] = 0
+    # FFill to get continuous
+    df[col_name] = df[col_name].ffill()
+    return df
+
+
+def _calc_drawdown_series(profit_results: pd.DataFrame, *, date_col: str, value_col: str
+                          ) -> pd.DataFrame:
+    max_drawdown_df = pd.DataFrame()
+    max_drawdown_df['cumulative'] = profit_results[value_col].cumsum()
+    max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax()
+    max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value']
+    max_drawdown_df['date'] = profit_results.loc[:, date_col]
+    return max_drawdown_df
+
+
+def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date',
+                         value_col: str = 'profit_ratio'
+                         ):
+    """
+    Calculate max drawdown and the corresponding close dates
+    :param trades: DataFrame containing trades (requires columns close_date and profit_ratio)
+    :param date_col: Column in DataFrame to use for dates (defaults to 'close_date')
+    :param value_col: Column in DataFrame to use for values (defaults to 'profit_ratio')
+    :return: Tuple (float, highdate, lowdate, highvalue, lowvalue) with absolute max drawdown,
+        high and low time and high and low value.
+    :raise: ValueError if trade-dataframe was found empty.
+    """
+    if len(trades) == 0:
+        raise ValueError("Trade dataframe empty.")
+    profit_results = trades.sort_values(date_col).reset_index(drop=True)
+    max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col)
+
+    return max_drawdown_df
+
+
+def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date',
+                           value_col: str = 'profit_abs', starting_balance: float = 0
+                           ) -> Tuple[float, pd.Timestamp, pd.Timestamp, float, float, float]:
+    """
+    Calculate max drawdown and the corresponding close dates
+    :param trades: DataFrame containing trades (requires columns close_date and profit_ratio)
+    :param date_col: Column in DataFrame to use for dates (defaults to 'close_date')
+    :param value_col: Column in DataFrame to use for values (defaults to 'profit_abs')
+    :param starting_balance: Portfolio starting balance - properly calculate relative drawdown.
+    :return: Tuple (float, highdate, lowdate, highvalue, lowvalue, relative_drawdown)
+        with absolute max drawdown, high and low time and high and low value,
+        and the relative account drawdown
+    :raise: ValueError if trade-dataframe was found empty.
+    """
+    if len(trades) == 0:
+        raise ValueError("Trade dataframe empty.")
+    profit_results = trades.sort_values(date_col).reset_index(drop=True)
+    max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col)
+
+    idxmin = max_drawdown_df['drawdown'].idxmin()
+    if idxmin == 0:
+        raise ValueError("No losing trade, therefore no drawdown.")
+    high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]['high_value'].idxmax(), date_col]
+    low_date = profit_results.loc[idxmin, date_col]
+    high_val = max_drawdown_df.loc[max_drawdown_df.iloc[:idxmin]
+                                   ['high_value'].idxmax(), 'cumulative']
+    low_val = max_drawdown_df.loc[idxmin, 'cumulative']
+    max_drawdown_rel = 0.0
+    if high_val + starting_balance != 0:
+        max_drawdown_rel = (high_val - low_val) / (high_val + starting_balance)
+
+    return (
+        abs(min(max_drawdown_df['drawdown'])),
+        high_date,
+        low_date,
+        high_val,
+        low_val,
+        max_drawdown_rel
+    )
+
+
+def calculate_csum(trades: pd.DataFrame, starting_balance: float = 0) -> Tuple[float, float]:
+    """
+    Calculate min/max cumsum of trades, to show if the wallet/stake amount ratio is sane
+    :param trades: DataFrame containing trades (requires columns close_date and profit_percent)
+    :param starting_balance: Add starting balance to results, to show the wallets high / low points
+    :return: Tuple (float, float) with cumsum of profit_abs
+    :raise: ValueError if trade-dataframe was found empty.
+    """
+    if len(trades) == 0:
+        raise ValueError("Trade dataframe empty.")
+
+    csum_df = pd.DataFrame()
+    csum_df['sum'] = trades['profit_abs'].cumsum()
+    csum_min = csum_df['sum'].min() + starting_balance
+    csum_max = csum_df['sum'].max() + starting_balance
+
+    return csum_min, csum_max
+
+
+def calculate_cagr(days_passed: int, starting_balance: float, final_balance: float) -> float:
+    """
+    Calculate CAGR
+    :param days_passed: Days passed between start and ending balance
+    :param starting_balance: Starting balance
+    :param final_balance: Final balance to calculate CAGR against
+    :return: CAGR
+    """
+    return (final_balance / starting_balance) ** (1 / (days_passed / 365)) - 1
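The new module can also be exercised on its own. A small example with a synthetic trades DataFrame follows; the trade values are made up, but the printed results follow from the implementation above.

```python
import pandas as pd

from freqtrade.data.metrics import calculate_csum, calculate_max_drawdown

trades = pd.DataFrame({
    'close_date': pd.to_datetime(['2022-01-01', '2022-01-02', '2022-01-03', '2022-01-04']),
    'profit_abs': [10.0, -25.0, 5.0, 30.0],
    'profit_ratio': [0.01, -0.025, 0.005, 0.03],
})

# Absolute drawdown, its high/low dates and values, and the drawdown relative to the account size.
drawdown_abs, high_date, low_date, high_val, low_val, drawdown_rel = calculate_max_drawdown(
    trades, starting_balance=1000)
print(drawdown_abs, round(drawdown_rel, 4))  # 25.0 0.0248

# Min/max of the cumulative profit curve, shifted by the starting balance.
print(calculate_csum(trades, starting_balance=1000))  # (985.0, 1020.0)
```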
@@ -9,6 +9,7 @@ import logging
 from copy import deepcopy
 from datetime import datetime, timedelta, timezone
 from math import ceil
+from threading import Lock
 from typing import Any, Coroutine, Dict, List, Literal, Optional, Tuple, Union

 import arrow
@@ -64,6 +65,7 @@ class Exchange:
         "ohlcv_params": {},
         "ohlcv_candle_limit": 500,
         "ohlcv_partial_candle": True,
+        "ohlcv_require_since": False,
         # Check https://github.com/ccxt/ccxt/issues/10767 for removal of ohlcv_volume_currency
         "ohlcv_volume_currency": "base", # "base" or "quote"
         "tickers_have_quoteVolume": True,
@@ -95,6 +97,9 @@ class Exchange:
         self._markets: Dict = {}
         self._trading_fees: Dict[str, Any] = {}
         self._leverage_tiers: Dict[str, List[Dict]] = {}
+        # Lock event loop. This is necessary to avoid race-conditions when using force* commands
+        # Due to funding fee fetching.
+        self._loop_lock = Lock()
         self.loop = asyncio.new_event_loop()
         asyncio.set_event_loop(self.loop)
         self._config: Dict = {}
@@ -166,7 +171,7 @@ class Exchange:
         self._api_async = self._init_ccxt(
             exchange_config, ccxt_async, ccxt_kwargs=ccxt_async_config)

-        logger.info('Using Exchange "%s"', self.name)
+        logger.info(f'Using Exchange "{self.name}"')

         if validate:
             # Check if timeframe is available
@@ -368,6 +373,9 @@ class Exchange:
         return (
             market.get('quote', None) is not None
             and market.get('base', None) is not None
+            and (self.precisionMode != TICK_SIZE
+                 # Too low precision will falsify calculations
+                 or market.get('precision', {}).get('price', None) > 1e-11)
             and ((self.trading_mode == TradingMode.SPOT and self.market_is_spot(market))
                  or (self.trading_mode == TradingMode.MARGIN and self.market_is_margin(market))
                  or (self.trading_mode == TradingMode.FUTURES and self.market_is_future(market)))
@@ -551,7 +559,7 @@ class Exchange:
             # Therefore we also show that.
             raise OperationalException(
                 f"The ccxt library does not provide the list of timeframes "
-                f"for the exchange \"{self.name}\" and this exchange "
+                f"for the exchange {self.name} and this exchange "
                 f"is therefore not supported. ccxt fetchOHLCV: {self.exchange_has('fetchOHLCV')}")

         if timeframe and (timeframe not in self.timeframes):
@@ -781,7 +789,9 @@ class Exchange:
                          rate: float, leverage: float, params: Dict = {},
                          stop_loss: bool = False) -> Dict[str, Any]:
         order_id = f'dry_run_{side}_{datetime.now().timestamp()}'
-        _amount = self.amount_to_precision(pair, amount)
+        # Rounding here must respect to contract sizes
+        _amount = self._contracts_to_amount(
+            pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)))
         dry_order: Dict[str, Any] = {
             'id': order_id,
             'symbol': pair,
@@ -1710,7 +1720,8 @@ class Exchange:
     def _build_coroutine(self, pair: str, timeframe: str, candle_type: CandleType,
                          since_ms: Optional[int]) -> Coroutine:

-        if not since_ms and self.required_candle_call_count > 1:
+        if (not since_ms
+                and (self._ft_has["ohlcv_require_since"] or self.required_candle_call_count > 1)):
             # Multiple calls for one pair - to get more history
             one_call = timeframe_to_msecs(timeframe) * self.ohlcv_candle_limit(timeframe)
             move_to = one_call * self.required_candle_call_count
@@ -1770,6 +1781,7 @@ class Exchange:
         async def gather_stuff():
             return await asyncio.gather(*input_coro, return_exceptions=True)

+        with self._loop_lock:
             results = self.loop.run_until_complete(gather_stuff())

         for res in results:
@@ -1829,17 +1841,18 @@ class Exchange:
                 pair, timeframe, since_ms, s
             )
             params = deepcopy(self._ft_has.get('ohlcv_params', {}))
+            candle_limit = self.ohlcv_candle_limit(timeframe)
             if candle_type != CandleType.SPOT:
                 params.update({'price': candle_type})
             if candle_type != CandleType.FUNDING_RATE:
                 data = await self._api_async.fetch_ohlcv(
                     pair, timeframe=timeframe, since=since_ms,
-                    limit=self.ohlcv_candle_limit(timeframe), params=params)
+                    limit=candle_limit, params=params)
             else:
                 # Funding rate
                 data = await self._api_async.fetch_funding_rate_history(
                     pair, since=since_ms,
-                    limit=self.ohlcv_candle_limit(timeframe))
+                    limit=candle_limit)
                 # Convert funding rate to candle pattern
                 data = [[x['timestamp'], x['fundingRate'], 0, 0, 0, 0] for x in data]
             # Some exchanges sort OHLCV in ASC order and others in DESC.
@@ -2026,6 +2039,7 @@ class Exchange:
         if not self.exchange_has("fetchTrades"):
             raise OperationalException("This exchange does not support downloading Trades.")

+        with self._loop_lock:
             return self.loop.run_until_complete(
                 self._async_get_trade_history(pair=pair, since=since,
                                               until=until, from_id=from_id))
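The dry-run change above rounds order amounts via contract counts instead of rounding the raw amount. A standalone sketch of that idea follows; the 0.001 contract size and the plain round() are illustrative stand-ins for the exchange-specific helpers used in the real code.

```python
def amount_to_contracts(amount: float, contract_size: float) -> float:
    return amount / contract_size

def contracts_to_amount(contracts: float, contract_size: float) -> float:
    return contracts * contract_size

contract_size = 0.001   # made-up example value
amount = 0.0123456
# Convert to contracts, round there, then convert back, so the dry-run order size
# matches what a futures exchange would actually accept.
contracts = round(amount_to_contracts(amount, contract_size))
print(contracts_to_amount(contracts, contract_size))  # 0.012
```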
@@ -20,6 +20,7 @@ class Ftx(Exchange):
     _ft_has: Dict = {
         "stoploss_on_exchange": True,
         "ohlcv_candle_limit": 1500,
+        "ohlcv_require_since": True,
         "ohlcv_volume_currency": "quote",
         "mark_ohlcv_price": "index",
         "mark_ohlcv_timeframe": "1h",
@@ -122,6 +122,8 @@ class FreqtradeBot(LoggingMixin):
                 self._schedule.every().day.at(t).do(update)
         self.last_process = datetime(1970, 1, 1, tzinfo=timezone.utc)

+        self.strategy.bot_start()
+
     def notify_status(self, msg: str) -> None:
         """
         Public method for users of this class (worker, etc.) to send notifications
@@ -588,7 +590,6 @@ class FreqtradeBot(LoggingMixin):
         Executes a limit buy for the given pair
         :param pair: pair for which we want to create a LIMIT_BUY
         :param stake_amount: amount of stake-currency for the pair
-        :param leverage: amount of leverage applied to this trade
         :return: True if a buy order is created, false if it fails.
         """
         time_in_force = self.strategy.order_time_in_force['entry']
@@ -667,16 +668,6 @@ class FreqtradeBot(LoggingMixin):
         amount = safe_value_fallback(order, 'filled', 'amount')
         enter_limit_filled_price = safe_value_fallback(order, 'average', 'price')

-        # TODO: this might be unnecessary, as we're calling it in update_trade_state.
-        isolated_liq = self.exchange.get_liquidation_price(
-            leverage=leverage,
-            pair=pair,
-            amount=amount,
-            open_rate=enter_limit_filled_price,
-            is_short=is_short
-        )
-        interest_rate = self.exchange.get_interest_rate()
-
         # Fee is applied twice because we make a LIMIT_BUY and LIMIT_SELL
         fee = self.exchange.get_fee(symbol=pair, taker_or_maker='maker')
         base_currency = self.exchange.get_pair_base_currency(pair)
@@ -705,8 +696,6 @@ class FreqtradeBot(LoggingMixin):
             timeframe=timeframe_to_minutes(self.config['timeframe']),
             leverage=leverage,
             is_short=is_short,
-            interest_rate=interest_rate,
-            liquidation_price=isolated_liq,
             trading_mode=self.trading_mode,
             funding_fees=funding_fees
         )
@@ -1376,7 +1365,8 @@ class FreqtradeBot(LoggingMixin):
                 default_retval=proposed_limit_rate)(
                 pair=trade.pair, trade=trade,
                 current_time=datetime.now(timezone.utc),
-                proposed_rate=proposed_limit_rate, current_profit=current_profit)
+                proposed_rate=proposed_limit_rate, current_profit=current_profit,
+                exit_tag=exit_check.exit_reason)

             limit = self.get_valid_price(custom_exit_price, proposed_limit_rate)

@@ -2,13 +2,11 @@
 Various tool function for Freqtrade and scripts
 """
 import gzip
-import hashlib
 import logging
 import re
-from copy import deepcopy
 from datetime import datetime
 from pathlib import Path
-from typing import Any, Iterator, List, Union
+from typing import Any, Iterator, List
 from typing.io import IO
 from urllib.parse import urlparse

@@ -251,34 +249,3 @@ def parse_db_uri_for_logging(uri: str):
         return uri
     pwd = parsed_db_uri.netloc.split(':')[1].split('@')[0]
     return parsed_db_uri.geturl().replace(f':{pwd}@', ':*****@')
-
-
-def get_strategy_run_id(strategy) -> str:
-    """
-    Generate unique identification hash for a backtest run. Identical config and strategy file will
-    always return an identical hash.
-    :param strategy: strategy object.
-    :return: hex string id.
-    """
-    digest = hashlib.sha1()
-    config = deepcopy(strategy.config)
-
-    # Options that have no impact on results of individual backtest.
-    not_important_keys = ('strategy_list', 'original_config', 'telegram', 'api_server')
-    for k in not_important_keys:
-        if k in config:
-            del config[k]
-
-    # Explicitly allow NaN values (e.g. max_open_trades).
-    # as it does not matter for getting the hash.
-    digest.update(rapidjson.dumps(config, default=str,
-                                  number_mode=rapidjson.NM_NAN).encode('utf-8'))
-    with open(strategy.__file__, 'rb') as fp:
-        digest.update(fp.read())
-    return digest.hexdigest().lower()
-
-
-def get_backtest_metadata_filename(filename: Union[Path, str]) -> Path:
-    """Return metadata filename for specified backtest results file."""
-    filename = Path(filename)
-    return filename.parent / Path(f'{filename.stem}.meta{filename.suffix}')
freqtrade/optimize/backtest_caching.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+import hashlib
+from copy import deepcopy
+from pathlib import Path
+from typing import Union
+
+import rapidjson
+
+
+def get_strategy_run_id(strategy) -> str:
+    """
+    Generate unique identification hash for a backtest run. Identical config and strategy file will
+    always return an identical hash.
+    :param strategy: strategy object.
+    :return: hex string id.
+    """
+    digest = hashlib.sha1()
+    config = deepcopy(strategy.config)
+
+    # Options that have no impact on results of individual backtest.
+    not_important_keys = ('strategy_list', 'original_config', 'telegram', 'api_server')
+    for k in not_important_keys:
+        if k in config:
+            del config[k]
+
+    # Explicitly allow NaN values (e.g. max_open_trades).
+    # as it does not matter for getting the hash.
+    digest.update(rapidjson.dumps(config, default=str,
+                                  number_mode=rapidjson.NM_NAN).encode('utf-8'))
+    # Include _ft_params_from_file - so changing parameter files cause cache eviction
+    digest.update(rapidjson.dumps(
+        strategy._ft_params_from_file, default=str, number_mode=rapidjson.NM_NAN).encode('utf-8'))
+    with open(strategy.__file__, 'rb') as fp:
+        digest.update(fp.read())
+    return digest.hexdigest().lower()
+
+
+def get_backtest_metadata_filename(filename: Union[Path, str]) -> Path:
+    """Return metadata filename for specified backtest results file."""
+    filename = Path(filename)
+    return filename.parent / Path(f'{filename.stem}.meta{filename.suffix}')
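A quick illustration of the metadata-filename helper defined above; the result filename is a made-up example.

```python
from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename

print(get_backtest_metadata_filename('user_data/backtest_results/backtest-result-2022-04-23.json'))
# user_data/backtest_results/backtest-result-2022-04-23.meta.json
```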
freqtrade/optimize/backtesting.py (44 changes, mode changed: Normal file → Executable file)
@ -9,6 +9,7 @@ from copy import deepcopy
|
|||||||
from datetime import datetime, timedelta, timezone
|
from datetime import datetime, timedelta, timezone
|
||||||
from typing import Any, Dict, List, Optional, Tuple
|
from typing import Any, Dict, List, Optional, Tuple
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
from numpy import nan
|
from numpy import nan
|
||||||
from pandas import DataFrame
|
from pandas import DataFrame
|
||||||
|
|
||||||
@ -23,8 +24,8 @@ from freqtrade.enums import (BacktestState, CandleType, ExitCheckTuple, ExitType
|
|||||||
TradingMode)
|
TradingMode)
|
||||||
from freqtrade.exceptions import DependencyException, OperationalException
|
from freqtrade.exceptions import DependencyException, OperationalException
|
||||||
from freqtrade.exchange import timeframe_to_minutes, timeframe_to_seconds
|
from freqtrade.exchange import timeframe_to_minutes, timeframe_to_seconds
|
||||||
from freqtrade.misc import get_strategy_run_id
|
|
||||||
from freqtrade.mixins import LoggingMixin
|
from freqtrade.mixins import LoggingMixin
|
||||||
|
from freqtrade.optimize.backtest_caching import get_strategy_run_id
|
||||||
from freqtrade.optimize.bt_progress import BTProgress
|
from freqtrade.optimize.bt_progress import BTProgress
|
||||||
from freqtrade.optimize.optimize_reports import (generate_backtest_stats, show_backtest_results,
|
from freqtrade.optimize.optimize_reports import (generate_backtest_stats, show_backtest_results,
|
||||||
store_backtest_signal_candles,
|
store_backtest_signal_candles,
|
||||||
@ -53,6 +54,11 @@ ESHORT_IDX = 8 # Exit short
|
|||||||
ENTER_TAG_IDX = 9
|
ENTER_TAG_IDX = 9
|
||||||
EXIT_TAG_IDX = 10
|
EXIT_TAG_IDX = 10
|
||||||
|
|
||||||
|
# Every change to this headers list must evaluate further usages of the resulting tuple
|
||||||
|
# and eventually change the constants for indexes at the top
|
||||||
|
HEADERS = ['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long',
|
||||||
|
'enter_short', 'exit_short', 'enter_tag', 'exit_tag']
|
||||||
|
|
||||||
|
|
||||||
class Backtesting:
|
class Backtesting:
|
||||||
"""
|
"""
|
||||||
@ -181,6 +187,7 @@ class Backtesting:
         # since a "perfect" stoploss-exit is assumed anyway
         # And the regular "stoploss" function would not apply to that case
         self.strategy.order_types['stoploss_on_exchange'] = False
+        self.strategy.bot_start()
 
     def _load_protections(self, strategy: IStrategy):
         if self.config.get('enable_protections', False):
@ -263,10 +270,18 @@ class Backtesting:
                 candle_type=CandleType.from_string(self.exchange._ft_has["mark_ohlcv_price"])
             )
             # Combine data to avoid combining the data per trade.
+            unavailable_pairs = []
             for pair in self.pairlists.whitelist:
+                if pair not in self.exchange._leverage_tiers:
+                    unavailable_pairs.append(pair)
+                    continue
                 self.futures_data[pair] = funding_rates_dict[pair].merge(
                     mark_rates_dict[pair], on='date', how="inner", suffixes=["_fund", "_mark"])
 
+            if unavailable_pairs:
+                raise OperationalException(
+                    f"Pairs {', '.join(unavailable_pairs)} got no leverage tiers available. "
+                    "It is therefore impossible to backtest with this pair at the moment.")
         else:
             self.futures_data = {}
 
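For clarity, a minimal pandas sketch of the merge pattern used above: funding-rate and mark-price candles are joined on the shared `date` column, with suffixes keeping the overlapping columns apart (the numeric values here are made up):

```python
import pandas as pd

funding = pd.DataFrame({'date': pd.to_datetime(['2022-01-01', '2022-01-02']),
                        'open': [0.01, 0.02]})
mark = pd.DataFrame({'date': pd.to_datetime(['2022-01-01', '2022-01-02']),
                     'open': [100.0, 101.0]})

# Inner join on 'date'; overlapping columns get distinguishing suffixes.
futures_data = funding.merge(mark, on='date', how="inner", suffixes=["_fund", "_mark"])
print(futures_data.columns.tolist())  # ['date', 'open_fund', 'open_mark']
```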
@ -304,10 +319,7 @@ class Backtesting:
         :param processed: a processed dictionary with format {pair, data}, which gets cleared to
             optimize memory usage!
         """
-        # Every change to this headers list must evaluate further usages of the resulting tuple
-        # and eventually change the constants for indexes at the top
-        headers = ['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long',
-                   'enter_short', 'exit_short', 'enter_tag', 'exit_tag']
+
         data: Dict = {}
         self.progress.init_step(BacktestState.CONVERT, len(processed))
@ -319,7 +331,7 @@ class Backtesting:
 
             if not pair_data.empty:
                 # Cleanup from prior runs
-                pair_data.drop(headers[5:] + ['buy', 'sell'], axis=1, errors='ignore')
+                pair_data.drop(HEADERS[5:] + ['buy', 'sell'], axis=1, errors='ignore')
 
             df_analyzed = self.strategy.advise_exit(
                 self.strategy.advise_entry(pair_data, {'pair': pair}),
@ -338,7 +350,7 @@ class Backtesting:
 
             # To avoid using data from future, we use entry/exit signals shifted
             # from the previous candle
-            for col in headers[5:]:
+            for col in HEADERS[5:]:
                 tag_col = col in ('enter_tag', 'exit_tag')
                 if col in df_analyzed.columns:
                     df_analyzed.loc[:, col] = df_analyzed.loc[:, col].replace(
@ -350,7 +362,7 @@ class Backtesting:
 
             # Convert from Pandas to list for performance reasons
             # (Looping Pandas is slow.)
-            data[pair] = df_analyzed[headers].values.tolist() if not df_analyzed.empty else []
+            data[pair] = df_analyzed[HEADERS].values.tolist() if not df_analyzed.empty else []
         return data
 
     def _get_close_rate(self, row: Tuple, trade: LocalTrade, exit: ExitCheckTuple,
@ -514,10 +526,10 @@ class Backtesting:
 
         exit_candle_time: datetime = row[DATE_IDX].to_pydatetime()
         enter = row[SHORT_IDX] if trade.is_short else row[LONG_IDX]
-        exit_ = row[ESHORT_IDX] if trade.is_short else row[ELONG_IDX]
+        exit_sig = row[ESHORT_IDX] if trade.is_short else row[ELONG_IDX]
         exit_ = self.strategy.should_exit(
             trade, row[OPEN_IDX], exit_candle_time,  # type: ignore
-            enter=enter, exit_=exit_,
+            enter=enter, exit_=exit_sig,
             low=row[LOW_IDX], high=row[HIGH_IDX]
         )
@ -539,7 +551,8 @@ class Backtesting:
                     default_retval=closerate)(
                     pair=trade.pair, trade=trade,
                     current_time=exit_candle_time,
-                    proposed_rate=closerate, current_profit=current_profit)
+                    proposed_rate=closerate, current_profit=current_profit,
+                    exit_tag=exit_.exit_reason)
             # We can't place orders lower than current low.
             # freqtrade does not support this in live, and the order would fill immediately
             if trade.is_short:
@ -566,6 +579,7 @@ class Backtesting:
                 len(row) > EXIT_TAG_IDX
                 and row[EXIT_TAG_IDX] is not None
                 and len(row[EXIT_TAG_IDX]) > 0
+                and exit_.exit_type in (ExitType.EXIT_SIGNAL,)
             ):
                 trade.exit_reason = row[EXIT_TAG_IDX]
@ -624,9 +638,7 @@ class Backtesting:
             detail_data.loc[:, 'exit_short'] = row[ESHORT_IDX]
             detail_data.loc[:, 'enter_tag'] = row[ENTER_TAG_IDX]
             detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX]
-            headers = ['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long',
-                       'enter_short', 'exit_short', 'enter_tag', 'exit_tag']
-            for det_row in detail_data[headers].values.tolist():
+            for det_row in detail_data[HEADERS].values.tolist():
                 res = self._get_exit_trade_entry_for_candle(trade, det_row)
                 if res:
                     return res
@ -1028,7 +1040,7 @@ class Backtesting:
                             timerange: TimeRange):
         self.progress.init_step(BacktestState.ANALYZE, 0)
 
-        logger.info("Running backtesting for Strategy %s", strat.get_strategy_name())
+        logger.info(f"Running backtesting for Strategy {strat.get_strategy_name()}")
         backtest_start_time = datetime.now(timezone.utc)
         self._set_strategy(strat)
@ -1095,7 +1107,7 @@ class Backtesting:
             for t, v in pairresults.open_date.items():
                 allinds = pairdf.loc[(pairdf['date'] < v)]
                 signal_inds = allinds.iloc[[-1]]
-                signal_candles_only_df = signal_candles_only_df.append(signal_inds)
+                signal_candles_only_df = pd.concat([signal_candles_only_df, signal_inds])
 
             signal_candles_only[pair] = signal_candles_only_df
 
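`DataFrame.append` was deprecated in pandas 1.4 (the pandas version pinned further down), which is presumably why the call above switches to `pd.concat`. A minimal sketch of the equivalent pattern:

```python
import pandas as pd

df = pd.DataFrame({'close': [1.0, 2.0]})
row = pd.DataFrame({'close': [3.0]})

# Deprecated since pandas 1.4:
# df = df.append(row)

# Equivalent replacement used in the hunk above:
df = pd.concat([df, row])
print(df)
```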
@ -44,6 +44,7 @@ class EdgeCli:
 
         self.edge._timerange = TimeRange.parse_timerange(None if self.config.get(
             'timerange') is None else str(self.config.get('timerange')))
+        self.strategy.bot_start()
 
     def start(self) -> None:
         result = self.edge.calculate(self.config['exchange']['pair_whitelist'])
@ -468,6 +468,7 @@ class Hyperopt:
         self.backtesting.exchange._api = None
         self.backtesting.exchange._api_async = None
         self.backtesting.exchange.loop = None  # type: ignore
+        self.backtesting.exchange._loop_lock = None  # type: ignore
         # self.backtesting.exchange = None  # type: ignore
         self.backtesting.pairlists = None  # type: ignore
@ -10,7 +10,7 @@ from typing import Any, Dict
 
 from pandas import DataFrame
 
-from freqtrade.data.btanalysis import calculate_max_drawdown
+from freqtrade.data.metrics import calculate_max_drawdown
 from freqtrade.optimize.hyperopt import IHyperOptLoss
 
@ -8,7 +8,7 @@ from datetime import datetime
 
 from pandas import DataFrame
 
-from freqtrade.data.btanalysis import calculate_max_drawdown
+from freqtrade.data.metrics import calculate_max_drawdown
 from freqtrade.optimize.hyperopt import IHyperOptLoss
 
@ -9,7 +9,7 @@ individual needs.
 """
 from pandas import DataFrame
 
-from freqtrade.data.btanalysis import calculate_max_drawdown
+from freqtrade.data.metrics import calculate_max_drawdown
 from freqtrade.optimize.hyperopt import IHyperOptLoss
 
@ -9,10 +9,10 @@ from pandas import DataFrame, to_datetime
 from tabulate import tabulate
 
 from freqtrade.constants import DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN, UNLIMITED_STAKE_AMOUNT
-from freqtrade.data.btanalysis import (calculate_csum, calculate_market_change,
+from freqtrade.data.metrics import (calculate_cagr, calculate_csum, calculate_market_change,
                                     calculate_max_drawdown)
-from freqtrade.misc import (decimals_per_coin, file_dump_joblib, file_dump_json,
-                            get_backtest_metadata_filename, round_coin_value)
+from freqtrade.misc import decimals_per_coin, file_dump_joblib, file_dump_json, round_coin_value
+from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
 
 
 logger = logging.getLogger(__name__)
@ -446,6 +446,7 @@ def generate_strategy_stats(pairlist: List[str],
         'profit_total_abs': results['profit_abs'].sum(),
         'profit_total_long_abs': results.loc[~results['is_short'], 'profit_abs'].sum(),
         'profit_total_short_abs': results.loc[results['is_short'], 'profit_abs'].sum(),
+        'cagr': calculate_cagr(backtest_days, start_balance, content['final_balance']),
         'backtest_start': min_date.strftime(DATETIME_PRINT_FORMAT),
         'backtest_start_ts': int(min_date.timestamp() * 1000),
         'backtest_end': max_date.strftime(DATETIME_PRINT_FORMAT),
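The new `cagr` metric annualises the backtest return. Based on the parametrised test cases added further down, a minimal sketch of the calculation (the exact implementation lives in `freqtrade.data.metrics`, so treat this as an approximation of it):

```python
def calculate_cagr(days_passed: int, starting_balance: float, final_balance: float) -> float:
    """Compound annual growth rate, annualised over the backtest duration."""
    return (final_balance / starting_balance) ** (365 / days_passed) - 1


# Matches the expectations in the new test_calculate_cagr cases below,
# e.g. 1000 -> 1500 over one year is 0.5 (50 %).
assert round(calculate_cagr(365, 1000, 1500), 4) == 0.5
assert round(calculate_cagr(3 * 365, 64900, 176000), 4) == 0.3945
```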
@ -746,6 +747,7 @@ def text_table_add_metrics(strat_results: Dict) -> str:
         ('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
                                               strat_results['stake_currency'])),
         ('Total profit %', f"{strat_results['profit_total']:.2%}"),
+        ('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
         ('Trades per day', strat_results['trades_per_day']),
         ('Avg. daily profit %',
          f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
@ -429,12 +429,10 @@ class LocalTrade():
 
     def __repr__(self):
         open_since = self.open_date.strftime(DATETIME_PRINT_FORMAT) if self.is_open else 'closed'
-        leverage = self.leverage or 1.0
-        is_short = self.is_short or False
 
         return (
             f'Trade(id={self.id}, pair={self.pair}, amount={self.amount:.8f}, '
-            f'is_short={is_short}, leverage={leverage}, '
+            f'is_short={self.is_short or False}, leverage={self.leverage or 1.0}, '
             f'open_rate={self.open_rate:.8f}, open_since={open_since})'
         )
 
@ -5,12 +5,13 @@ from typing import Any, Dict, List, Optional
 import pandas as pd
 
 from freqtrade.configuration import TimeRange
-from freqtrade.data.btanalysis import (analyze_trade_parallelism, calculate_max_drawdown,
-                                       calculate_underwater, combine_dataframes_with_mean,
-                                       create_cum_profit, extract_trades_of_period, load_trades)
+from freqtrade.data.btanalysis import (analyze_trade_parallelism, extract_trades_of_period,
+                                       load_trades)
 from freqtrade.data.converter import trim_dataframe
 from freqtrade.data.dataprovider import DataProvider
 from freqtrade.data.history import get_timerange, load_data
+from freqtrade.data.metrics import (calculate_max_drawdown, calculate_underwater,
+                                    combine_dataframes_with_mean, create_cum_profit)
 from freqtrade.enums import CandleType
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_prev_date, timeframe_to_seconds
@ -610,6 +611,7 @@ def load_and_plot_trades(config: Dict[str, Any]):
 
     exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config)
     IStrategy.dp = DataProvider(config, exchange)
+    strategy.bot_start()
     plot_elements = init_plotscript(config, list(exchange.markets), strategy.startup_candle_count)
     timerange = plot_elements['timerange']
     trades = plot_elements['trades']
@ -6,7 +6,7 @@ from typing import Any, Dict, Optional
 import pandas as pd
 
 from freqtrade.constants import LongShort
-from freqtrade.data.btanalysis import calculate_max_drawdown
+from freqtrade.data.metrics import calculate_max_drawdown
 from freqtrade.persistence import Trade
 from freqtrade.plugins.protections import IProtection, ProtectionReturn
 
@ -23,7 +23,7 @@ class HyperOptLossResolver(IResolver):
     object_type = IHyperOptLoss
     object_type_str = "HyperoptLoss"
     user_subdir = USERPATH_HYPEROPTS
-    initial_search_path = Path(__file__).parent.parent.joinpath('optimize').resolve()
+    initial_search_path = Path(__file__).parent.parent.joinpath('optimize/hyperopt_loss').resolve()
 
     @staticmethod
     def load_hyperoptloss(config: Dict) -> IHyperOptLoss:
@ -217,15 +217,19 @@ class StrategyResolver(IResolver):
             raise OperationalException(
                 "`populate_exit_trend` or `populate_sell_trend` must be implemented.")
 
-        strategy._populate_fun_len = len(getfullargspec(strategy.populate_indicators).args)
-        strategy._buy_fun_len = len(getfullargspec(strategy.populate_buy_trend).args)
-        strategy._sell_fun_len = len(getfullargspec(strategy.populate_sell_trend).args)
+        _populate_fun_len = len(getfullargspec(strategy.populate_indicators).args)
+        _buy_fun_len = len(getfullargspec(strategy.populate_buy_trend).args)
+        _sell_fun_len = len(getfullargspec(strategy.populate_sell_trend).args)
         if any(x == 2 for x in [
-            strategy._populate_fun_len,
-            strategy._buy_fun_len,
-            strategy._sell_fun_len
+            _populate_fun_len,
+            _buy_fun_len,
+            _sell_fun_len
         ]):
-            strategy.INTERFACE_VERSION = 1
+            raise OperationalException(
+                "Strategy Interface v1 is no longer supported. "
+                "Please update your strategy to implement "
+                "`populate_indicators`, `populate_entry_trend` and `populate_exit_trend` "
+                "with the metadata argument. ")
         return strategy
 
     @staticmethod
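Strategies written against interface v1 (populate methods without the `metadata` argument) now fail to load instead of being silently downgraded. A small sketch of the signatures the resolver expects after this change (the class name is illustrative, and the surrounding strategy attributes are omitted):

```python
from pandas import DataFrame


class PartialExample:
    # Old (interface v1) style - no metadata argument; such strategies now raise on load:
    #
    #     def populate_indicators(self, dataframe: DataFrame) -> DataFrame:
    #         ...
    #
    # Current required style - every populate_* method takes the metadata dict:
    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe
```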
@ -84,6 +84,7 @@ async def api_start_backtest(bt_settings: BacktestRequest, background_tasks: Bac
         lastconfig['enable_protections'] = btconfig.get('enable_protections')
         lastconfig['dry_run_wallet'] = btconfig.get('dry_run_wallet')
 
+        ApiServer._bt.strategylist = [strat]
         ApiServer._bt.results = {}
         ApiServer._bt.load_prior_backtest()
 
@ -2,7 +2,7 @@ import logging
 from ipaddress import IPv4Address
 from typing import Any, Dict
 
-import rapidjson
+import orjson
 import uvicorn
 from fastapi import Depends, FastAPI
 from fastapi.middleware.cors import CORSMiddleware
@ -24,7 +24,7 @@ class FTJSONResponse(JSONResponse):
         Use rapidjson for responses
         Handles NaN and Inf / -Inf in a javascript way by default.
         """
-        return rapidjson.dumps(content).encode("utf-8")
+        return orjson.dumps(content, option=orjson.OPT_SERIALIZE_NUMPY)
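orjson serialises NaN as `null` rather than a literal `NaN` token, which lines up with the API test change further down (`profit_abs` becoming `None` on the client side). A small sketch of the difference, assuming both libraries are installed:

```python
import math

import orjson
import rapidjson

payload = {'profit_abs': math.nan}

# rapidjson can emit a literal NaN token (not valid strict JSON):
print(rapidjson.dumps(payload, number_mode=rapidjson.NM_NAN))  # {"profit_abs":NaN}

# orjson always emits null for NaN, so API clients receive None:
print(orjson.dumps(payload))  # b'{"profit_abs":null}'
```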
@ -943,7 +943,7 @@ class Telegram(RPCHandler):
         else:
             fiat_currency = self._config.get('fiat_display_currency', '')
         try:
-            statlist, head, fiat_profit_sum = self._rpc._rpc_status_table(
+            statlist, _, _ = self._rpc._rpc_status_table(
                 self._config['stake_currency'], fiat_currency)
         except RPCException:
             self._send_msg(msg='No open trade found.')
@ -23,7 +23,7 @@ class InformativeData:
 def informative(timeframe: str, asset: str = '',
                 fmt: Optional[Union[str, Callable[[Any], str]]] = None,
                 *,
-                candle_type: Optional[CandleType] = None,
+                candle_type: Optional[Union[CandleType, str]] = None,
                 ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]:
     """
     A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to
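With the widened type hint, the decorator should also accept a plain string for `candle_type` instead of a `CandleType` member. An illustrative usage sketch (the pair, timeframe and derived column are made-up examples, and the strategy skeleton is reduced to the minimum):

```python
from pandas import DataFrame

from freqtrade.strategy import IStrategy, informative


class InformativeExample(IStrategy):
    timeframe = '5m'
    minimal_roi = {"0": 0.04}
    stoploss = -0.10

    # candle_type may now be given as a string instead of a CandleType member.
    @informative('1h', 'BTC/USDT', candle_type='spot')
    def populate_indicators_btc_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe['close_mean'] = dataframe['close'].rolling(24).mean()
        return dataframe

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe
```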
@ -3,7 +3,6 @@ IStrategy interface
 This module defines the interface to apply for strategies
 """
 import logging
-import warnings
 from abc import ABC, abstractmethod
 from datetime import datetime, timedelta, timezone
 from typing import Dict, List, Optional, Tuple, Union
@ -44,14 +43,11 @@ class IStrategy(ABC, HyperStrategyMixin):
     """
     # Strategy interface version
     # Default to version 2
-    # Version 1 is the initial interface without metadata dict
+    # Version 1 is the initial interface without metadata dict - deprecated and no longer supported.
     # Version 2 populate_* include metadata dict
     # Version 3 - First version with short and leverage support
     INTERFACE_VERSION: int = 3
 
-    _populate_fun_len: int = 0
-    _buy_fun_len: int = 0
-    _sell_fun_len: int = 0
     _ft_params_from_file: Dict
     # associated minimal roi
     minimal_roi: Dict = {}
@ -114,7 +110,7 @@ class IStrategy(ABC, HyperStrategyMixin):
     # Class level variables (intentional) containing
     # the dataprovider (dp) (access to other candles, historic data, ...)
    # and wallets - access to the current balance.
-    dp: Optional[DataProvider]
+    dp: DataProvider
     wallets: Optional[Wallets] = None
     # Filled from configuration
     stake_currency: str
@ -197,6 +193,13 @@ class IStrategy(ABC, HyperStrategyMixin):
         """
         return self.populate_sell_trend(dataframe, metadata)
 
+    def bot_start(self, **kwargs) -> None:
+        """
+        Called only once after bot instantiation.
+        :param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
+        """
+        pass
+
     def bot_loop_start(self, **kwargs) -> None:
         """
         Called at the start of the bot iteration (one loop).
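A minimal sketch of a strategy overriding the new callback (class name and the attribute set inside it are illustrative, not part of freqtrade):

```python
from pandas import DataFrame

from freqtrade.strategy import IStrategy


class BotStartExample(IStrategy):
    timeframe = '5m'
    minimal_roi = {"0": 0.04}
    stoploss = -0.10

    def bot_start(self, **kwargs) -> None:
        # Runs exactly once after the bot (or backtest/edge run) instantiates the
        # strategy, before bot_loop_start - useful for one-off setup work.
        self.startup_message = "strategy initialised"  # illustrative attribute

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe
```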
@ -359,7 +362,7 @@ class IStrategy(ABC, HyperStrategyMixin):
 
     def custom_exit_price(self, pair: str, trade: Trade,
                           current_time: datetime, proposed_rate: float,
-                          current_profit: float, **kwargs) -> float:
+                          current_profit: float, exit_tag: Optional[str], **kwargs) -> float:
         """
         Custom exit price logic, returning the new exit price.
@ -372,6 +375,7 @@ class IStrategy(ABC, HyperStrategyMixin):
         :param current_time: datetime object, containing the current datetime
         :param proposed_rate: Rate, calculated based on pricing settings in exit_pricing.
         :param current_profit: Current profit (as ratio), calculated based on current_rate.
+        :param exit_tag: Exit reason.
         :param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
         :return float: New exit price value if provided
         """
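An illustrative override of the extended callback, shown as a lone method excerpt from a strategy class (the `'panic_exit'` tag value and the price adjustment are hypothetical):

```python
from datetime import datetime
from typing import Optional

from freqtrade.persistence import Trade


def custom_exit_price(self, pair: str, trade: Trade,
                      current_time: datetime, proposed_rate: float,
                      current_profit: float, exit_tag: Optional[str],
                      **kwargs) -> float:
    # Shave the exit price slightly for a hypothetical 'panic_exit' tag,
    # otherwise keep the rate proposed by the exit pricing settings.
    if exit_tag == 'panic_exit':
        return proposed_rate * 0.999
    return proposed_rate
```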
@ -1092,11 +1096,6 @@ class IStrategy(ABC, HyperStrategyMixin):
             dataframe = _create_and_merge_informative_pair(
                 self, dataframe, metadata, inf_data, populate_fn)
 
-        if self._populate_fun_len == 2:
-            warnings.warn("deprecated - check out the Sample strategy to see "
-                          "the current function headers!", DeprecationWarning)
-            return self.populate_indicators(dataframe)  # type: ignore
-        else:
-            return self.populate_indicators(dataframe, metadata)
+        return self.populate_indicators(dataframe, metadata)
 
     def advise_entry(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
@ -1111,11 +1110,6 @@ class IStrategy(ABC, HyperStrategyMixin):
 
         logger.debug(f"Populating enter signals for pair {metadata.get('pair')}.")
 
-        if self._buy_fun_len == 2:
-            warnings.warn("deprecated - check out the Sample strategy to see "
-                          "the current function headers!", DeprecationWarning)
-            df = self.populate_buy_trend(dataframe)  # type: ignore
-        else:
-            df = self.populate_entry_trend(dataframe, metadata)
+        df = self.populate_entry_trend(dataframe, metadata)
         if 'enter_long' not in df.columns:
             df = df.rename({'buy': 'enter_long', 'buy_tag': 'enter_tag'}, axis='columns')
@ -1131,13 +1125,7 @@ class IStrategy(ABC, HyperStrategyMixin):
             currently traded pair
         :return: DataFrame with exit column
         """
-
         logger.debug(f"Populating exit signals for pair {metadata.get('pair')}.")
-        if self._sell_fun_len == 2:
-            warnings.warn("deprecated - check out the Sample strategy to see "
-                          "the current function headers!", DeprecationWarning)
-            df = self.populate_sell_trend(dataframe)  # type: ignore
-        else:
-            df = self.populate_exit_trend(dataframe, metadata)
+        df = self.populate_exit_trend(dataframe, metadata)
         if 'exit_long' not in df.columns:
             df = df.rename({'sell': 'exit_long'}, axis='columns')
@ -32,7 +32,7 @@ def custom_entry_price(self, pair: str, current_time: 'datetime', proposed_rate:
 
 def custom_exit_price(self, pair: str, trade: 'Trade',
                       current_time: 'datetime', proposed_rate: float,
-                      current_profit: float, **kwargs) -> float:
+                      current_profit: float, exit_tag: Optional[str], **kwargs) -> float:
     """
     Custom exit price logic, returning the new exit price.
@ -45,6 +45,7 @@ def custom_exit_price(self, pair: str, trade: 'Trade',
     :param current_time: datetime object, containing the current datetime
     :param proposed_rate: Rate, calculated based on pricing settings in exit_pricing.
     :param current_profit: Current profit (as ratio), calculated based on current_rate.
+    :param exit_tag: Exit reason.
     :param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
     :return float: New exit price value if provided
     """
@ -23,7 +23,7 @@ exclude = '''
 line_length = 100
 multi_line_output=0
 lines_after_imports=2
-skip_glob = ["**/.env*", "**/env/*", "**/.venv/*", "**/docs/*"]
+skip_glob = ["**/.env*", "**/env/*", "**/.venv/*", "**/docs/*", "**/user_data/*"]
 
 [tool.pytest.ini_options]
 asyncio_mode = "auto"
@ -9,7 +9,7 @@ flake8==4.0.1
 flake8-tidy-imports==4.6.0
 mypy==0.942
 pre-commit==2.18.1
-pytest==7.1.1
+pytest==7.1.2
 pytest-asyncio==0.18.3
 pytest-cov==3.0.0
 pytest-mock==3.7.0
@ -24,8 +24,6 @@ nbconvert==6.5.0
 # mypy types
 types-cachetools==5.0.1
 types-filelock==3.2.5
-types-requests==2.27.19
+types-requests==2.27.20
 types-tabulate==0.8.7
+types-python-dateutil==2.8.12
-
-# Extensions to datetime library
-types-python-dateutil==2.8.11
@ -2,7 +2,7 @@ numpy==1.22.3
 pandas==1.4.2
 pandas-ta==0.3.14b
 
-ccxt==1.79.81
+ccxt==1.80.61
 # Pin cryptography for now due to rust build errors with piwheels
 cryptography==36.0.2
 aiohttp==3.8.1
@ -27,6 +27,8 @@ py_find_1st==1.1.5
 
 # Load ticker files 30% faster
 python-rapidjson==1.6
+# Properly format api responses
+orjson==3.6.8
 
 # Notify systemd
 sdnotify==0.3.2
@ -53,6 +53,10 @@ exclude =
 [mypy]
 ignore_missing_imports = True
 warn_unused_ignores = True
+exclude = (?x)(
+    ^build_helpers\.py$
+  )
+
 
 [mypy-tests.*]
 ignore_errors = True
|
1  setup.py
@ -57,6 +57,7 @@ setup(
         'pycoingecko',
         'py_find_1st',
         'python-rapidjson',
+        'orjson',
         'sdnotify',
         'colorama',
         'jinja2',
@ -859,8 +859,8 @@ def test_start_list_strategies(capsys):
     # pargs['config'] = None
     start_list_strategies(pargs)
     captured = capsys.readouterr()
-    assert "TestStrategyLegacyV1" in captured.out
-    assert "legacy_strategy_v1.py" not in captured.out
+    assert "StrategyTestV2" in captured.out
+    assert "strategy_test_v2.py" not in captured.out
     assert CURRENT_TEST_STRATEGY in captured.out
 
     # Test regular output
@ -874,8 +874,8 @@ def test_start_list_strategies(capsys):
     # pargs['config'] = None
     start_list_strategies(pargs)
     captured = capsys.readouterr()
-    assert "TestStrategyLegacyV1" in captured.out
-    assert "legacy_strategy_v1.py" in captured.out
+    assert "StrategyTestV2" in captured.out
+    assert "strategy_test_v2.py" in captured.out
     assert CURRENT_TEST_STRATEGY in captured.out
 
     # Test color output
@ -888,8 +888,8 @@ def test_start_list_strategies(capsys):
     # pargs['config'] = None
     start_list_strategies(pargs)
     captured = capsys.readouterr()
-    assert "TestStrategyLegacyV1" in captured.out
-    assert "legacy_strategy_v1.py" in captured.out
+    assert "StrategyTestV2" in captured.out
+    assert "strategy_test_v2.py" in captured.out
     assert CURRENT_TEST_STRATEGY in captured.out
     assert "LOAD FAILED" in captured.out
     # Recursive
@ -907,8 +907,8 @@ def test_start_list_strategies(capsys):
     # pargs['config'] = None
     start_list_strategies(pargs)
     captured = capsys.readouterr()
-    assert "TestStrategyLegacyV1" in captured.out
-    assert "legacy_strategy_v1.py" in captured.out
+    assert "StrategyTestV2" in captured.out
+    assert "strategy_test_v2.py" in captured.out
     assert "StrategyTestV2" in captured.out
     assert "TestStrategyNoImplements" in captured.out
     assert str(Path("broken_strats/broken_futures_strategies.py")) in captured.out
@ -8,14 +8,14 @@ from pandas import DataFrame, DateOffset, Timestamp, to_datetime
 
 from freqtrade.configuration import TimeRange
 from freqtrade.constants import LAST_BT_RESULT_FN
-from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism, calculate_csum,
-                                       calculate_market_change, calculate_max_drawdown,
-                                       calculate_underwater, combine_dataframes_with_mean,
-                                       create_cum_profit, extract_trades_of_period,
-                                       get_latest_backtest_filename, get_latest_hyperopt_file,
-                                       load_backtest_data, load_backtest_metadata, load_trades,
-                                       load_trades_from_db)
+from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism,
+                                       extract_trades_of_period, get_latest_backtest_filename,
+                                       get_latest_hyperopt_file, load_backtest_data,
+                                       load_backtest_metadata, load_trades, load_trades_from_db)
 from freqtrade.data.history import load_data, load_pair_history
+from freqtrade.data.metrics import (calculate_cagr, calculate_csum, calculate_market_change,
+                                    calculate_max_drawdown, calculate_underwater,
+                                    combine_dataframes_with_mean, create_cum_profit)
 from freqtrade.exceptions import OperationalException
 from tests.conftest import CURRENT_TEST_STRATEGY, create_mock_trades
 from tests.conftest_trades import MOCK_TRADE_COUNT
@ -336,6 +336,19 @@ def test_calculate_csum(testdatadir):
     csum_min, csum_max = calculate_csum(DataFrame())
 
 
+@pytest.mark.parametrize('start,end,days, expected', [
+    (64900, 176000, 3 * 365, 0.3945),
+    (64900, 176000, 365, 1.7119),
+    (1000, 1000, 365, 0.0),
+    (1000, 1500, 365, 0.5),
+    (1000, 1500, 100, 3.3927),  # sub year
+    (0.01000000, 0.01762792, 120, 4.6087),  # sub year BTC values
+])
+def test_calculate_cagr(start, end, days, expected):
+
+    assert round(calculate_cagr(days, start, end), 4) == expected
+
+
 def test_calculate_max_drawdown2():
     values = [0.011580, 0.010048, 0.011340, 0.012161, 0.010416, 0.010009, 0.020024,
               -0.024662, -0.022350, 0.020496, -0.029859, -0.030511, 0.010041, 0.010872,
@ -909,7 +909,7 @@ def test_validate_timeframes_emulated_ohlcv_1(default_conf, mocker):
     mocker.patch('freqtrade.exchange.Exchange.validate_stakecurrency')
     with pytest.raises(OperationalException,
                        match=r'The ccxt library does not provide the list of timeframes '
-                             r'for the exchange ".*" and this exchange '
+                             r'for the exchange .* and this exchange '
                              r'is therefore not supported. *'):
         Exchange(default_conf)
 
@ -930,7 +930,7 @@ def test_validate_timeframes_emulated_ohlcvi_2(default_conf, mocker):
     mocker.patch('freqtrade.exchange.Exchange.validate_stakecurrency')
     with pytest.raises(OperationalException,
                        match=r'The ccxt library does not provide the list of timeframes '
-                             r'for the exchange ".*" and this exchange '
+                             r'for the exchange .* and this exchange '
                              r'is therefore not supported. *'):
         Exchange(default_conf)
@ -22,7 +22,7 @@ from freqtrade.data.history import get_timerange
 from freqtrade.enums import ExitType, RunMode
 from freqtrade.exceptions import DependencyException, OperationalException
 from freqtrade.exchange.exchange import timeframe_to_next_date
-from freqtrade.misc import get_strategy_run_id
+from freqtrade.optimize.backtest_caching import get_strategy_run_id
 from freqtrade.optimize.backtesting import Backtesting
 from freqtrade.persistence import LocalTrade
 from freqtrade.resolvers import StrategyResolver
@ -312,6 +312,7 @@ def test_backtesting_init(mocker, default_conf, order_types) -> None:
     get_fee.assert_called()
     assert backtesting.fee == 0.5
     assert not backtesting.strategy.order_types["stoploss_on_exchange"]
+    assert backtesting.strategy.bot_started is True
 
 
 def test_backtesting_init_no_timeframe(mocker, default_conf, caplog) -> None:
@ -500,7 +501,7 @@ def test_backtesting_pairlist_list(default_conf, mocker, caplog, testdatadir, ti
         Backtesting(default_conf)
 
     # Multiple strategies
-    default_conf['strategy_list'] = [CURRENT_TEST_STRATEGY, 'TestStrategyLegacyV1']
+    default_conf['strategy_list'] = [CURRENT_TEST_STRATEGY, 'StrategyTestV2']
     with pytest.raises(OperationalException,
                        match='PrecisionFilter not allowed for backtesting multiple strategies.'):
         Backtesting(default_conf)
@ -1198,7 +1199,7 @@ def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir):
         '--disable-max-market-positions',
         '--strategy-list',
         CURRENT_TEST_STRATEGY,
-        'TestStrategyLegacyV1',
+        'StrategyTestV2',
     ]
     args = get_args(args)
     start_backtesting(args)
@ -1221,14 +1222,13 @@ def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir):
         'up to 2017-11-14 22:58:00 (0 days).',
         'Parameter --enable-position-stacking detected ...',
         f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
-        'Running backtesting for Strategy TestStrategyLegacyV1',
+        'Running backtesting for Strategy StrategyTestV2',
     ]
 
     for line in exists:
         assert log_has(line, caplog)
 
 
-@pytest.mark.filterwarnings("ignore:deprecated")
 def test_backtest_start_multi_strat_nomock(default_conf, mocker, caplog, testdatadir, capsys):
     default_conf.update({
         "use_exit_signal": True,
@ -1310,7 +1310,7 @@ def test_backtest_start_multi_strat_nomock(default_conf, mocker, caplog, testdat
         '--breakdown', 'day',
         '--strategy-list',
         CURRENT_TEST_STRATEGY,
-        'TestStrategyLegacyV1',
+        'StrategyTestV2',
     ]
     args = get_args(args)
     start_backtesting(args)
@ -1327,7 +1327,7 @@ def test_backtest_start_multi_strat_nomock(default_conf, mocker, caplog, testdat
         'up to 2017-11-14 22:58:00 (0 days).',
         'Parameter --enable-position-stacking detected ...',
         f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}',
-        'Running backtesting for Strategy TestStrategyLegacyV1',
+        'Running backtesting for Strategy StrategyTestV2',
     ]
 
     for line in exists:
@ -1342,6 +1342,39 @@ def test_backtest_start_multi_strat_nomock(default_conf, mocker, caplog, testdat
     assert 'STRATEGY SUMMARY' in captured.out
 
 
+@pytest.mark.filterwarnings("ignore:deprecated")
+def test_backtest_start_futures_noliq(default_conf_usdt, mocker,
+                                      caplog, testdatadir, capsys):
+    # Tests detail-data loading
+    default_conf_usdt.update({
+        "trading_mode": "futures",
+        "margin_mode": "isolated",
+        "use_exit_signal": True,
+        "exit_profit_only": False,
+        "exit_profit_offset": 0.0,
+        "ignore_roi_if_entry_signal": False,
+        "strategy": CURRENT_TEST_STRATEGY,
+    })
+    patch_exchange(mocker)
+
+    mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
+                 PropertyMock(return_value=['HULUMULU/USDT', 'XRP/USDT']))
+    # mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock)
+
+    patched_configuration_load_config_file(mocker, default_conf_usdt)
+
+    args = [
+        'backtesting',
+        '--config', 'config.json',
+        '--datadir', str(testdatadir),
+        '--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'),
+        '--timeframe', '1h',
+    ]
+    args = get_args(args)
+    with pytest.raises(OperationalException, match=r"Pairs .* got no leverage tiers available\."):
+        start_backtesting(args)
+
+
 @pytest.mark.filterwarnings("ignore:deprecated")
 def test_backtest_start_nomock_futures(default_conf_usdt, mocker,
                                        caplog, testdatadir, capsys):
@ -1592,7 +1625,7 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
     min_backtest_date = now - timedelta(weeks=4)
     load_backtest_metadata = MagicMock(return_value={
         'StrategyTestV2': {'run_id': '1', 'backtest_start_time': now.timestamp()},
-        'TestStrategyLegacyV1': {'run_id': run_id, 'backtest_start_time': start_time.timestamp()}
+        'StrategyTestV3': {'run_id': run_id, 'backtest_start_time': start_time.timestamp()}
     })
     load_backtest_stats = MagicMock(side_effect=[
         {
@ -1601,9 +1634,9 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
             'strategy_comparison': [{'key': 'StrategyTestV2'}]
         },
         {
-            'metadata': {'TestStrategyLegacyV1': {'run_id': '2'}},
-            'strategy': {'TestStrategyLegacyV1': {}},
-            'strategy_comparison': [{'key': 'TestStrategyLegacyV1'}]
+            'metadata': {'StrategyTestV3': {'run_id': '2'}},
+            'strategy': {'StrategyTestV3': {}},
+            'strategy_comparison': [{'key': 'StrategyTestV3'}]
         }
     ])
     mocker.patch('pathlib.Path.glob', return_value=[
@ -1627,7 +1660,7 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
         '--cache', cache,
         '--strategy-list',
         'StrategyTestV2',
-        'TestStrategyLegacyV1',
+        'StrategyTestV3',
    ]
     args = get_args(args)
     start_backtesting(args)
@ -1649,7 +1682,7 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
         assert backtestmock.call_count == 2
         exists = [
             'Running backtesting for Strategy StrategyTestV2',
-            'Running backtesting for Strategy TestStrategyLegacyV1',
+            'Running backtesting for Strategy StrategyTestV3',
             'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
             'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:58:00 (0 days).',
         ]
@ -1657,12 +1690,12 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
         assert backtestmock.call_count == 0
         exists = [
             'Reusing result of previous backtest for StrategyTestV2',
-            'Reusing result of previous backtest for TestStrategyLegacyV1',
+            'Reusing result of previous backtest for StrategyTestV3',
         ]
     else:
         exists = [
             'Reusing result of previous backtest for StrategyTestV2',
-            'Running backtesting for Strategy TestStrategyLegacyV1',
+            'Running backtesting for Strategy StrategyTestV3',
             'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
             'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:58:00 (0 days).',
         ]
@ -94,6 +94,7 @@ def test_edge_init(mocker, edge_conf) -> None:
     assert edge_cli.config == edge_conf
     assert edge_cli.config['stake_amount'] == 'unlimited'
     assert callable(edge_cli.edge.calculate)
+    assert edge_cli.strategy.bot_started is True
 
 
 def test_edge_init_fee(mocker, edge_conf) -> None:
@ -4,7 +4,7 @@ from unittest.mock import MagicMock
 import pytest
 
 from freqtrade.exceptions import OperationalException
-from freqtrade.optimize.hyperopt_loss_short_trade_dur import ShortTradeDurHyperOptLoss
+from freqtrade.optimize.hyperopt_loss.hyperopt_loss_short_trade_dur import ShortTradeDurHyperOptLoss
 from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver
@ -13,7 +13,6 @@ import uvicorn
|
|||||||
from fastapi import FastAPI
|
from fastapi import FastAPI
|
||||||
from fastapi.exceptions import HTTPException
|
from fastapi.exceptions import HTTPException
|
||||||
from fastapi.testclient import TestClient
|
from fastapi.testclient import TestClient
|
||||||
from numpy import isnan
|
|
||||||
from requests.auth import _basic_auth_str
|
from requests.auth import _basic_auth_str
|
||||||
|
|
||||||
from freqtrade.__init__ import __version__
|
from freqtrade.__init__ import __version__
|
||||||
@ -985,7 +984,7 @@ def test_api_status(botclient, mocker, ticker, fee, markets, is_short,
|
|||||||
assert_response(rc)
|
assert_response(rc)
|
||||||
resp_values = rc.json()
|
resp_values = rc.json()
|
||||||
assert len(resp_values) == 4
|
assert len(resp_values) == 4
|
||||||
assert isnan(resp_values[0]['profit_abs'])
|
assert resp_values[0]['profit_abs'] is None
|
||||||
|
|
||||||
|
|
||||||
def test_api_version(botclient):
|
def test_api_version(botclient):
|
||||||
@ -1389,7 +1388,6 @@ def test_api_strategies(botclient):
|
|||||||
'StrategyTestV2',
|
'StrategyTestV2',
|
||||||
'StrategyTestV3',
|
'StrategyTestV3',
|
||||||
'StrategyTestV3Futures',
|
'StrategyTestV3Futures',
|
||||||
'TestStrategyLegacyV1',
|
|
||||||
]}
|
]}
|
||||||
|
|
||||||
|
|
||||||
@ -1485,7 +1483,7 @@ def test_api_backtesting(botclient, mocker, fee, caplog, tmpdir):
|
|||||||
assert not result['running']
|
assert not result['running']
|
||||||
assert result['status_msg'] == 'Backtest reset'
|
assert result['status_msg'] == 'Backtest reset'
|
||||||
ftbot.config['export'] = 'trades'
|
ftbot.config['export'] = 'trades'
|
||||||
ftbot.config['backtest_cache'] = 'none'
|
ftbot.config['backtest_cache'] = 'day'
|
||||||
ftbot.config['user_data_dir'] = Path(tmpdir)
|
ftbot.config['user_data_dir'] = Path(tmpdir)
|
||||||
ftbot.config['exportfilename'] = Path(tmpdir) / "backtest_results"
|
ftbot.config['exportfilename'] = Path(tmpdir) / "backtest_results"
|
||||||
ftbot.config['exportfilename'].mkdir()
|
ftbot.config['exportfilename'].mkdir()
|
||||||
@@ -1558,19 +1556,19 @@ def test_api_backtesting(botclient, mocker, fee, caplog, tmpdir):
 
     ApiServer._bgtask_running = False
 
-    mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest_one_strategy',
-                 side_effect=DependencyException())
-    rc = client_post(client, f"{BASE_URI}/backtest", data=json.dumps(data))
-    assert log_has("Backtesting caused an error: ", caplog)
-
-    ftbot.config['backtest_cache'] = 'day'
-
     # Rerun backtest (should get previous result)
     rc = client_post(client, f"{BASE_URI}/backtest", data=json.dumps(data))
     assert_response(rc)
     result = rc.json()
     assert log_has_re('Reusing result of previous backtest.*', caplog)
 
+    data['stake_amount'] = 101
+
+    mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest_one_strategy',
+                 side_effect=DependencyException())
+    rc = client_post(client, f"{BASE_URI}/backtest", data=json.dumps(data))
+    assert log_has("Backtesting caused an error: ", caplog)
+
     # Delete backtesting to avoid leakage since the backtest-object may stick around.
     rc = client_delete(client, f"{BASE_URI}/backtest")
     assert_response(rc)
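
The reordering above appears to exercise the cached-result path first and only then force a failure: bumping data['stake_amount'] changes the request payload, presumably so the day-cache no longer applies and the patched backtest_one_strategy is actually reached. A condensed sketch of that flow, reusing the helpers and fixtures from the test above (client_post, BASE_URI, data, mocker and caplog are the test's own names, not new API):

    # Condensed sketch of the new ordering, under the assumptions stated above.
    rc = client_post(client, f"{BASE_URI}/backtest", data=json.dumps(data))  # served from the day-cache
    assert log_has_re('Reusing result of previous backtest.*', caplog)

    data['stake_amount'] = 101   # changed payload, so the cached result should no longer be reused
    mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest_one_strategy',
                 side_effect=DependencyException())
    rc = client_post(client, f"{BASE_URI}/backtest", data=json.dumps(data))  # now hits the patched failure
    assert log_has("Backtesting caused an error: ", caplog)
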
tests/strategy/strats/broken_strats/legacy_strategy_v1.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+# type: ignore
+from pandas import DataFrame
+
+from freqtrade.strategy import IStrategy
+
+
+# Dummy strategy - no longer loads but raises an exception.
+class TestStrategyLegacyV1(IStrategy):
+
+    minimal_roi = {
+        "40": 0.0,
+        "30": 0.01,
+        "20": 0.02,
+        "0": 0.04
+    }
+    stoploss = -0.10
+
+    timeframe = '5m'
+
+    def populate_indicators(self, dataframe: DataFrame) -> DataFrame:
+
+        return dataframe
+
+    def populate_buy_trend(self, dataframe: DataFrame) -> DataFrame:
+
+        return dataframe
+
+    def populate_sell_trend(self, dataframe: DataFrame) -> DataFrame:
+
+        return dataframe
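
This stub keeps the v1-style method signatures (no metadata argument) only so the resolver's failure path can be exercised; as the test_strategy_loading.py hunk further down shows, loading it is now expected to raise OperationalException instead of merely emitting a deprecation warning. A minimal sketch of that expectation, assuming freqtrade's test fixtures (default_conf) are available and the file lives next to the existing strategy tests; the test name here is illustrative:

    from pathlib import Path

    import pytest

    from freqtrade.exceptions import OperationalException
    from freqtrade.resolvers import StrategyResolver


    def test_legacy_v1_strategy_rejected(default_conf):
        # Point the resolver at the broken_strats copy added above.
        default_conf.update({'strategy': 'TestStrategyLegacyV1',
                             'strategy_path': Path(__file__).parent / "strats/broken_strats/"})
        with pytest.raises(OperationalException,
                           match=r"Strategy Interface v1 is no longer supported.*"):
            StrategyResolver.load_strategy(default_conf)
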
@@ -1,85 +0,0 @@
-
-# --- Do not remove these libs ---
-# Add your lib to import here
-import talib.abstract as ta
-from pandas import DataFrame
-
-from freqtrade.strategy import IStrategy
-
-
-# --------------------------------
-
-# This class is a sample. Feel free to customize it.
-class TestStrategyLegacyV1(IStrategy):
-    """
-    This is a test strategy using the legacy function headers, which will be
-    removed in a future update.
-    Please do not use this as a template, but refer to user_data/strategy/sample_strategy.py
-    for a uptodate version of this template.
-    """
-
-    # Minimal ROI designed for the strategy.
-    # This attribute will be overridden if the config file contains "minimal_roi"
-    minimal_roi = {
-        "40": 0.0,
-        "30": 0.01,
-        "20": 0.02,
-        "0": 0.04
-    }
-
-    # Optimal stoploss designed for the strategy
-    # This attribute will be overridden if the config file contains "stoploss"
-    stoploss = -0.10
-
-    timeframe = '5m'
-
-    def populate_indicators(self, dataframe: DataFrame) -> DataFrame:
-        """
-        Adds several different TA indicators to the given DataFrame
-
-        Performance Note: For the best performance be frugal on the number of indicators
-        you are using. Let uncomment only the indicator you are using in your strategies
-        or your hyperopt configuration, otherwise you will waste your memory and CPU usage.
-        """
-
-        # Momentum Indicator
-        # ------------------------------------
-
-        # ADX
-        dataframe['adx'] = ta.ADX(dataframe)
-
-        # TEMA - Triple Exponential Moving Average
-        dataframe['tema'] = ta.TEMA(dataframe, timeperiod=9)
-
-        return dataframe
-
-    def populate_buy_trend(self, dataframe: DataFrame) -> DataFrame:
-        """
-        Based on TA indicators, populates the buy signal for the given dataframe
-        :param dataframe: DataFrame
-        :return: DataFrame with buy column
-        """
-        dataframe.loc[
-            (
-                (dataframe['adx'] > 30) &
-                (dataframe['tema'] > dataframe['tema'].shift(1)) &
-                (dataframe['volume'] > 0)
-            ),
-            'buy'] = 1
-
-        return dataframe
-
-    def populate_sell_trend(self, dataframe: DataFrame) -> DataFrame:
-        """
-        Based on TA indicators, populates the sell signal for the given dataframe
-        :param dataframe: DataFrame
-        :return: DataFrame with buy column
-        """
-        dataframe.loc[
-            (
-                (dataframe['adx'] > 70) &
-                (dataframe['tema'] < dataframe['tema'].shift(1)) &
-                (dataframe['volume'] > 0)
-            ),
-            'sell'] = 1
-        return dataframe
@@ -56,19 +56,6 @@ class StrategyTestV2(IStrategy):
     # By default this strategy does not use Position Adjustments
     position_adjustment_enable = False
 
-    def informative_pairs(self):
-        """
-        Define additional, informative pair/interval combinations to be cached from the exchange.
-        These pair/interval combinations are non-tradeable, unless they are part
-        of the whitelist as well.
-        For more information, please consult the documentation
-        :return: List of tuples in the format (pair, interval)
-            Sample: return [("ETH/USDT", "5m"),
-                            ("BTC/USDT", "15m"),
-                            ]
-        """
-        return []
-
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         """
         Adds several different TA indicators to the given DataFrame
@@ -82,6 +82,11 @@ class StrategyTestV3(IStrategy):
     #     })
     #     return prot
 
+    bot_started = False
+
+    def bot_start(self):
+        self.bot_started = True
+
     def informative_pairs(self):
 
         return []
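
The bot_start callback added to the V3 test strategy is the hook freqtrade invokes once when the strategy object comes up; the bot_started flag simply records that the hook ran. A user strategy could use the same hook for one-off setup. A minimal sketch follows; the class name and the entry_counter attribute are illustrative only, and the populate_* bodies generate no signals:

    from pandas import DataFrame

    from freqtrade.strategy import IStrategy


    class BotStartExample(IStrategy):
        # Hypothetical example strategy, not part of the diff above.
        timeframe = '5m'
        minimal_roi = {"0": 0.04}
        stoploss = -0.10

        def bot_start(self):
            # Runs once after the bot (or a backtest worker) has instantiated the strategy.
            self.entry_counter = 0

        def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
            return dataframe

        def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
            return dataframe

        def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
            return dataframe
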
@@ -686,7 +686,7 @@ def test_is_pair_locked(default_conf)
 
 
 def test_is_informative_pairs_callback(default_conf):
-    default_conf.update({'strategy': 'TestStrategyLegacyV1'})
+    default_conf.update({'strategy': 'StrategyTestV2'})
     strategy = StrategyResolver.load_strategy(default_conf)
     # Should return empty
     # Uses fallback to base implementation
@@ -1,6 +1,5 @@
 # pragma pylint: disable=missing-docstring, protected-access, C0103
 import logging
-import warnings
 from base64 import urlsafe_b64encode
 from pathlib import Path
 
@@ -35,7 +34,7 @@ def test_search_all_strategies_no_failed():
     directory = Path(__file__).parent / "strats"
     strategies = StrategyResolver.search_all_objects(directory, enum_failed=False)
     assert isinstance(strategies, list)
-    assert len(strategies) == 6
+    assert len(strategies) == 5
     assert isinstance(strategies[0], dict)
 
 
@@ -43,10 +42,10 @@ def test_search_all_strategies_with_failed():
     directory = Path(__file__).parent / "strats"
     strategies = StrategyResolver.search_all_objects(directory, enum_failed=True)
     assert isinstance(strategies, list)
-    assert len(strategies) == 7
+    assert len(strategies) == 6
     # with enum_failed=True search_all_objects() shall find 2 good strategies
     # and 1 which fails to load
-    assert len([x for x in strategies if x['class'] is not None]) == 6
+    assert len([x for x in strategies if x['class'] is not None]) == 5
     assert len([x for x in strategies if x['class'] is None]) == 1
 
 
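
Both counts drop by one because TestStrategyLegacyV1 was removed from the scanned strats/ directory, while the number of entries that fail to load stays at one. Checking the same thing interactively might look like the sketch below; the directory path is assumed relative to a freqtrade checkout, and the expected numbers come from the updated assertions above:

    from pathlib import Path

    from freqtrade.resolvers import StrategyResolver

    strategies = StrategyResolver.search_all_objects(Path("tests/strategy/strats"), enum_failed=True)
    loadable = [s for s in strategies if s['class'] is not None]
    failed = [s for s in strategies if s['class'] is None]
    print(len(loadable), len(failed))   # per the updated assertions: 5 and 1
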
@@ -100,7 +99,7 @@ def test_load_strategy_noname(default_conf):
 
 
 @pytest.mark.filterwarnings("ignore:deprecated")
-@pytest.mark.parametrize('strategy_name', ['StrategyTestV2', 'TestStrategyLegacyV1'])
+@pytest.mark.parametrize('strategy_name', ['StrategyTestV2'])
 def test_strategy_pre_v3(result, default_conf, strategy_name):
     default_conf.update({'strategy': strategy_name})
 
@@ -346,40 +345,6 @@ def test_strategy_override_use_exit_profit_only(caplog, default_conf):
     assert log_has("Override strategy 'exit_profit_only' with value in config file: True.", caplog)
 
 
-@pytest.mark.filterwarnings("ignore:deprecated")
-def test_deprecate_populate_indicators(result, default_conf):
-    default_location = Path(__file__).parent / "strats"
-    default_conf.update({'strategy': 'TestStrategyLegacyV1',
-                         'strategy_path': default_location})
-    strategy = StrategyResolver.load_strategy(default_conf)
-    with warnings.catch_warnings(record=True) as w:
-        # Cause all warnings to always be triggered.
-        warnings.simplefilter("always")
-        indicators = strategy.advise_indicators(result, {'pair': 'ETH/BTC'})
-        assert len(w) == 1
-        assert issubclass(w[-1].category, DeprecationWarning)
-        assert "deprecated - check out the Sample strategy to see the current function headers!" \
-            in str(w[-1].message)
-
-    with warnings.catch_warnings(record=True) as w:
-        # Cause all warnings to always be triggered.
-        warnings.simplefilter("always")
-        strategy.advise_entry(indicators, {'pair': 'ETH/BTC'})
-        assert len(w) == 1
-        assert issubclass(w[-1].category, DeprecationWarning)
-        assert "deprecated - check out the Sample strategy to see the current function headers!" \
-            in str(w[-1].message)
-
-    with warnings.catch_warnings(record=True) as w:
-        # Cause all warnings to always be triggered.
-        warnings.simplefilter("always")
-        strategy.advise_exit(indicators, {'pair': 'ETH_BTC'})
-        assert len(w) == 1
-        assert issubclass(w[-1].category, DeprecationWarning)
-        assert "deprecated - check out the Sample strategy to see the current function headers!" \
-            in str(w[-1].message)
-
-
 @pytest.mark.filterwarnings("ignore:deprecated")
 def test_missing_implements(default_conf, caplog):
 
@@ -438,33 +403,14 @@ def test_missing_implements(default_conf, caplog):
         StrategyResolver.load_strategy(default_conf)
 
 
-@pytest.mark.filterwarnings("ignore:deprecated")
-def test_call_deprecated_function(result, default_conf, caplog):
-    default_location = Path(__file__).parent / "strats"
+def test_call_deprecated_function(default_conf):
+    default_location = Path(__file__).parent / "strats/broken_strats/"
     del default_conf['timeframe']
     default_conf.update({'strategy': 'TestStrategyLegacyV1',
                          'strategy_path': default_location})
-    strategy = StrategyResolver.load_strategy(default_conf)
-    metadata = {'pair': 'ETH/BTC'}
-
-    # Make sure we are using a legacy function
-    assert strategy._populate_fun_len == 2
-    assert strategy._buy_fun_len == 2
-    assert strategy._sell_fun_len == 2
-    assert strategy.INTERFACE_VERSION == 1
-    assert strategy.timeframe == '5m'
-
-    indicator_df = strategy.advise_indicators(result, metadata=metadata)
-    assert isinstance(indicator_df, DataFrame)
-    assert 'adx' in indicator_df.columns
-
-    enterdf = strategy.advise_entry(result, metadata=metadata)
-    assert isinstance(enterdf, DataFrame)
-    assert 'enter_long' in enterdf.columns
-
-    exitdf = strategy.advise_exit(result, metadata=metadata)
-    assert isinstance(exitdf, DataFrame)
-    assert 'exit_long' in exitdf
+    with pytest.raises(OperationalException,
+                       match=r"Strategy Interface v1 is no longer supported.*"):
+        StrategyResolver.load_strategy(default_conf)
 
 
 def test_strategy_interface_versioning(result, default_conf):
@@ -472,10 +418,6 @@ def test_strategy_interface_versioning(result, default_conf):
     strategy = StrategyResolver.load_strategy(default_conf)
     metadata = {'pair': 'ETH/BTC'}
 
-    # Make sure we are using a legacy function
-    assert strategy._populate_fun_len == 3
-    assert strategy._buy_fun_len == 3
-    assert strategy._sell_fun_len == 3
     assert strategy.INTERFACE_VERSION == 2
 
     indicator_df = strategy.advise_indicators(result, metadata=metadata)
@@ -718,12 +718,12 @@ def test_process_informative_pairs_added(default_conf_usdt, ticker_usdt, mocker)
     (True, 'spot', 'gateio', None, 0.0, None),
     (False, 'spot', 'okx', None, 0.0, None),
     (True, 'spot', 'okx', None, 0.0, None),
-    (True, 'futures', 'binance', 'isolated', 0.0, 11.89108910891089),
-    (False, 'futures', 'binance', 'isolated', 0.0, 8.070707070707071),
+    (True, 'futures', 'binance', 'isolated', 0.0, 11.88151815181518),
+    (False, 'futures', 'binance', 'isolated', 0.0, 8.080471380471382),
     (True, 'futures', 'gateio', 'isolated', 0.0, 11.87413417771621),
     (False, 'futures', 'gateio', 'isolated', 0.0, 8.085708510208207),
-    (True, 'futures', 'binance', 'isolated', 0.05, 11.796534653465345),
-    (False, 'futures', 'binance', 'isolated', 0.05, 8.167171717171717),
+    (True, 'futures', 'binance', 'isolated', 0.05, 11.7874422442244),
+    (False, 'futures', 'binance', 'isolated', 0.05, 8.17644781144781),
     (True, 'futures', 'gateio', 'isolated', 0.05, 11.7804274688304),
     (False, 'futures', 'gateio', 'isolated', 0.05, 8.181423084697796),
     (True, 'futures', 'okx', 'isolated', 0.0, 11.87413417771621),
@@ -846,6 +846,7 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
     assert trade.open_order_id is None
     assert trade.open_rate == 10
     assert trade.stake_amount == round(order['price'] * order['filled'] / leverage, 8)
+    assert pytest.approx(trade.liquidation_price) == liq_price
 
     # In case of rejected or expired order and partially filled
     order['status'] = 'expired'
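
The new assertion compares the trade's liquidation price through pytest.approx instead of strict equality (the exact == check is dropped in a later hunk), which keeps the test stable when the value is the product of several floating-point operations. For reference, pytest.approx applies a relative tolerance of 1e-6 by default; the numbers below are illustrative:

    import pytest

    # Differences far below the default relative tolerance still compare equal.
    assert 11.88151815181518 == pytest.approx(11.8815181518)
    # A genuinely different value does not.
    assert 11.9 != pytest.approx(11.88151815181518)
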
@@ -933,8 +934,6 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
     assert trade.open_rate_requested == 10
 
     # In case of custom entry price not float type
-    freqtrade.exchange.get_maintenance_ratio_and_amt = MagicMock(return_value=(0.01, 0.01))
-    freqtrade.exchange.name = exchange_name
     order['status'] = 'open'
     order['id'] = '5568'
     freqtrade.strategy.custom_entry_price = lambda **kwargs: "string price"
@@ -947,7 +946,6 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
     trade.is_short = is_short
     assert trade
     assert trade.open_rate_requested == 10
-    assert trade.liquidation_price == liq_price
 
     # In case of too high stake amount
 
@@ -3222,7 +3220,7 @@ def test_execute_trade_exit_custom_exit_price(
     freqtrade.execute_trade_exit(
         trade=trade,
         limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
-        exit_check=ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)
+        exit_check=ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL, exit_reason='foo')
     )
 
     # Sell price must be different to default bid price
@@ -3250,8 +3248,8 @@ def test_execute_trade_exit_custom_exit_price(
         'profit_ratio': profit_ratio,
         'stake_currency': 'USDT',
         'fiat_currency': 'USD',
-        'sell_reason': ExitType.EXIT_SIGNAL.value,
-        'exit_reason': ExitType.EXIT_SIGNAL.value,
+        'sell_reason': 'foo',
+        'exit_reason': 'foo',
         'open_date': ANY,
         'close_date': ANY,
         'close_rate': ANY,
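
Taken together, these two hunks show what the new exit_reason argument does: the reason string attached to the ExitCheckTuple ('foo' here) flows through execute_trade_exit into the notification payload, where it replaces the plain ExitType value for both the legacy sell_reason field and the newer exit_reason field. Constructing such a tuple in isolation might look like the sketch below; the import path is assumed from the freqtrade layout of this era, and the fallback behaviour is inferred from the old and new payload assertions above:

    from freqtrade.enums import ExitCheckTuple, ExitType

    # Without an explicit exit_reason, the payload carried the enum's value;
    # with one, the custom string is reported instead.
    default_check = ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)
    custom_check = ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL, exit_reason='foo')
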
@@ -10,7 +10,8 @@ from plotly.subplots import make_subplots
 from freqtrade.commands import start_plot_dataframe, start_plot_profit
 from freqtrade.configuration import TimeRange
 from freqtrade.data import history
-from freqtrade.data.btanalysis import create_cum_profit, load_backtest_data
+from freqtrade.data.btanalysis import load_backtest_data
+from freqtrade.data.metrics import create_cum_profit
 from freqtrade.exceptions import OperationalException
 from freqtrade.plot.plotting import (add_areas, add_indicators, add_profit, create_plotconfig,
                                      generate_candlestick_graph, generate_plot_filename,
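
create_cum_profit is now imported from freqtrade.data.metrics rather than freqtrade.data.btanalysis; code doing the same only needs its import line updated:

    # Old location (removed above):
    # from freqtrade.data.btanalysis import create_cum_profit
    from freqtrade.data.btanalysis import load_backtest_data
    from freqtrade.data.metrics import create_cum_profit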