Merge branch 'fix-docs' of https://github.com/stash86/freqtrade into fix-docs

Stefano Ariestasia 2022-01-22 06:54:57 +00:00
commit f79decdb9c
18 changed files with 195 additions and 68 deletions


@@ -22,6 +22,7 @@ usage: freqtrade backtesting [-h] [-v] [--logfile FILE] [-V] [-c PATH]
                              [--strategy-list STRATEGY_LIST [STRATEGY_LIST ...]]
                              [--export {none,trades}] [--export-filename PATH]
                              [--breakdown {day,week,month} [{day,week,month} ...]]
+                             [--cache {none,day,week,month}]
 
 optional arguments:
   -h, --help            show this help message and exit
@@ -76,7 +77,9 @@ optional arguments:
                         _today.json`
   --breakdown {day,week,month} [{day,week,month} ...]
                         Show backtesting breakdown per [day, week, month].
-  --no-cache            Do not reuse cached backtest results.
+  --cache {none,day,week,month}
+                        Load a cached backtest result no older than specified
+                        age (default: day).
 
 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
@@ -460,11 +463,11 @@ The output will show a table containing the realized absolute Profit (in stake c
 ### Backtest result caching
 
-To save time, by default backtest will reuse a cached result when backtested strategy and config match that of previous backtest. To force a new backtest despite existing result for identical run specify `--no-cache` parameter.
+To save time, backtesting will by default reuse a cached result from within the last day when the backtested strategy and config match those of a previous backtest. To force a new backtest despite an existing result for an identical run, specify the `--cache none` parameter.
 
 !!! Warning
     Caching is automatically disabled for open-ended timeranges (`--timerange 20210101-`), as freqtrade cannot ensure reliably that the underlying data didn't change. It can also use cached results where it shouldn't if the original backtest had missing data at the end, which was fixed by downloading more data.
-    In this instance, please use `--no-cache` once to get a fresh backtest.
+    In this instance, please use `--cache none` once to force a fresh backtest.
 
 ### Further backtest-result analysis
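
The reuse rule described above combines two checks: the run id (a hash over the strategy and its relevant config) must match, and the cached result must be newer than the age selected with `--cache`. A minimal, self-contained sketch of that rule, with illustrative names only and not freqtrade's actual implementation:

```python
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Optional


@dataclass
class CachedResult:
    run_id: str                    # hash over strategy + relevant config (illustrative)
    backtest_start_time: datetime  # when the cached backtest was produced


def min_cached_date(cache_age: str) -> Optional[datetime]:
    """Translate the --cache value into the oldest acceptable result date."""
    if cache_age == 'none':
        return None  # caching disabled, never reuse
    deltas = {'day': timedelta(days=1), 'week': timedelta(weeks=1), 'month': timedelta(weeks=4)}
    return datetime.now(timezone.utc) - deltas[cache_age]


def should_reuse(cached: CachedResult, current_run_id: str, cache_age: str) -> bool:
    cutoff = min_cached_date(cache_age)
    if cutoff is None:
        return False
    return cached.run_id == current_run_id and cached.backtest_start_time >= cutoff


# Example: a matching run id from two days ago is reused with --cache week, but not --cache day.
two_days_ago = datetime.now(timezone.utc) - timedelta(days=2)
cached = CachedResult(run_id='abc', backtest_start_time=two_days_ago)
print(should_reuse(cached, 'abc', 'week'))  # True
print(should_reuse(cached, 'abc', 'day'))   # False
```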


@@ -24,7 +24,7 @@ ARGS_COMMON_OPTIMIZE = ["timeframe", "timerange", "dataformat_ohlcv",
 ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + ["position_stacking", "use_max_market_positions",
                                         "enable_protections", "dry_run_wallet", "timeframe_detail",
                                         "strategy_list", "export", "exportfilename",
-                                        "backtest_breakdown", "no_backtest_cache"]
+                                        "backtest_breakdown", "backtest_cache"]
 
 ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path",
                                         "position_stacking", "use_max_market_positions",


@@ -205,10 +205,11 @@ AVAILABLE_CLI_OPTIONS = {
         nargs='+',
         choices=constants.BACKTEST_BREAKDOWNS
     ),
-    "no_backtest_cache": Arg(
-        '--no-cache',
-        help='Do not reuse cached backtest results.',
-        action='store_true'
+    "backtest_cache": Arg(
+        '--cache',
+        help='Load a cached backtest result no older than specified age (default: %(default)s).',
+        default=constants.BACKTEST_CACHE_DEFAULT,
+        choices=constants.BACKTEST_CACHE_AGE,
     ),
     # Edge
     "stoploss_range": Arg(


@@ -276,8 +276,8 @@ class Configuration:
         self._args_to_config(config, argname='backtest_breakdown',
                              logstring='Parameter --breakdown detected ...')
 
-        self._args_to_config(config, argname='no_backtest_cache',
-                             logstring='Parameter --no-cache detected ...')
+        self._args_to_config(config, argname='backtest_cache',
+                             logstring='Parameter --cache={} detected ...')
 
         self._args_to_config(config, argname='disableparamexport',
                              logstring='Parameter --disableparamexport detected: {} ...')


@@ -34,6 +34,8 @@ AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList',
 AVAILABLE_PROTECTIONS = ['CooldownPeriod', 'LowProfitPairs', 'MaxDrawdown', 'StoplossGuard']
 AVAILABLE_DATAHANDLERS = ['json', 'jsongz', 'hdf5']
 BACKTEST_BREAKDOWNS = ['day', 'week', 'month']
+BACKTEST_CACHE_AGE = ['none', 'day', 'week', 'month']
+BACKTEST_CACHE_DEFAULT = 'day'
 DRY_RUN_WALLET = 1000
 DATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S'
 MATH_CLOSE_PREC = 1e-14  # Precision used for float comparisons
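
These two constants feed the `--cache` option defined above. As a rough sketch of how `choices` and a `default` with `%(default)s` interact in plain `argparse` (freqtrade wires this through its own `Arg` helper, so treat this as an approximation rather than its actual argument plumbing):

```python
import argparse

BACKTEST_CACHE_AGE = ['none', 'day', 'week', 'month']
BACKTEST_CACHE_DEFAULT = 'day'

parser = argparse.ArgumentParser(prog='freqtrade backtesting')
parser.add_argument(
    '--cache',
    help='Load a cached backtest result no older than specified age (default: %(default)s).',
    default=BACKTEST_CACHE_DEFAULT,
    choices=BACKTEST_CACHE_AGE,
)

# '%(default)s' in the help string is expanded by argparse when --help is rendered,
# and values outside `choices` are rejected with an error.
print(parser.parse_args([]).cache)                    # -> 'day'
print(parser.parse_args(['--cache', 'week']).cache)   # -> 'week'
```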


@@ -3,6 +3,7 @@ Helpers when analyzing backtest data
 """
 import logging
 from copy import copy
+from datetime import datetime, timezone
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple, Union
 
@@ -99,6 +100,9 @@ def get_latest_hyperopt_file(directory: Union[Path, str], predef_filename: str =
     if isinstance(directory, str):
         directory = Path(directory)
     if predef_filename:
+        if Path(predef_filename).is_absolute():
+            raise OperationalException(
+                "--hyperopt-filename expects only the filename, not an absolute path.")
         return directory / predef_filename
     return directory / get_latest_hyperopt_filename(directory)
 
@@ -143,12 +147,24 @@ def load_backtest_stats(filename: Union[Path, str]) -> Dict[str, Any]:
     return data
 
 
-def find_existing_backtest_stats(dirname: Union[Path, str],
-                                 run_ids: Dict[str, str]) -> Dict[str, Any]:
+def _load_and_merge_backtest_result(strategy_name: str, filename: Path, results: Dict[str, Any]):
+    bt_data = load_backtest_stats(filename)
+    for k in ('metadata', 'strategy'):
+        results[k][strategy_name] = bt_data[k][strategy_name]
+    comparison = bt_data['strategy_comparison']
+    for i in range(len(comparison)):
+        if comparison[i]['key'] == strategy_name:
+            results['strategy_comparison'].append(comparison[i])
+            break
+
+
+def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, str],
+                                 min_backtest_date: datetime = None) -> Dict[str, Any]:
     """
     Find existing backtest stats that match specified run IDs and load them.
     :param dirname: pathlib.Path object, or string pointing to the file.
     :param run_ids: {strategy_name: id_string} dictionary.
+    :param min_backtest_date: do not load a backtest older than specified date.
     :return: results dict.
     """
     # Copy so we can modify this dict without affecting parent scope.
@@ -169,18 +185,30 @@ def find_existing_backtest_stats(dirname: Union[Path, str],
                 break
 
         for strategy_name, run_id in list(run_ids.items()):
-            if metadata.get(strategy_name, {}).get('run_id') == run_id:
-                # TODO: load_backtest_stats() may load an old version of backtest which is
-                # incompatible with current version.
+            strategy_metadata = metadata.get(strategy_name, None)
+            if not strategy_metadata:
+                # This strategy is not present in analyzed backtest.
+                continue
+
+            if min_backtest_date is not None:
+                try:
+                    backtest_date = strategy_metadata['backtest_start_time']
+                except KeyError:
+                    # TODO: this can be removed starting from feb 2022
+                    # The metadata-file without start_time was only available in develop
+                    # and was never included in an official release.
+                    # Older metadata format without backtest time, too old to consider.
+                    return results
+                backtest_date = datetime.fromtimestamp(backtest_date, tz=timezone.utc)
+                if backtest_date < min_backtest_date:
+                    # Do not use a cached result for this strategy as first result is too old.
+                    del run_ids[strategy_name]
+                    continue
+
+            if strategy_metadata['run_id'] == run_id:
                 del run_ids[strategy_name]
-                bt_data = load_backtest_stats(filename)
-                for k in ('metadata', 'strategy'):
-                    results[k][strategy_name] = bt_data[k][strategy_name]
-                comparison = bt_data['strategy_comparison']
-                for i in range(len(comparison)):
-                    if comparison[i]['key'] == strategy_name:
-                        results['strategy_comparison'].append(comparison[i])
-                        break
+                _load_and_merge_backtest_result(strategy_name, filename, results)
 
         if len(run_ids) == 0:
             break
     return results
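
The age filter above compares the epoch timestamp stored in the backtest metadata against a timezone-aware cutoff. A small standalone illustration of that comparison (the metadata values below are made up, but the entry has the shape written by `generate_backtest_stats()`):

```python
from datetime import datetime, timedelta, timezone

# One per-strategy metadata entry: run id plus backtest start time as epoch seconds
# (here: three days ago).
strategy_metadata = {
    'run_id': 'abc123',
    'backtest_start_time': int(datetime.now(timezone.utc).timestamp()) - 3 * 86400,
}

# Only consider cached results from the last week.
min_backtest_date = datetime.now(timezone.utc) - timedelta(weeks=1)

backtest_date = datetime.fromtimestamp(strategy_metadata['backtest_start_time'], tz=timezone.utc)
if backtest_date < min_backtest_date:
    print('Cached result too old - this strategy would be backtested again.')
else:
    print('Cached result is recent enough to be considered for reuse.')
```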


@@ -11,6 +11,7 @@ from typing import Any, Dict, List, Optional, Tuple
 
 from pandas import DataFrame
 
+from freqtrade import constants
 from freqtrade.configuration import TimeRange, validate_config_consistency
 from freqtrade.constants import DATETIME_PRINT_FORMAT
 from freqtrade.data import history
@@ -64,6 +65,7 @@ class Backtesting:
         self.results: Dict[str, Any] = {}
 
         config['dry_run'] = True
+        self.run_ids: Dict[str, str] = {}
         self.strategylist: List[IStrategy] = []
         self.all_results: Dict[str, Dict] = {}
@@ -728,7 +730,7 @@
         )
         backtest_end_time = datetime.now(timezone.utc)
         results.update({
-            'run_id': get_strategy_run_id(strat),
+            'run_id': self.run_ids.get(strat.get_strategy_name(), ''),
             'backtest_start_time': int(backtest_start_time.timestamp()),
             'backtest_end_time': int(backtest_end_time.timestamp()),
         })
@@ -736,6 +738,33 @@
         return min_date, max_date
 
+    def _get_min_cached_backtest_date(self):
+        min_backtest_date = None
+        backtest_cache_age = self.config.get('backtest_cache', constants.BACKTEST_CACHE_DEFAULT)
+        if self.timerange.stopts == 0 or datetime.fromtimestamp(
+                self.timerange.stopts, tz=timezone.utc) > datetime.now(tz=timezone.utc):
+            logger.warning('Backtest result caching disabled due to use of open-ended timerange.')
+        elif backtest_cache_age == 'day':
+            min_backtest_date = datetime.now(tz=timezone.utc) - timedelta(days=1)
+        elif backtest_cache_age == 'week':
+            min_backtest_date = datetime.now(tz=timezone.utc) - timedelta(weeks=1)
+        elif backtest_cache_age == 'month':
+            min_backtest_date = datetime.now(tz=timezone.utc) - timedelta(weeks=4)
+        return min_backtest_date
+
+    def load_prior_backtest(self):
+        self.run_ids = {
+            strategy.get_strategy_name(): get_strategy_run_id(strategy)
+            for strategy in self.strategylist
+        }
+
+        # Load previous result that will be updated incrementally.
+        # This can be circumvented in certain instances in combination with downloading more data
+        min_backtest_date = self._get_min_cached_backtest_date()
+        if min_backtest_date is not None:
+            self.results = find_existing_backtest_stats(
+                self.config['user_data_dir'] / 'backtest_results', self.run_ids, min_backtest_date)
+
     def start(self) -> None:
         """
         Run backtesting end-to-end
@@ -747,21 +776,7 @@
         self.load_bt_data_detail()
         logger.info("Dataload complete. Calculating indicators")
 
-        run_ids = {
-            strategy.get_strategy_name(): get_strategy_run_id(strategy)
-            for strategy in self.strategylist
-        }
-
-        # Load previous result that will be updated incrementally.
-        # This can be circumvented in certain instances in combination with downloading more data
-        if self.timerange.stopts == 0 or datetime.fromtimestamp(
-                self.timerange.stopts, tz=timezone.utc) > datetime.now(tz=timezone.utc):
-            self.config['no_backtest_cache'] = True
-            logger.warning('Backtest result caching disabled due to use of open-ended timerange.')
-
-        if not self.config.get('no_backtest_cache', False):
-            self.results = find_existing_backtest_stats(
-                self.config['user_data_dir'] / 'backtest_results', run_ids)
+        self.load_prior_backtest()
 
         for strat in self.strategylist:
             if self.results and strat.get_strategy_name() in self.results['strategy']:
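
Besides translating the `--cache` age into a cutoff, `_get_min_cached_backtest_date()` above disables caching when the timerange has no fixed end or ends in the future. A standalone sketch of just that check; the `stopts == 0` convention for "no end date" mirrors the code above, while the function name is illustrative:

```python
from datetime import datetime, timezone


def caching_allowed(stopts: int) -> bool:
    """Caching is only allowed for closed timeranges whose end lies in the past;
    stopts == 0 means no end date was given (open-ended timerange)."""
    if stopts == 0:
        return False
    stop_date = datetime.fromtimestamp(stopts, tz=timezone.utc)
    return stop_date <= datetime.now(tz=timezone.utc)


# A fixed end date in the past keeps caching enabled ...
print(caching_allowed(int(datetime(2021, 12, 31, tzinfo=timezone.utc).timestamp())))  # True
# ... while an open-ended timerange disables it.
print(caching_allowed(0))  # False
```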


@@ -137,6 +137,7 @@ class HyperoptTools():
         }
 
         if not HyperoptTools._test_hyperopt_results_exist(results_file):
             # No file found.
+            logger.warning(f"Hyperopt file {results_file} not found.")
             return [], 0
 
         epochs = []


@@ -527,7 +527,8 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame],
         strat_stats = generate_strategy_stats(pairlist, strategy, content,
                                               min_date, max_date, market_change=market_change)
         metadata[strategy] = {
-            'run_id': content['run_id']
+            'run_id': content['run_id'],
+            'backtest_start_time': content['backtest_start_time'],
         }
         result['strategy'][strategy] = strat_stats


@@ -235,10 +235,12 @@ def plot_trades(fig, trades: pd.DataFrame) -> make_subplots:
     # Trades can be empty
     if trades is not None and len(trades) > 0:
         # Create description for sell summarizing the trade
-        trades['desc'] = trades.apply(lambda row: f"{row['profit_ratio']:.2%}, "
-                                      f"{row['sell_reason']}, "
-                                      f"{row['trade_duration']} min",
-                                      axis=1)
+        trades['desc'] = trades.apply(
+            lambda row: f"{row['profit_ratio']:.2%}, " +
+            (f"{row['buy_tag']}, " if row['buy_tag'] is not None else "") +
+            f"{row['sell_reason']}, " +
+            f"{row['trade_duration']} min",
+            axis=1)
         trade_buys = go.Scatter(
             x=trades["open_date"],
             y=trades["open_rate"],
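
The reworked `desc` expression now includes the optional `buy_tag` in the hover text. A toy pandas illustration of the same pattern (the DataFrame below is made up; only the lambda mirrors the code above):

```python
import pandas as pd

trades = pd.DataFrame({
    'profit_ratio': [0.0399, -0.012],
    'buy_tag': ['buy_tag', None],
    'sell_reason': ['roi', 'stop_loss'],
    'trade_duration': [15, 42],
})

# Same string-building pattern as in plot_trades(): the buy_tag part is only
# emitted when the row actually carries a tag.
trades['desc'] = trades.apply(
    lambda row: f"{row['profit_ratio']:.2%}, " +
    (f"{row['buy_tag']}, " if row['buy_tag'] is not None else "") +
    f"{row['sell_reason']}, " +
    f"{row['trade_duration']} min",
    axis=1)

print(trades['desc'].tolist())
# ['3.99%, buy_tag, roi, 15 min', '-1.20%, stop_loss, 42 min']
```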


@@ -39,7 +39,8 @@ async def api_start_backtest(bt_settings: BacktestRequest, background_tasks: Bac
     # Start backtesting
     # Initialize backtesting object
     def run_backtest():
-        from freqtrade.optimize.optimize_reports import generate_backtest_stats
+        from freqtrade.optimize.optimize_reports import (generate_backtest_stats,
+                                                         store_backtest_stats)
         from freqtrade.resolvers import StrategyResolver
         asyncio.set_event_loop(asyncio.new_event_loop())
         try:
@@ -76,13 +77,25 @@ async def api_start_backtest(bt_settings: BacktestRequest, background_tasks: Bac
             lastconfig['enable_protections'] = btconfig.get('enable_protections')
             lastconfig['dry_run_wallet'] = btconfig.get('dry_run_wallet')
 
-            ApiServer._bt.abort = False
-            min_date, max_date = ApiServer._bt.backtest_one_strategy(
-                strat, ApiServer._bt_data, ApiServer._bt_timerange)
-
-            ApiServer._bt.results = generate_backtest_stats(
-                ApiServer._bt_data, ApiServer._bt.all_results,
-                min_date=min_date, max_date=max_date)
+            ApiServer._bt.results = {}
+            ApiServer._bt.load_prior_backtest()
+
+            ApiServer._bt.abort = False
+            if (ApiServer._bt.results and
+                    strat.get_strategy_name() in ApiServer._bt.results['strategy']):
+                # When previous result hash matches - reuse that result and skip backtesting.
+                logger.info(f'Reusing result of previous backtest for {strat.get_strategy_name()}')
+            else:
+                min_date, max_date = ApiServer._bt.backtest_one_strategy(
+                    strat, ApiServer._bt_data, ApiServer._bt_timerange)
+
+                ApiServer._bt.results = generate_backtest_stats(
+                    ApiServer._bt_data, ApiServer._bt.all_results,
+                    min_date=min_date, max_date=max_date)
+
+            if btconfig.get('export', 'none') == 'trades':
+                store_backtest_stats(btconfig['exportfilename'], ApiServer._bt.results)
 
             logger.info("Backtest finished.")
 
         except DependencyException as e:
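
The API endpoint now follows the same reuse-or-run decision as the CLI: if `load_prior_backtest()` already populated a result for the requested strategy, it is returned as-is, otherwise the backtest runs and fresh stats are generated. A self-contained sketch of that branching (names and data are illustrative, not the API server's code):

```python
from typing import Any, Callable, Dict, Tuple


def run_or_reuse(results: Dict[str, Any],
                 strategy_name: str,
                 run_backtest: Callable[[], Dict[str, Any]]) -> Tuple[Dict[str, Any], bool]:
    """Reuse previously loaded stats when the strategy is already present,
    otherwise run the backtest; returns (results, reused)."""
    if results and strategy_name in results.get('strategy', {}):
        return results, True
    return run_backtest(), False


def fresh_run() -> Dict[str, Any]:
    # Stand-in for running the backtest and generating stats.
    return {'strategy': {'SampleStrategy': {'profit_total': 0.05}}}


results, reused = run_or_reuse({}, 'SampleStrategy', fresh_run)
print(reused)   # False - nothing cached yet, the backtest "ran"
results, reused = run_or_reuse(results, 'SampleStrategy', fresh_run)
print(reused)   # True - the previously loaded result is reused
```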


@@ -51,6 +51,12 @@ def test_get_latest_hyperopt_file(testdatadir):
     res = get_latest_hyperopt_file(str(testdatadir.parent))
     assert res == testdatadir.parent / "hyperopt_results.pickle"
 
+    # Test with absolute path
+    with pytest.raises(
+            OperationalException,
+            match="--hyperopt-filename expects only the filename, not an absolute path."):
+        get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))
+
 
 def test_load_backtest_metadata(mocker, testdatadir):
     res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')


@@ -11,6 +11,7 @@ import pandas as pd
 import pytest
 from arrow import Arrow
 
+from freqtrade import constants
 from freqtrade.commands.optimize_commands import setup_optimize_configuration, start_backtesting
 from freqtrade.configuration import TimeRange
 from freqtrade.data import history
@@ -1242,8 +1243,11 @@ def test_backtest_start_multi_strat_nomock_detail(default_conf, mocker,
 
 
 @pytest.mark.filterwarnings("ignore:deprecated")
-def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testdatadir):
+@pytest.mark.parametrize('run_id', ['2', 'changed'])
+@pytest.mark.parametrize('start_delta', [{'days': 0}, {'days': 1}, {'weeks': 1}, {'weeks': 4}])
+@pytest.mark.parametrize('cache', constants.BACKTEST_CACHE_AGE)
+def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testdatadir, run_id,
+                                            start_delta, cache):
     default_conf.update({
         "use_sell_signal": True,
         "sell_profit_only": False,
@@ -1263,9 +1267,19 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
     mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock)
     mocker.patch('freqtrade.optimize.backtesting.show_backtest_results', MagicMock())
 
+    now = min_backtest_date = datetime.now(tz=timezone.utc)
+    start_time = now - timedelta(**start_delta) + timedelta(hours=1)
+    if cache == 'none':
+        min_backtest_date = now + timedelta(days=1)
+    elif cache == 'day':
+        min_backtest_date = now - timedelta(days=1)
+    elif cache == 'week':
+        min_backtest_date = now - timedelta(weeks=1)
+    elif cache == 'month':
+        min_backtest_date = now - timedelta(weeks=4)
     load_backtest_metadata = MagicMock(return_value={
-        'StrategyTestV2': {'run_id': '1'},
-        'TestStrategyLegacyV1': {'run_id': 'changed'}
+        'StrategyTestV2': {'run_id': '1', 'backtest_start_time': now.timestamp()},
+        'TestStrategyLegacyV1': {'run_id': run_id, 'backtest_start_time': start_time.timestamp()}
     })
     load_backtest_stats = MagicMock(side_effect=[
         {
@@ -1279,7 +1293,8 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
             'strategy_comparison': [{'key': 'TestStrategyLegacyV1'}]
         }
     ])
-    mocker.patch('pathlib.Path.glob', return_value=['not important'])
+    mocker.patch('pathlib.Path.glob', return_value=[
+        Path(datetime.strftime(datetime.now(), 'backtest-result-%Y-%m-%d_%H-%M-%S.json'))])
     mocker.patch.multiple('freqtrade.data.btanalysis',
                           load_backtest_metadata=load_backtest_metadata,
                           load_backtest_stats=load_backtest_stats)
@@ -1296,29 +1311,49 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda
         '--timerange', '1510694220-1510700340',
         '--enable-position-stacking',
         '--disable-max-market-positions',
+        '--cache', cache,
         '--strategy-list',
         'StrategyTestV2',
         'TestStrategyLegacyV1',
     ]
     args = get_args(args)
     start_backtesting(args)
-    # 1 backtest, 1 loaded from cache
-    assert backtestmock.call_count == 1
     # check the logs, that will contain the backtest result
     exists = [
         'Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
-        'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
         'Parameter --timerange detected: 1510694220-1510700340 ...',
         f'Using data directory: {testdatadir} ...',
         'Loading data from 2017-11-14 20:57:00 '
         'up to 2017-11-14 22:58:00 (0 days).',
-        'Backtesting with data from 2017-11-14 21:17:00 '
-        'up to 2017-11-14 22:58:00 (0 days).',
         'Parameter --enable-position-stacking detected ...',
-        'Reusing result of previous backtest for StrategyTestV2',
-        'Running backtesting for Strategy TestStrategyLegacyV1',
     ]
 
     for line in exists:
         assert log_has(line, caplog)
+
+    if cache == 'none':
+        assert backtestmock.call_count == 2
+        exists = [
+            'Running backtesting for Strategy StrategyTestV2',
+            'Running backtesting for Strategy TestStrategyLegacyV1',
+            'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
+            'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:58:00 (0 days).',
+        ]
+    elif run_id == '2' and min_backtest_date < start_time:
+        assert backtestmock.call_count == 0
+        exists = [
+            'Reusing result of previous backtest for StrategyTestV2',
+            'Reusing result of previous backtest for TestStrategyLegacyV1',
+        ]
+    else:
+        exists = [
+            'Reusing result of previous backtest for StrategyTestV2',
+            'Running backtesting for Strategy TestStrategyLegacyV1',
+            'Ignoring max_open_trades (--disable-max-market-positions was used) ...',
+            'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:58:00 (0 days).',
+        ]
+        assert backtestmock.call_count == 1
+
+    for line in exists:
+        assert log_has(line, caplog)


@@ -10,7 +10,7 @@ import rapidjson
 
 from freqtrade.constants import FTHYPT_FILEVERSION
 from freqtrade.exceptions import OperationalException
 from freqtrade.optimize.hyperopt_tools import HyperoptTools, hyperopt_serializer
-from tests.conftest import log_has
+from tests.conftest import log_has, log_has_re
 
 
 # Functions for recurrent object patching
@@ -24,6 +24,7 @@ def test_save_results_saves_epochs(hyperopt, tmpdir, caplog) -> None:
     hyperopt.results_file = Path(tmpdir / 'ut_results.fthypt')
 
     hyperopt_epochs = HyperoptTools.load_filtered_results(hyperopt.results_file, {})
+    assert log_has_re("Hyperopt file .* not found.", caplog)
     assert hyperopt_epochs == ([], 0)
 
     # Test writing to temp dir and reading again


@@ -220,11 +220,17 @@ def test_rpc_status_table(default_conf, ticker, fee, mocker) -> None:
     result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf['stake_currency'], 'USD')
     assert "Since" in headers
     assert "Pair" in headers
+    assert len(result[0]) == 4
     assert 'instantly' == result[0][2]
     assert 'ETH/BTC' in result[0][1]
     assert '-0.41% (-0.06)' == result[0][3]
     assert '-0.06' == f'{fiat_profit_sum:.2f}'
 
+    rpc._config['position_adjustment_enable'] = True
+    result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf['stake_currency'], 'USD')
+    assert "# Buys" in headers
+    assert len(result[0]) == 5
+
     mocker.patch('freqtrade.exchange.Exchange.get_rate',
                  MagicMock(side_effect=ExchangeError("Pair 'ETH/BTC' not available")))
     result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf['stake_currency'], 'USD')


@@ -1326,7 +1326,7 @@ def test_sysinfo(botclient):
     assert 'ram_pct' in result
 
 
-def test_api_backtesting(botclient, mocker, fee, caplog):
+def test_api_backtesting(botclient, mocker, fee, caplog, tmpdir):
     ftbot, client = botclient
     mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
@@ -1347,6 +1347,11 @@ def test_api_backtesting(botclient, mocker, fee, caplog):
     assert result['status'] == 'reset'
     assert not result['running']
     assert result['status_msg'] == 'Backtest reset'
+    ftbot.config['export'] = 'trades'
+    ftbot.config['backtest_cache'] = 'none'
+    ftbot.config['user_data_dir'] = Path(tmpdir)
+    ftbot.config['exportfilename'] = Path(tmpdir) / "backtest_results"
+    ftbot.config['exportfilename'].mkdir()
 
     # start backtesting
     data = {
@@ -1421,6 +1426,14 @@ def test_api_backtesting(botclient, mocker, fee, caplog):
     rc = client_post(client, f"{BASE_URI}/backtest", data=json.dumps(data))
     assert log_has("Backtesting caused an error: ", caplog)
 
+    ftbot.config['backtest_cache'] = 'day'
+
+    # Rerun backtest (should get previous result)
+    rc = client_post(client, f"{BASE_URI}/backtest", data=json.dumps(data))
+    assert_response(rc)
+    result = rc.json()
+    assert log_has_re('Reusing result of previous backtest.*', caplog)
+
     # Delete backtesting to avoid leakage since the backtest-object may stick around.
     rc = client_delete(client, f"{BASE_URI}/backtest")
     assert_response(rc)


@@ -171,7 +171,7 @@ def test_plot_trades(testdatadir, caplog):
     assert len(trades) == len(trade_buy.x)
     assert trade_buy.marker.color == 'cyan'
     assert trade_buy.marker.symbol == 'circle-open'
-    assert trade_buy.text[0] == '3.99%, roi, 15 min'
+    assert trade_buy.text[0] == '3.99%, buy_tag, roi, 15 min'
 
     trade_sell = find_trace_in_fig_data(figure.data, 'Sell - Profit')
     assert isinstance(trade_sell, go.Scatter)
@@ -179,7 +179,7 @@ def test_plot_trades(testdatadir, caplog):
     assert len(trades.loc[trades['profit_ratio'] > 0]) == len(trade_sell.x)
     assert trade_sell.marker.color == 'green'
     assert trade_sell.marker.symbol == 'square-open'
-    assert trade_sell.text[0] == '3.99%, roi, 15 min'
+    assert trade_sell.text[0] == '3.99%, buy_tag, roi, 15 min'
 
     trade_sell_loss = find_trace_in_fig_data(figure.data, 'Sell - Loss')
     assert isinstance(trade_sell_loss, go.Scatter)

File diff suppressed because one or more lines are too long