Extract backtest 1 candle from main function

Matthias 2022-10-16 17:12:44 +02:00
parent 0e8cf366f5
commit 8534dfb0d4
1 changed file with 73 additions and 60 deletions
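In short, the per-candle, per-pair processing that previously lived inline in Backtesting.backtest() moves into a new helper, backtest_loop(), and the main time loop now only validates the candle row and delegates to it. The condensed sketch below is illustration only, not part of the diff: bodies are elided with `...`, and the unchanged candle-processing logic is omitted.

# Illustrative sketch of the resulting layout -- simplified stand-ins, not the actual freqtrade code.
from datetime import datetime
from typing import Any, Dict, Tuple


class Backtesting:
    def backtest_loop(self, row: Tuple, pair: str, current_time: datetime,
                      end_date: datetime, max_open_trades: int, position_stacking: bool,
                      enable_protections: bool, open_trade_count_start: int) -> int:
        # Backtesting processing for one candle of one pair:
        # manage open orders, handle entries/exits, run protections (logic unchanged),
        # then return the updated open-trade count to the caller.
        ...
        return open_trade_count_start

    def backtest(self, processed: Dict, start_date: datetime, end_date: datetime,
                 max_open_trades: int = 0, position_stacking: bool = False,
                 enable_protections: bool = False) -> Dict[str, Any]:
        ...  # prepare wallets, convert candles to lists, initialise progress
        while current_time <= end_date:          # time loop stays in backtest()
            open_trade_count_start = ...         # open trades at the start of this candle
            for pair in data:                    # per-pair loop stays in backtest()
                row = ...                        # validated candle for this pair
                open_trade_count_start = self.backtest_loop(
                    row, pair, current_time, end_date, max_open_trades,
                    position_stacking, enable_protections, open_trade_count_start)
            self.progress.increment()            # move one candle ahead
        ...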


@@ -1067,54 +1067,15 @@ class Backtesting:
            return None
        return row
    def backtest(self, processed: Dict,  # noqa: max-complexity: 13
                 start_date: datetime, end_date: datetime,
                 max_open_trades: int = 0, position_stacking: bool = False,
                 enable_protections: bool = False) -> Dict[str, Any]:
    def backtest_loop(
            self, row: Tuple, pair: str, current_time: datetime, end_date: datetime,
            max_open_trades: int, position_stacking: bool, enable_protections: bool,
            open_trade_count_start: int) -> int:
"""
Implement backtesting functionality
NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized.
Of course try to not have ugly code. By some accessor are sometime slower than functions.
Avoid extensive logging in this method and functions it calls.
:param processed: a processed dictionary with format {pair, data}, which gets cleared to
optimize memory usage!
:param start_date: backtesting timerange start datetime
:param end_date: backtesting timerange end datetime
:param max_open_trades: maximum number of concurrent trades, <= 0 means unlimited
:param position_stacking: do we allow position stacking?
:param enable_protections: Should protections be enabled?
:return: DataFrame with trades (results of backtesting)
Backtesting processing for one candle.
"""
        self.prepare_backtest(enable_protections)
        # Ensure wallets are up to date (important for --strategy-list)
        self.wallets.update()
        # Use dict of lists with data for performance
        # (looping lists is a lot faster than pandas DataFrames)
        data: Dict = self._get_ohlcv_as_lists(processed)
        # Indexes per pair, so some pairs are allowed to have a missing start.
        indexes: Dict = defaultdict(int)
        current_time = start_date + timedelta(minutes=self.timeframe_min)
        self.progress.init_step(BacktestState.BACKTEST, int(
            (end_date - start_date) / timedelta(minutes=self.timeframe_min)))
        # Loop timerange and get candle for each pair at that point in time
        while current_time <= end_date:
            open_trade_count_start = LocalTrade.bt_open_open_trade_count
            self.check_abort()
            for i, pair in enumerate(data):
                row_index = indexes[pair]
                row = self.validate_row(data, pair, row_index, current_time)
                if not row:
                    continue
                row_index += 1
                indexes[pair] = row_index
                self.dataprovider._set_dataframe_max_index(row_index)
        for t in list(LocalTrade.bt_trades_open_pp[pair]):
            # 1. Manage currently open orders of active trades
            if self.manage_open_orders(t, current_time, row):
@@ -1173,8 +1134,60 @@ class Backtesting:
                # logger.debug(f"{pair} - Backtesting exit {trade}")
                LocalTrade.close_bt_trade(trade)
            self.wallets.update()
                        self.run_protections(
                            enable_protections, pair, current_time, trade.trade_direction)
            self.run_protections(enable_protections, pair, current_time, trade.trade_direction)
        return open_trade_count_start
    def backtest(self, processed: Dict,  # noqa: max-complexity: 13
                 start_date: datetime, end_date: datetime,
                 max_open_trades: int = 0, position_stacking: bool = False,
                 enable_protections: bool = False) -> Dict[str, Any]:
"""
Implement backtesting functionality
NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized.
Of course try to not have ugly code. By some accessor are sometime slower than functions.
Avoid extensive logging in this method and functions it calls.
:param processed: a processed dictionary with format {pair, data}, which gets cleared to
optimize memory usage!
:param start_date: backtesting timerange start datetime
:param end_date: backtesting timerange end datetime
:param max_open_trades: maximum number of concurrent trades, <= 0 means unlimited
:param position_stacking: do we allow position stacking?
:param enable_protections: Should protections be enabled?
:return: DataFrame with trades (results of backtesting)
"""
        self.prepare_backtest(enable_protections)
        # Ensure wallets are up to date (important for --strategy-list)
        self.wallets.update()
        # Use dict of lists with data for performance
        # (looping lists is a lot faster than pandas DataFrames)
        data: Dict = self._get_ohlcv_as_lists(processed)
        # Indexes per pair, so some pairs are allowed to have a missing start.
        indexes: Dict = defaultdict(int)
        current_time = start_date + timedelta(minutes=self.timeframe_min)
        self.progress.init_step(BacktestState.BACKTEST, int(
            (end_date - start_date) / timedelta(minutes=self.timeframe_min)))
        # Loop timerange and get candle for each pair at that point in time
        while current_time <= end_date:
            open_trade_count_start = LocalTrade.bt_open_open_trade_count
            self.check_abort()
            for i, pair in enumerate(data):
                row_index = indexes[pair]
                row = self.validate_row(data, pair, row_index, current_time)
                if not row:
                    continue
                row_index += 1
                indexes[pair] = row_index
                self.dataprovider._set_dataframe_max_index(row_index)
                open_trade_count_start = self.backtest_loop(
                    row, pair, current_time, end_date, max_open_trades,
                    position_stacking, enable_protections, open_trade_count_start)
            # Move time one configured time_interval ahead.
            self.progress.increment()
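For context, a hypothetical call into the refactored entry point could look like the sketch below. Only the parameter names and defaults come from the signature above; the variable names (backtesting, processed, start, end) and the way the data is prepared are assumptions for illustration.

# Hypothetical usage sketch -- assumes `backtesting` is a fully configured
# freqtrade Backtesting instance with a strategy loaded, and `processed` is the
# {pair: dataframe} dict of analyzed candles that backtest() expects.
from datetime import datetime, timezone

start = datetime(2022, 9, 1, tzinfo=timezone.utc)
end = datetime(2022, 10, 1, tzinfo=timezone.utc)

results = backtesting.backtest(
    processed=processed,        # note: gets cleared during the run to save memory
    start_date=start,
    end_date=end,
    max_open_trades=3,          # <= 0 would mean "unlimited"
    position_stacking=False,
    enable_protections=False,
)
# `results` is a Dict[str, Any] holding the backtest output (trade results etc.).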