Merge branch 'develop' into pr/rextea/4606
@@ -11,22 +11,24 @@ from typing import Any, Dict, List, Optional, Tuple

from pandas import DataFrame

from freqtrade.configuration import TimeRange, remove_credentials, validate_config_consistency
from freqtrade.configuration import TimeRange, validate_config_consistency
from freqtrade.constants import DATETIME_PRINT_FORMAT
from freqtrade.data import history
from freqtrade.data.btanalysis import trade_list_to_dataframe
from freqtrade.data.converter import trim_dataframe
from freqtrade.data.converter import trim_dataframe, trim_dataframes
from freqtrade.data.dataprovider import DataProvider
from freqtrade.enums import BacktestState, SellType
from freqtrade.exceptions import DependencyException, OperationalException
from freqtrade.exchange import timeframe_to_minutes, timeframe_to_seconds
from freqtrade.mixins import LoggingMixin
from freqtrade.optimize.bt_progress import BTProgress
from freqtrade.optimize.optimize_reports import (generate_backtest_stats, show_backtest_results,
store_backtest_stats)
from freqtrade.persistence import LocalTrade, PairLocks, Trade
from freqtrade.plugins.pairlistmanager import PairListManager
from freqtrade.plugins.protectionmanager import ProtectionManager
from freqtrade.resolvers import ExchangeResolver, StrategyResolver
from freqtrade.strategy.interface import IStrategy, SellCheckTuple, SellType
from freqtrade.strategy.interface import IStrategy, SellCheckTuple
from freqtrade.strategy.strategy_wrapper import strategy_safe_wrapper
from freqtrade.wallets import Wallets

@@ -41,6 +43,7 @@ CLOSE_IDX = 3
SELL_IDX = 4
LOW_IDX = 5
HIGH_IDX = 6
BUY_TAG_IDX = 7


class Backtesting:
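
The index constants above mirror the column order of the headers list introduced later in this diff ('date', 'buy', 'open', 'close', 'sell', 'low', 'high', 'buy_tag'). A minimal illustrative sketch, not part of the commit, with the first three index values assumed from that order:

    # Illustrative sketch only - index values assumed from the headers order
    # ['date', 'buy', 'open', 'close', 'sell', 'low', 'high', 'buy_tag'].
    DATE_IDX, BUY_IDX, OPEN_IDX, CLOSE_IDX, SELL_IDX, LOW_IDX, HIGH_IDX, BUY_TAG_IDX = range(8)

    row = ['2021-07-01 00:00', 1, 100.0, 101.0, 0, 99.5, 101.5, 'breakout_tag']
    if row[BUY_IDX] == 1 and row[SELL_IDX] != 1:
        print(f"enter at {row[OPEN_IDX]} on {row[DATE_IDX]}, buy_tag={row[BUY_TAG_IDX]}")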
@@ -56,16 +59,14 @@ class Backtesting:
|
||||
|
||||
LoggingMixin.show_output = False
|
||||
self.config = config
|
||||
self.results: Optional[Dict[str, Any]] = None
|
||||
|
||||
# Reset keys for backtesting
|
||||
remove_credentials(self.config)
|
||||
config['dry_run'] = True
|
||||
self.strategylist: List[IStrategy] = []
|
||||
self.all_results: Dict[str, Dict] = {}
|
||||
|
||||
self.exchange = ExchangeResolver.load_exchange(self.config['exchange']['name'], self.config)
|
||||
|
||||
dataprovider = DataProvider(self.config, self.exchange)
|
||||
IStrategy.dp = dataprovider
|
||||
self.dataprovider = DataProvider(self.config, None)
|
||||
|
||||
if self.config.get('strategy_list', None):
|
||||
for strat in list(self.config['strategy_list']):
|
||||
@@ -84,7 +85,7 @@ class Backtesting:
|
||||
"configuration or as cli argument `--timeframe 5m`")
|
||||
self.timeframe = str(self.config.get('timeframe'))
|
||||
self.timeframe_min = timeframe_to_minutes(self.timeframe)
|
||||
|
||||
self.init_backtest_detail()
|
||||
self.pairlists = PairListManager(self.exchange, self.config)
|
||||
if 'VolumePairList' in self.pairlists.name_list:
|
||||
raise OperationalException("VolumePairList not allowed for backtesting.")
|
||||
@@ -96,7 +97,7 @@ class Backtesting:
|
||||
"PrecisionFilter not allowed for backtesting multiple strategies."
|
||||
)
|
||||
|
||||
dataprovider.add_pairlisthandler(self.pairlists)
|
||||
self.dataprovider.add_pairlisthandler(self.pairlists)
|
||||
self.pairlists.refresh_pairlist()
|
||||
|
||||
if len(self.pairlists.whitelist) == 0:
|
||||
@@ -107,49 +108,79 @@ class Backtesting:
|
||||
else:
|
||||
self.fee = self.exchange.get_fee(symbol=self.pairlists.whitelist[0])
|
||||
|
||||
Trade.use_db = False
|
||||
Trade.reset_trades()
|
||||
PairLocks.timeframe = self.config['timeframe']
|
||||
PairLocks.use_db = False
|
||||
PairLocks.reset_locks()
|
||||
if self.config.get('enable_protections', False):
|
||||
self.protections = ProtectionManager(self.config)
|
||||
|
||||
self.wallets = Wallets(self.config, self.exchange, log=False)
|
||||
self.timerange = TimeRange.parse_timerange(
|
||||
None if self.config.get('timerange') is None else str(self.config.get('timerange')))
|
||||
|
||||
# Get maximum required startup period
|
||||
self.required_startup = max([strat.startup_candle_count for strat in self.strategylist])
|
||||
# Load one (first) strategy
|
||||
self._set_strategy(self.strategylist[0])
|
||||
# Add maximum startup candle count to configuration for informative pairs support
|
||||
self.config['startup_candle_count'] = self.required_startup
|
||||
self.exchange.validate_required_startup_candles(self.required_startup, self.timeframe)
|
||||
self.init_backtest()
|
||||
|
||||
def __del__(self):
|
||||
self.cleanup()
|
||||
|
||||
def cleanup(self):
|
||||
LoggingMixin.show_output = True
|
||||
PairLocks.use_db = True
|
||||
Trade.use_db = True
|
||||
|
||||
def init_backtest_detail(self):
|
||||
# Load detail timeframe if specified
|
||||
self.timeframe_detail = str(self.config.get('timeframe_detail', ''))
|
||||
if self.timeframe_detail:
|
||||
self.timeframe_detail_min = timeframe_to_minutes(self.timeframe_detail)
|
||||
if self.timeframe_min <= self.timeframe_detail_min:
|
||||
raise OperationalException(
|
||||
"Detail timeframe must be smaller than strategy timeframe.")
|
||||
|
||||
else:
|
||||
self.timeframe_detail_min = 0
|
||||
self.detail_data: Dict[str, DataFrame] = {}
|
||||
|
||||
def init_backtest(self):
|
||||
|
||||
self.prepare_backtest(False)
|
||||
|
||||
self.wallets = Wallets(self.config, self.exchange, log=False)
|
||||
|
||||
self.progress = BTProgress()
|
||||
self.abort = False
|
||||
|
||||
def _set_strategy(self, strategy: IStrategy):
|
||||
"""
|
||||
Load strategy into backtesting
|
||||
"""
|
||||
self.strategy: IStrategy = strategy
|
||||
strategy.dp = self.dataprovider
|
||||
# Attach Wallets to Strategy baseclass
|
||||
strategy.wallets = self.wallets
|
||||
# Set stoploss_on_exchange to false for backtesting,
|
||||
# since a "perfect" stoploss-sell is assumed anyway
|
||||
# And the regular "stoploss" function would not apply to that case
|
||||
self.strategy.order_types['stoploss_on_exchange'] = False
|
||||
|
||||
def _load_protections(self, strategy: IStrategy):
|
||||
if self.config.get('enable_protections', False):
|
||||
conf = self.config
|
||||
if hasattr(strategy, 'protections'):
|
||||
conf = deepcopy(conf)
|
||||
conf['protections'] = strategy.protections
|
||||
self.protections = ProtectionManager(self.config, strategy.protections)
|
||||
|
||||
def load_bt_data(self) -> Tuple[Dict[str, DataFrame], TimeRange]:
|
||||
"""
|
||||
Loads backtest data and returns the data combined with the timerange
|
||||
as tuple.
|
||||
"""
|
||||
timerange = TimeRange.parse_timerange(None if self.config.get(
|
||||
'timerange') is None else str(self.config.get('timerange')))
|
||||
self.progress.init_step(BacktestState.DATALOAD, 1)
|
||||
|
||||
data = history.load_data(
|
||||
datadir=self.config['datadir'],
|
||||
pairs=self.pairlists.whitelist,
|
||||
timeframe=self.timeframe,
|
||||
timerange=timerange,
|
||||
timerange=self.timerange,
|
||||
startup_candles=self.required_startup,
|
||||
fail_without_data=True,
|
||||
data_format=self.config.get('dataformat_ohlcv', 'json'),
|
||||
@@ -159,13 +190,31 @@ class Backtesting:
|
||||
|
||||
logger.info(f'Loading data from {min_date.strftime(DATETIME_PRINT_FORMAT)} '
|
||||
f'up to {max_date.strftime(DATETIME_PRINT_FORMAT)} '
|
||||
f'({(max_date - min_date).days} days)..')
|
||||
f'({(max_date - min_date).days} days).')
|
||||
|
||||
# Adjust startts forward if not enough data is available
|
||||
timerange.adjust_start_if_necessary(timeframe_to_seconds(self.timeframe),
|
||||
self.required_startup, min_date)
|
||||
self.timerange.adjust_start_if_necessary(timeframe_to_seconds(self.timeframe),
|
||||
self.required_startup, min_date)
|
||||
|
||||
return data, timerange
|
||||
self.progress.set_new_value(1)
|
||||
return data, self.timerange
|
||||
|
||||
def load_bt_data_detail(self) -> None:
|
||||
"""
|
||||
Loads backtest detail data (smaller timeframe) if necessary.
|
||||
"""
|
||||
if self.timeframe_detail:
|
||||
self.detail_data = history.load_data(
|
||||
datadir=self.config['datadir'],
|
||||
pairs=self.pairlists.whitelist,
|
||||
timeframe=self.timeframe_detail,
|
||||
timerange=self.timerange,
|
||||
startup_candles=0,
|
||||
fail_without_data=True,
|
||||
data_format=self.config.get('dataformat_ohlcv', 'json'),
|
||||
)
|
||||
else:
|
||||
self.detail_data = {}
|
||||
|
||||
def prepare_backtest(self, enable_protections):
|
||||
"""
|
||||
@@ -176,6 +225,19 @@ class Backtesting:
|
||||
Trade.use_db = False
|
||||
PairLocks.reset_locks()
|
||||
Trade.reset_trades()
|
||||
self.rejected_trades = 0
|
||||
self.dataprovider.clear_cache()
|
||||
if enable_protections:
|
||||
self._load_protections(self.strategy)
|
||||
|
||||
def check_abort(self):
|
||||
"""
|
||||
Check if abort was requested, raise DependencyException if that's the case
|
||||
Only applies to Interactive backtest mode (webserver mode)
|
||||
"""
|
||||
if self.abort:
|
||||
self.abort = False
|
||||
raise DependencyException("Stop requested")
|
||||
|
||||
def _get_ohlcv_as_lists(self, processed: Dict[str, DataFrame]) -> Dict[str, Tuple]:
|
||||
"""
|
||||
@@ -185,26 +247,38 @@ class Backtesting:
"""
# Every change to this headers list must evaluate further usages of the resulting tuple
# and eventually change the constants for indexes at the top
headers = ['date', 'buy', 'open', 'close', 'sell', 'low', 'high']
headers = ['date', 'buy', 'open', 'close', 'sell', 'low', 'high', 'buy_tag']
data: Dict = {}
self.progress.init_step(BacktestState.CONVERT, len(processed))

# Create dict with data
for pair, pair_data in processed.items():
pair_data.loc[:, 'buy'] = 0 # cleanup from previous run
pair_data.loc[:, 'sell'] = 0 # cleanup from previous run
self.check_abort()
self.progress.increment()
if not pair_data.empty:
pair_data.loc[:, 'buy'] = 0 # cleanup if buy_signal is exist
pair_data.loc[:, 'sell'] = 0 # cleanup if sell_signal is exist
pair_data.loc[:, 'buy_tag'] = None # cleanup if buy_tag is exist

df_analyzed = self.strategy.advise_sell(
self.strategy.advise_buy(pair_data, {'pair': pair}), {'pair': pair})[headers].copy()

self.strategy.advise_buy(pair_data, {'pair': pair}), {'pair': pair}).copy()
# Trim startup period from analyzed dataframe
df_analyzed = trim_dataframe(df_analyzed, self.timerange,
startup_candles=self.required_startup)
# To avoid using data from future, we use buy/sell signals shifted
# from the previous candle
df_analyzed.loc[:, 'buy'] = df_analyzed.loc[:, 'buy'].shift(1)
df_analyzed.loc[:, 'sell'] = df_analyzed.loc[:, 'sell'].shift(1)
df_analyzed.loc[:, 'buy_tag'] = df_analyzed.loc[:, 'buy_tag'].shift(1)

df_analyzed.drop(df_analyzed.head(1).index, inplace=True)
# Update dataprovider cache
self.dataprovider._set_cached_df(pair, self.timeframe, df_analyzed)

df_analyzed = df_analyzed.drop(df_analyzed.head(1).index)

# Convert from Pandas to list for performance reasons
# (Looping Pandas is slow.)
data[pair] = df_analyzed.values.tolist()
data[pair] = df_analyzed[headers].values.tolist()
return data
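
The shift-by-one applied above prevents lookahead: a signal computed on candle N may only be acted on at candle N+1. A minimal sketch with toy data, not part of the commit:

    # Illustrative sketch only - toy data.
    import pandas as pd

    df = pd.DataFrame({'buy': [0, 1, 0], 'sell': [0, 0, 1], 'buy_tag': [None, 'sig', None]})
    df.loc[:, 'buy'] = df.loc[:, 'buy'].shift(1)        # act one candle after the signal
    df.loc[:, 'sell'] = df.loc[:, 'sell'].shift(1)
    df.loc[:, 'buy_tag'] = df.loc[:, 'buy_tag'].shift(1)
    df = df.drop(df.head(1).index)                      # first row has no previous candle
    print(df.values.tolist())                           # plain lists, as the backtest loop consumes them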
def _get_close_rate(self, sell_row: Tuple, trade: LocalTrade, sell: SellCheckTuple,
@@ -214,6 +288,32 @@ class Backtesting:
"""
# Special handling if high or low hit STOP_LOSS or ROI
if sell.sell_type in (SellType.STOP_LOSS, SellType.TRAILING_STOP_LOSS):
if trade.stop_loss > sell_row[HIGH_IDX]:
# our stoploss was already higher than candle high,
# possibly due to a cancelled trade exit.
# sell at open price.
return sell_row[OPEN_IDX]

# Special case: trailing triggers within same candle as trade opened. Assume most
# pessimistic price movement, which is moving just enough to arm stoploss and
# immediately going down to stop price.
if sell.sell_type == SellType.TRAILING_STOP_LOSS and trade_dur == 0:
if (
not self.strategy.use_custom_stoploss and self.strategy.trailing_stop
and self.strategy.trailing_only_offset_is_reached
and self.strategy.trailing_stop_positive_offset is not None
and self.strategy.trailing_stop_positive
):
# Worst case: price reaches stop_positive_offset and dives down.
stop_rate = (sell_row[OPEN_IDX] *
(1 + abs(self.strategy.trailing_stop_positive_offset) -
abs(self.strategy.trailing_stop_positive)))
else:
# Worst case: price ticks tiny bit above open and dives down.
stop_rate = sell_row[OPEN_IDX] * (1 - abs(trade.stop_loss_pct))
assert stop_rate < sell_row[HIGH_IDX]
return stop_rate
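
As a worked example of the worst-case formula above (numbers are illustrative only): with an open of 100, trailing_stop_positive_offset = 0.03 and trailing_stop_positive = 0.01, the assumed exit is 100 * (1 + 0.03 - 0.01) = 102, i.e. price is taken to rally just far enough to arm the trailing stop and then fall straight back to it.

    # Illustrative numbers only.
    open_rate = 100.0
    trailing_stop_positive_offset = 0.03
    trailing_stop_positive = 0.01
    stop_rate = open_rate * (1 + abs(trailing_stop_positive_offset) - abs(trailing_stop_positive))
    print(round(stop_rate, 8))   # 102.0 - rally to the offset, then drop to the trailing stop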
# Set close_rate to stoploss
return trade.stop_loss
elif sell.sell_type == (SellType.ROI):
@@ -239,7 +339,7 @@ class Backtesting:
# Use the maximum between close_rate and low as we
# cannot sell outside of a candle.
# Applies when a new ROI setting comes in place and the whole candle is above that.
return max(close_rate, sell_row[LOW_IDX])
return min(max(close_rate, sell_row[LOW_IDX]), sell_row[HIGH_IDX])

else:
# This should not be reached...
@@ -247,41 +347,100 @@ class Backtesting:
else:
return sell_row[OPEN_IDX]

def _get_sell_trade_entry(self, trade: LocalTrade, sell_row: Tuple) -> Optional[LocalTrade]:

def _get_sell_trade_entry_for_candle(self, trade: LocalTrade,
sell_row: Tuple) -> Optional[LocalTrade]:
sell_candle_time = sell_row[DATE_IDX].to_pydatetime()
sell = self.strategy.should_sell(trade, sell_row[OPEN_IDX], # type: ignore
sell_row[DATE_IDX], sell_row[BUY_IDX], sell_row[SELL_IDX],
sell_candle_time, sell_row[BUY_IDX],
sell_row[SELL_IDX],
low=sell_row[LOW_IDX], high=sell_row[HIGH_IDX])
if sell.sell_flag:

trade.close_date = sell_row[DATE_IDX]
trade.sell_reason = sell.sell_type.value
if sell.sell_flag:
trade.close_date = sell_candle_time
trade.sell_reason = sell.sell_reason
trade_dur = int((trade.close_date_utc - trade.open_date_utc).total_seconds() // 60)
closerate = self._get_close_rate(sell_row, trade, sell, trade_dur)

# Confirm trade exit:
time_in_force = self.strategy.order_time_in_force['sell']
if not strategy_safe_wrapper(self.strategy.confirm_trade_exit, default_retval=True)(
pair=trade.pair, trade=trade, order_type='limit', amount=trade.amount,
rate=closerate,
time_in_force=time_in_force,
sell_reason=sell.sell_reason,
current_time=sell_candle_time):
return None

trade.close(closerate, show_msg=False)
return trade

return None

def _enter_trade(self, pair: str, row: List, max_open_trades: int,
open_trade_count: int) -> Optional[LocalTrade]:
def _get_sell_trade_entry(self, trade: LocalTrade, sell_row: Tuple) -> Optional[LocalTrade]:
if self.timeframe_detail and trade.pair in self.detail_data:
sell_candle_time = sell_row[DATE_IDX].to_pydatetime()
sell_candle_end = sell_candle_time + timedelta(minutes=self.timeframe_min)

detail_data = self.detail_data[trade.pair]
detail_data = detail_data.loc[
(detail_data['date'] >= sell_candle_time) &
(detail_data['date'] < sell_candle_end)
].copy()
if len(detail_data) == 0:
# Fall back to "regular" data if no detail data was found for this candle
return self._get_sell_trade_entry_for_candle(trade, sell_row)
detail_data.loc[:, 'buy'] = sell_row[BUY_IDX]
detail_data.loc[:, 'sell'] = sell_row[SELL_IDX]
headers = ['date', 'buy', 'open', 'close', 'sell', 'low', 'high']
for det_row in detail_data[headers].values.tolist():
res = self._get_sell_trade_entry_for_candle(trade, det_row)
if res:
return res

return None

else:
return self._get_sell_trade_entry_for_candle(trade, sell_row)
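
The timeframe_detail branch above replays a single strategy-timeframe candle at the finer detail timeframe. A minimal sketch of the date-window slice it performs (toy data, not from the commit):

    # Illustrative sketch only - toy 5m detail data inside one 1h candle.
    from datetime import timedelta
    import pandas as pd

    timeframe_min = 60
    detail_data = pd.DataFrame({'date': pd.date_range('2021-07-01 00:00', periods=12, freq='5min')})
    sell_candle_time = pd.Timestamp('2021-07-01 00:00')
    sell_candle_end = sell_candle_time + timedelta(minutes=timeframe_min)
    window = detail_data.loc[(detail_data['date'] >= sell_candle_time) &
                             (detail_data['date'] < sell_candle_end)]
    print(len(window))   # 12 five-minute rows fall inside the one-hour candle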
def _enter_trade(self, pair: str, row: List) -> Optional[LocalTrade]:
|
||||
try:
|
||||
stake_amount = self.wallets.get_trade_stake_amount(
|
||||
pair, max_open_trades - open_trade_count, None)
|
||||
stake_amount = self.wallets.get_trade_stake_amount(pair, None)
|
||||
except DependencyException:
|
||||
return None
|
||||
min_stake_amount = self.exchange.get_min_pair_stake_amount(pair, row[OPEN_IDX], -0.05)
|
||||
|
||||
min_stake_amount = self.exchange.get_min_pair_stake_amount(pair, row[OPEN_IDX], -0.05) or 0
|
||||
max_stake_amount = self.wallets.get_available_stake_amount()
|
||||
|
||||
stake_amount = strategy_safe_wrapper(self.strategy.custom_stake_amount,
|
||||
default_retval=stake_amount)(
|
||||
pair=pair, current_time=row[DATE_IDX].to_pydatetime(), current_rate=row[OPEN_IDX],
|
||||
proposed_stake=stake_amount, min_stake=min_stake_amount, max_stake=max_stake_amount)
|
||||
stake_amount = self.wallets._validate_stake_amount(pair, stake_amount, min_stake_amount)
|
||||
|
||||
if not stake_amount:
|
||||
return None
|
||||
|
||||
order_type = self.strategy.order_types['buy']
|
||||
time_in_force = self.strategy.order_time_in_force['sell']
|
||||
# Confirm trade entry:
|
||||
if not strategy_safe_wrapper(self.strategy.confirm_trade_entry, default_retval=True)(
|
||||
pair=pair, order_type=order_type, amount=stake_amount, rate=row[OPEN_IDX],
|
||||
time_in_force=time_in_force, current_time=row[DATE_IDX].to_pydatetime()):
|
||||
return None
|
||||
|
||||
if stake_amount and (not min_stake_amount or stake_amount > min_stake_amount):
|
||||
# Enter trade
|
||||
has_buy_tag = len(row) >= BUY_TAG_IDX + 1
|
||||
trade = LocalTrade(
|
||||
pair=pair,
|
||||
open_rate=row[OPEN_IDX],
|
||||
open_date=row[DATE_IDX],
|
||||
open_date=row[DATE_IDX].to_pydatetime(),
|
||||
stake_amount=stake_amount,
|
||||
amount=round(stake_amount / row[OPEN_IDX], 8),
|
||||
fee_open=self.fee,
|
||||
fee_close=self.fee,
|
||||
is_open=True,
|
||||
buy_tag=row[BUY_TAG_IDX] if has_buy_tag else None,
|
||||
exchange='backtesting',
|
||||
)
|
||||
return trade
|
||||
@@ -298,7 +457,7 @@ class Backtesting:
|
||||
for trade in open_trades[pair]:
|
||||
sell_row = data[pair][-1]
|
||||
|
||||
trade.close_date = sell_row[DATE_IDX]
|
||||
trade.close_date = sell_row[DATE_IDX].to_pydatetime()
|
||||
trade.sell_reason = SellType.FORCE_SELL.value
|
||||
trade.close(sell_row[OPEN_IDX], show_msg=False)
|
||||
LocalTrade.close_bt_trade(trade)
|
||||
@@ -308,10 +467,18 @@ class Backtesting:
trades.append(trade1)
return trades

def trade_slot_available(self, max_open_trades: int, open_trade_count: int) -> bool:
# Always allow trades when max_open_trades is disabled.
if max_open_trades <= 0 or open_trade_count < max_open_trades:
return True
# Rejected trade
self.rejected_trades += 1
return False

def backtest(self, processed: Dict,
start_date: datetime, end_date: datetime,
max_open_trades: int = 0, position_stacking: bool = False,
enable_protections: bool = False) -> DataFrame:
enable_protections: bool = False) -> Dict[str, Any]:
|
||||
"""
|
||||
Implement backtesting functionality
|
||||
|
||||
@@ -335,22 +502,25 @@ class Backtesting:
|
||||
data: Dict = self._get_ohlcv_as_lists(processed)
|
||||
|
||||
# Indexes per pair, so some pairs are allowed to have a missing start.
|
||||
indexes: Dict = {}
|
||||
indexes: Dict = defaultdict(int)
|
||||
tmp = start_date + timedelta(minutes=self.timeframe_min)
|
||||
|
||||
open_trades: Dict[str, List[LocalTrade]] = defaultdict(list)
|
||||
open_trade_count = 0
|
||||
|
||||
self.progress.init_step(BacktestState.BACKTEST, int(
|
||||
(end_date - start_date) / timedelta(minutes=self.timeframe_min)))
|
||||
|
||||
# Loop timerange and get candle for each pair at that point in time
|
||||
while tmp <= end_date:
|
||||
open_trade_count_start = open_trade_count
|
||||
|
||||
self.check_abort()
|
||||
for i, pair in enumerate(data):
|
||||
if pair not in indexes:
|
||||
indexes[pair] = 0
|
||||
|
||||
row_index = indexes[pair]
|
||||
try:
|
||||
row = data[pair][indexes[pair]]
|
||||
# Row is treated as "current incomplete candle".
|
||||
# Buy / sell signals are shifted by 1 to compensate for this.
|
||||
row = data[pair][row_index]
|
||||
except IndexError:
|
||||
# missing Data for one pair at the end.
|
||||
# Warnings for this are shown during data loading
|
||||
@@ -359,17 +529,23 @@ class Backtesting:
|
||||
# Waits until the time-counter reaches the start of the data for this pair.
|
||||
if row[DATE_IDX] > tmp:
|
||||
continue
|
||||
indexes[pair] += 1
|
||||
|
||||
row_index += 1
|
||||
indexes[pair] = row_index
|
||||
self.dataprovider._set_dataframe_max_index(row_index)
|
||||
|
||||
# without positionstacking, we can only have one open trade per pair.
|
||||
# max_open_trades must be respected
|
||||
# don't open on the last row
|
||||
if ((position_stacking or len(open_trades[pair]) == 0)
|
||||
and (max_open_trades <= 0 or open_trade_count_start < max_open_trades)
|
||||
and tmp != end_date
|
||||
and row[BUY_IDX] == 1 and row[SELL_IDX] != 1
|
||||
and not PairLocks.is_pair_locked(pair, row[DATE_IDX])):
|
||||
trade = self._enter_trade(pair, row, max_open_trades, open_trade_count_start)
|
||||
if (
|
||||
(position_stacking or len(open_trades[pair]) == 0)
|
||||
and self.trade_slot_available(max_open_trades, open_trade_count_start)
|
||||
and tmp != end_date
|
||||
and row[BUY_IDX] == 1
|
||||
and row[SELL_IDX] != 1
|
||||
and not PairLocks.is_pair_locked(pair, row[DATE_IDX])
|
||||
):
|
||||
trade = self._enter_trade(pair, row)
|
||||
if trade:
|
||||
# TODO: hacky workaround to avoid opening > max_open_trades
|
||||
# This emulates previous behaviour - not sure if this is correct
|
||||
@@ -380,10 +556,10 @@ class Backtesting:
|
||||
open_trades[pair].append(trade)
|
||||
LocalTrade.add_bt_trade(trade)
|
||||
|
||||
for trade in open_trades[pair]:
|
||||
for trade in list(open_trades[pair]):
|
||||
# also check the buying candle for sell conditions.
|
||||
trade_entry = self._get_sell_trade_entry(trade, row)
|
||||
# Sell occured
|
||||
# Sell occurred
|
||||
if trade_entry:
|
||||
# logger.debug(f"{pair} - Backtesting sell {trade}")
|
||||
open_trade_count -= 1
|
||||
@@ -396,14 +572,25 @@ class Backtesting:
self.protections.global_stop(tmp)

# Move time one configured time_interval ahead.
self.progress.increment()
tmp += timedelta(minutes=self.timeframe_min)

trades += self.handle_left_open(open_trades, data=data)
self.wallets.update()

return trade_list_to_dataframe(trades)
results = trade_list_to_dataframe(trades)
return {
'results': results,
'config': self.strategy.config,
'locks': PairLocks.get_all_locks(),
'rejected_signals': self.rejected_trades,
'final_balance': self.wallets.get_total(self.strategy.config['stake_currency']),
}
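
backtest() now returns a dict rather than a bare trade DataFrame. A hedged sketch of how a caller might consume it; the call arguments and the backtesting instance are assumed, while the keys mirror the dict built above:

    # Illustrative sketch only - `backtesting`, `preprocessed`, `min_date` and `max_date` are assumed.
    bt_results = backtesting.backtest(
        processed=preprocessed,
        start_date=min_date,
        end_date=max_date,
        max_open_trades=3,
        position_stacking=False,
        enable_protections=False,
    )
    trades_df = bt_results['results']   # per-trade DataFrame, the former return value
    print(bt_results['rejected_signals'], bt_results['final_balance'])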
def backtest_one_strategy(self, strat: IStrategy, data: Dict[str, DataFrame],
|
||||
timerange: TimeRange):
|
||||
self.progress.init_step(BacktestState.ANALYZE, 0)
|
||||
|
||||
def backtest_one_strategy(self, strat: IStrategy, data: Dict[str, Any], timerange: TimeRange):
|
||||
logger.info("Running backtesting for Strategy %s", strat.get_strategy_name())
|
||||
backtest_start_time = datetime.now(timezone.utc)
|
||||
self._set_strategy(strat)
|
||||
@@ -420,34 +607,37 @@ class Backtesting:
|
||||
max_open_trades = 0
|
||||
|
||||
# need to reprocess data every time to populate signals
|
||||
preprocessed = self.strategy.ohlcvdata_to_dataframe(data)
|
||||
preprocessed = self.strategy.advise_all_indicators(data)
|
||||
|
||||
# Trim startup period from analyzed dataframe
|
||||
for pair, df in preprocessed.items():
|
||||
preprocessed[pair] = trim_dataframe(df, timerange)
|
||||
min_date, max_date = history.get_timerange(preprocessed)
|
||||
preprocessed_tmp = trim_dataframes(preprocessed, timerange, self.required_startup)
|
||||
|
||||
if not preprocessed_tmp:
|
||||
raise OperationalException(
|
||||
"No data left after adjusting for startup candles.")
|
||||
|
||||
# Use preprocessed_tmp for date generation (the trimmed dataframe).
|
||||
# Backtesting will re-trim the dataframes after buy/sell signal generation.
|
||||
min_date, max_date = history.get_timerange(preprocessed_tmp)
|
||||
logger.info(f'Backtesting with data from {min_date.strftime(DATETIME_PRINT_FORMAT)} '
|
||||
f'up to {max_date.strftime(DATETIME_PRINT_FORMAT)} '
|
||||
f'({(max_date - min_date).days} days)..')
|
||||
f'({(max_date - min_date).days} days).')
|
||||
# Execute backtest and store results
|
||||
results = self.backtest(
|
||||
processed=preprocessed,
|
||||
start_date=min_date.datetime,
|
||||
end_date=max_date.datetime,
|
||||
start_date=min_date,
|
||||
end_date=max_date,
|
||||
max_open_trades=max_open_trades,
|
||||
position_stacking=self.config.get('position_stacking', False),
|
||||
enable_protections=self.config.get('enable_protections', False),
|
||||
)
|
||||
backtest_end_time = datetime.now(timezone.utc)
|
||||
self.all_results[self.strategy.get_strategy_name()] = {
|
||||
'results': results,
|
||||
'config': self.strategy.config,
|
||||
'locks': PairLocks.get_all_locks(),
|
||||
'final_balance': self.wallets.get_total(self.strategy.config['stake_currency']),
|
||||
results.update({
|
||||
'backtest_start_time': int(backtest_start_time.timestamp()),
|
||||
'backtest_end_time': int(backtest_end_time.timestamp()),
|
||||
}
|
||||
})
|
||||
self.all_results[self.strategy.get_strategy_name()] = results
|
||||
|
||||
return min_date, max_date
|
||||
|
||||
def start(self) -> None:
|
||||
@@ -458,15 +648,18 @@ class Backtesting:
data: Dict[str, Any] = {}

data, timerange = self.load_bt_data()
self.load_bt_data_detail()
logger.info("Dataload complete. Calculating indicators")

for strat in self.strategylist:
min_date, max_date = self.backtest_one_strategy(strat, data, timerange)
if len(self.strategylist) > 0:
stats = generate_backtest_stats(data, self.all_results,
min_date=min_date, max_date=max_date)

if self.config.get('export', False):
store_backtest_stats(self.config['exportfilename'], stats)
self.results = generate_backtest_stats(data, self.all_results,
min_date=min_date, max_date=max_date)

if self.config.get('export', 'none') == 'trades':
store_backtest_stats(self.config['exportfilename'], self.results)

# Show backtest results
show_backtest_results(self.config, stats)
show_backtest_results(self.config, self.results)
freqtrade/optimize/bt_progress.py (new file, 33 lines)
@@ -0,0 +1,33 @@
from freqtrade.enums import BacktestState


class BTProgress:
_action: BacktestState = BacktestState.STARTUP
_progress: float = 0
_max_steps: float = 0

def __init__(self):
pass

def init_step(self, action: BacktestState, max_steps: float):
self._action = action
self._max_steps = max_steps
self._progress = 0

def set_new_value(self, new_value: float):
self._progress = new_value

def increment(self):
self._progress += 1

@property
def progress(self):
"""
Get progress as ratio, capped to be between 0 and 1 (to avoid small calculation errors).
"""
return max(min(round(self._progress / self._max_steps, 5)
if self._max_steps > 0 else 0, 1), 0)

@property
def action(self):
return str(self._action)
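
A hedged usage sketch of the new BTProgress helper added above; the step count is made up, and BacktestState values come from freqtrade.enums as in the file:

    # Illustrative usage only - the step count is made up.
    from freqtrade.enums import BacktestState
    from freqtrade.optimize.bt_progress import BTProgress

    progress = BTProgress()
    progress.init_step(BacktestState.CONVERT, 4)
    progress.increment()
    progress.increment()
    print(progress.action, progress.progress)   # current phase and 0.5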
@@ -7,7 +7,8 @@ import logging
|
||||
from typing import Any, Dict
|
||||
|
||||
from freqtrade import constants
|
||||
from freqtrade.configuration import TimeRange, remove_credentials, validate_config_consistency
|
||||
from freqtrade.configuration import TimeRange, validate_config_consistency
|
||||
from freqtrade.data.dataprovider import DataProvider
|
||||
from freqtrade.edge import Edge
|
||||
from freqtrade.optimize.optimize_reports import generate_edge_table
|
||||
from freqtrade.resolvers import ExchangeResolver, StrategyResolver
|
||||
@@ -28,11 +29,12 @@ class EdgeCli:
|
||||
def __init__(self, config: Dict[str, Any]) -> None:
|
||||
self.config = config
|
||||
|
||||
# Reset keys for edge
|
||||
remove_credentials(self.config)
|
||||
# Ensure using dry-run
|
||||
self.config['dry_run'] = True
|
||||
self.config['stake_amount'] = constants.UNLIMITED_STAKE_AMOUNT
|
||||
self.exchange = ExchangeResolver.load_exchange(self.config['exchange']['name'], self.config)
|
||||
self.strategy = StrategyResolver.load_strategy(self.config)
|
||||
self.strategy.dp = DataProvider(config, None)
|
||||
|
||||
validate_config_consistency(self.config)
|
||||
|
||||
@@ -44,7 +46,7 @@ class EdgeCli:
|
||||
'timerange') is None else str(self.config.get('timerange')))
|
||||
|
||||
def start(self) -> None:
|
||||
result = self.edge.calculate()
|
||||
result = self.edge.calculate(self.config['exchange']['pair_whitelist'])
|
||||
if result:
|
||||
print('') # blank line for readability
|
||||
print(generate_edge_table(self.edge._cached_pairs))
|
||||
|
@@ -4,38 +4,34 @@
|
||||
This module contains the hyperopt logic
|
||||
"""
|
||||
|
||||
import io
|
||||
import locale
|
||||
import logging
|
||||
import random
|
||||
import warnings
|
||||
from collections import OrderedDict
|
||||
from datetime import datetime
|
||||
from datetime import datetime, timezone
|
||||
from math import ceil
|
||||
from operator import itemgetter
|
||||
from pathlib import Path
|
||||
from pprint import pformat
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import progressbar
|
||||
import rapidjson
|
||||
import tabulate
|
||||
from colorama import Fore, Style
|
||||
from colorama import init as colorama_init
|
||||
from joblib import Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects
|
||||
from pandas import DataFrame, isna, json_normalize
|
||||
from pandas import DataFrame
|
||||
|
||||
from freqtrade.constants import DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN
|
||||
from freqtrade.data.converter import trim_dataframe
|
||||
from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN
|
||||
from freqtrade.data.converter import trim_dataframes
|
||||
from freqtrade.data.history import get_timerange
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.misc import file_dump_json, plural, round_dict
|
||||
from freqtrade.misc import deep_merge_dicts, file_dump_json, plural
|
||||
from freqtrade.optimize.backtesting import Backtesting
|
||||
# Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules
|
||||
from freqtrade.optimize.hyperopt_auto import HyperOptAuto
|
||||
from freqtrade.optimize.hyperopt_interface import IHyperOpt # noqa: F401
|
||||
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss # noqa: F401
|
||||
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver, HyperOptResolver
|
||||
from freqtrade.strategy import IStrategy
|
||||
from freqtrade.optimize.hyperopt_tools import HyperoptTools, hyperopt_serializer
|
||||
from freqtrade.optimize.optimize_reports import generate_strategy_stats
|
||||
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver
|
||||
|
||||
|
||||
# Suppress scikit-learn FutureWarnings from skopt
|
||||
@@ -49,7 +45,7 @@ progressbar.streams.wrap_stdout()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
INITIAL_POINTS = 30
|
||||
INITIAL_POINTS = 5
|
||||
|
||||
# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
|
||||
# in the skopt model queue, to optimize memory consumption
|
||||
@@ -66,22 +62,37 @@ class Hyperopt:
|
||||
hyperopt = Hyperopt(config)
|
||||
hyperopt.start()
|
||||
"""
|
||||
custom_hyperopt: IHyperOpt
|
||||
|
||||
def __init__(self, config: Dict[str, Any]) -> None:
|
||||
self.buy_space: List[Dimension] = []
|
||||
self.sell_space: List[Dimension] = []
|
||||
self.protection_space: List[Dimension] = []
|
||||
self.roi_space: List[Dimension] = []
|
||||
self.stoploss_space: List[Dimension] = []
|
||||
self.trailing_space: List[Dimension] = []
|
||||
self.dimensions: List[Dimension] = []
|
||||
|
||||
self.config = config
|
||||
|
||||
self.backtesting = Backtesting(self.config)
|
||||
|
||||
self.custom_hyperopt = HyperOptResolver.load_hyperopt(self.config)
|
||||
self.custom_hyperopt.__class__.strategy = self.backtesting.strategy
|
||||
if not self.config.get('hyperopt'):
|
||||
self.custom_hyperopt = HyperOptAuto(self.config)
|
||||
else:
|
||||
raise OperationalException(
|
||||
"Using separate Hyperopt files has been removed in 2021.9. Please convert "
|
||||
"your existing Hyperopt file to the new Hyperoptable strategy interface")
|
||||
|
||||
self.backtesting._set_strategy(self.backtesting.strategylist[0])
|
||||
self.custom_hyperopt.strategy = self.backtesting.strategy
|
||||
|
||||
self.custom_hyperoptloss = HyperOptLossResolver.load_hyperoptloss(self.config)
|
||||
self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function
|
||||
time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
|
||||
strategy = str(self.config['strategy'])
|
||||
self.results_file = (self.config['user_data_dir'] /
|
||||
'hyperopt_results' /
|
||||
f'strategy_{strategy}_hyperopt_results_{time_now}.pickle')
|
||||
self.results_file: Path = (self.config['user_data_dir'] / 'hyperopt_results' /
|
||||
f'strategy_{strategy}_{time_now}.fthypt')
|
||||
self.data_pickle_file = (self.config['user_data_dir'] /
|
||||
'hyperopt_results' / 'hyperopt_tickerdata.pkl')
|
||||
self.total_epochs = config.get('epochs', 0)
|
||||
@@ -91,20 +102,7 @@ class Hyperopt:
|
||||
self.clean_hyperopt()
|
||||
|
||||
self.num_epochs_saved = 0
|
||||
|
||||
# Previous evaluations
|
||||
self.epochs: List = []
|
||||
|
||||
# Populate functions here (hasattr is slow so should not be run during "regular" operations)
|
||||
if hasattr(self.custom_hyperopt, 'populate_indicators'):
|
||||
self.backtesting.strategy.advise_indicators = ( # type: ignore
|
||||
self.custom_hyperopt.populate_indicators) # type: ignore
|
||||
if hasattr(self.custom_hyperopt, 'populate_buy_trend'):
|
||||
self.backtesting.strategy.advise_buy = ( # type: ignore
|
||||
self.custom_hyperopt.populate_buy_trend) # type: ignore
|
||||
if hasattr(self.custom_hyperopt, 'populate_sell_trend'):
|
||||
self.backtesting.strategy.advise_sell = ( # type: ignore
|
||||
self.custom_hyperopt.populate_sell_trend) # type: ignore
|
||||
self.current_best_epoch: Optional[Dict[str, Any]] = None
|
||||
|
||||
# Use max_open_trades for hyperopt as well, except --disable-max-market-positions is set
|
||||
if self.config.get('use_max_market_positions', True):
|
||||
@@ -114,7 +112,7 @@ class Hyperopt:
|
||||
self.max_open_trades = 0
|
||||
self.position_stacking = self.config.get('position_stacking', False)
|
||||
|
||||
if self.has_space('sell'):
|
||||
if HyperoptTools.has_space(self.config, 'sell'):
|
||||
# Make sure use_sell_signal is enabled
|
||||
if 'ask_strategy' not in self.config:
|
||||
self.config['ask_strategy'] = {}
|
||||
@@ -140,9 +138,7 @@ class Hyperopt:
logger.info(f"Removing `{p}`.")
p.unlink()

def _get_params_dict(self, raw_params: List[Any]) -> Dict:

dimensions: List[Dimension] = self.dimensions
def _get_params_dict(self, dimensions: List[Dimension], raw_params: List[Any]) -> Dict:

# Ensure the number of dimensions match
# the number of parameters in the list.
@@ -153,30 +149,26 @@ class Hyperopt:
# and the values are taken from the list of parameters.
return {d.name: v for d, v in zip(dimensions, raw_params)}
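
_get_params_dict simply zips dimension names with the raw values proposed by the optimizer. A tiny sketch with toy skopt dimensions; the parameter names are assumed, not from the commit:

    # Illustrative sketch only - toy dimensions with assumed names.
    from skopt.space import Integer, Real

    dimensions = [Integer(10, 50, name='buy-rsi-value'), Real(-0.35, -0.02, name='stoploss')]
    raw_params = [32, -0.05]
    params_dict = {d.name: v for d, v in zip(dimensions, raw_params)}
    print(params_dict)   # {'buy-rsi-value': 32, 'stoploss': -0.05}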
def _save_results(self) -> None:
def _save_result(self, epoch: Dict) -> None:
"""
Save hyperopt results to file
Store one line per epoch.
While not a valid json object - this allows appending easily.
:param epoch: result dictionary for this epoch.
"""
num_epochs = len(self.epochs)
if num_epochs > self.num_epochs_saved:
logger.debug(f"Saving {num_epochs} {plural(num_epochs, 'epoch')}.")
dump(self.epochs, self.results_file)
self.num_epochs_saved = num_epochs
logger.debug(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'.")
# Store hyperopt filename
latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {'latest_hyperopt': str(self.results_file.name)},
log=False)
epoch[FTHYPT_FILEVERSION] = 2
with self.results_file.open('a') as f:
rapidjson.dump(epoch, f, default=hyperopt_serializer,
number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN)
f.write("\n")

@staticmethod
def _read_results(results_file: Path) -> List:
"""
Read hyperopt results from file
"""
logger.info("Reading epochs from '%s'", results_file)
data = load(results_file)
return data
self.num_epochs_saved += 1
logger.debug(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'.")
# Store hyperopt filename
latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {'latest_hyperopt': str(self.results_file.name)},
log=False)
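
Since _save_result now appends one JSON object per line, the new .fthypt results file is newline-delimited JSON rather than a single document. A hedged sketch of reading it back; the file path is a placeholder:

    # Illustrative sketch only - the file path is a placeholder.
    import rapidjson

    epochs = []
    with open('user_data/hyperopt_results/strategy_Example.fthypt') as f:
        for line in f:
            if line.strip():
                epochs.append(rapidjson.loads(line))
    print(f"loaded {len(epochs)} epochs")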
def _get_params_details(self, params: Dict) -> Dict:
|
||||
"""
|
||||
@@ -184,118 +176,51 @@ class Hyperopt:
|
||||
"""
|
||||
result: Dict = {}
|
||||
|
||||
if self.has_space('buy'):
|
||||
result['buy'] = {p.name: params.get(p.name)
|
||||
for p in self.hyperopt_space('buy')}
|
||||
if self.has_space('sell'):
|
||||
result['sell'] = {p.name: params.get(p.name)
|
||||
for p in self.hyperopt_space('sell')}
|
||||
if self.has_space('roi'):
|
||||
result['roi'] = self.custom_hyperopt.generate_roi_table(params)
|
||||
if self.has_space('stoploss'):
|
||||
result['stoploss'] = {p.name: params.get(p.name)
|
||||
for p in self.hyperopt_space('stoploss')}
|
||||
if self.has_space('trailing'):
|
||||
if HyperoptTools.has_space(self.config, 'buy'):
|
||||
result['buy'] = {p.name: params.get(p.name) for p in self.buy_space}
|
||||
if HyperoptTools.has_space(self.config, 'sell'):
|
||||
result['sell'] = {p.name: params.get(p.name) for p in self.sell_space}
|
||||
if HyperoptTools.has_space(self.config, 'protection'):
|
||||
result['protection'] = {p.name: params.get(p.name) for p in self.protection_space}
|
||||
if HyperoptTools.has_space(self.config, 'roi'):
|
||||
result['roi'] = {str(k): v for k, v in
|
||||
self.custom_hyperopt.generate_roi_table(params).items()}
|
||||
if HyperoptTools.has_space(self.config, 'stoploss'):
|
||||
result['stoploss'] = {p.name: params.get(p.name) for p in self.stoploss_space}
|
||||
if HyperoptTools.has_space(self.config, 'trailing'):
|
||||
result['trailing'] = self.custom_hyperopt.generate_trailing_params(params)
|
||||
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def print_epoch_details(results, total_epochs: int, print_json: bool,
|
||||
no_header: bool = False, header_str: str = None) -> None:
|
||||
def _get_no_optimize_details(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Display details of the hyperopt result
|
||||
Get non-optimized parameters
|
||||
"""
|
||||
params = results.get('params_details', {})
|
||||
|
||||
# Default header string
|
||||
if header_str is None:
|
||||
header_str = "Best result"
|
||||
|
||||
if not no_header:
|
||||
explanation_str = Hyperopt._format_explanation_string(results, total_epochs)
|
||||
print(f"\n{header_str}:\n\n{explanation_str}\n")
|
||||
|
||||
if print_json:
|
||||
result_dict: Dict = {}
|
||||
for s in ['buy', 'sell', 'roi', 'stoploss', 'trailing']:
|
||||
Hyperopt._params_update_for_json(result_dict, params, s)
|
||||
print(rapidjson.dumps(result_dict, default=str, number_mode=rapidjson.NM_NATIVE))
|
||||
|
||||
else:
|
||||
Hyperopt._params_pretty_print(params, 'buy', "Buy hyperspace params:")
|
||||
Hyperopt._params_pretty_print(params, 'sell', "Sell hyperspace params:")
|
||||
Hyperopt._params_pretty_print(params, 'roi', "ROI table:")
|
||||
Hyperopt._params_pretty_print(params, 'stoploss', "Stoploss:")
|
||||
Hyperopt._params_pretty_print(params, 'trailing', "Trailing stop:")
|
||||
|
||||
@staticmethod
|
||||
def _params_update_for_json(result_dict, params, space: str) -> None:
|
||||
if space in params:
|
||||
space_params = Hyperopt._space_params(params, space)
|
||||
if space in ['buy', 'sell']:
|
||||
result_dict.setdefault('params', {}).update(space_params)
|
||||
elif space == 'roi':
|
||||
# TODO: get rid of OrderedDict when support for python 3.6 will be
|
||||
# dropped (dicts keep the order as the language feature)
|
||||
|
||||
# Convert keys in min_roi dict to strings because
|
||||
# rapidjson cannot dump dicts with integer keys...
|
||||
# OrderedDict is used to keep the numeric order of the items
|
||||
# in the dict.
|
||||
result_dict['minimal_roi'] = OrderedDict(
|
||||
(str(k), v) for k, v in space_params.items()
|
||||
)
|
||||
else: # 'stoploss', 'trailing'
|
||||
result_dict.update(space_params)
|
||||
|
||||
@staticmethod
|
||||
def _params_pretty_print(params, space: str, header: str) -> None:
|
||||
if space in params:
|
||||
space_params = Hyperopt._space_params(params, space, 5)
|
||||
params_result = f"\n# {header}\n"
|
||||
if space == 'stoploss':
|
||||
params_result += f"stoploss = {space_params.get('stoploss')}"
|
||||
elif space == 'roi':
|
||||
# TODO: get rid of OrderedDict when support for python 3.6 will be
|
||||
# dropped (dicts keep the order as the language feature)
|
||||
minimal_roi_result = rapidjson.dumps(
|
||||
OrderedDict(
|
||||
(str(k), v) for k, v in space_params.items()
|
||||
),
|
||||
default=str, indent=4, number_mode=rapidjson.NM_NATIVE)
|
||||
params_result += f"minimal_roi = {minimal_roi_result}"
|
||||
elif space == 'trailing':
|
||||
|
||||
for k, v in space_params.items():
|
||||
params_result += f'{k} = {v}\n'
|
||||
|
||||
else:
|
||||
params_result += f"{space}_params = {pformat(space_params, indent=4)}"
|
||||
params_result = params_result.replace("}", "\n}").replace("{", "{\n ")
|
||||
|
||||
params_result = params_result.replace("\n", "\n ")
|
||||
print(params_result)
|
||||
|
||||
@staticmethod
|
||||
def _space_params(params, space: str, r: int = None) -> Dict:
|
||||
d = params[space]
|
||||
# Round floats to `r` digits after the decimal point if requested
|
||||
return round_dict(d, r) if r else d
|
||||
|
||||
@staticmethod
|
||||
def is_best_loss(results, current_best_loss: float) -> bool:
|
||||
return results['loss'] < current_best_loss
|
||||
result: Dict[str, Any] = {}
|
||||
strategy = self.backtesting.strategy
|
||||
if not HyperoptTools.has_space(self.config, 'roi'):
|
||||
result['roi'] = {str(k): v for k, v in strategy.minimal_roi.items()}
|
||||
if not HyperoptTools.has_space(self.config, 'stoploss'):
|
||||
result['stoploss'] = {'stoploss': strategy.stoploss}
|
||||
if not HyperoptTools.has_space(self.config, 'trailing'):
|
||||
result['trailing'] = {
|
||||
'trailing_stop': strategy.trailing_stop,
|
||||
'trailing_stop_positive': strategy.trailing_stop_positive,
|
||||
'trailing_stop_positive_offset': strategy.trailing_stop_positive_offset,
|
||||
'trailing_only_offset_is_reached': strategy.trailing_only_offset_is_reached,
|
||||
}
|
||||
return result
|
||||
|
||||
def print_results(self, results) -> None:
|
||||
"""
|
||||
Log results if it is better than any previous evaluation
|
||||
TODO: this should be moved to HyperoptTools too
|
||||
"""
|
||||
is_best = results['is_best']
|
||||
|
||||
if self.print_all or is_best:
|
||||
print(
|
||||
self.get_result_table(
|
||||
HyperoptTools.get_result_table(
|
||||
self.config, results, self.total_epochs,
|
||||
self.print_all, self.print_colorized,
|
||||
self.hyperopt_table_header
|
||||
@@ -303,231 +228,76 @@ class Hyperopt:
|
||||
)
|
||||
self.hyperopt_table_header = 2
|
||||
|
||||
@staticmethod
|
||||
def _format_explanation_string(results, total_epochs) -> str:
|
||||
return (("*" if results['is_initial_point'] else " ") +
|
||||
f"{results['current_epoch']:5d}/{total_epochs}: " +
|
||||
f"{results['results_explanation']} " +
|
||||
f"Objective: {results['loss']:.5f}")
|
||||
|
||||
@staticmethod
|
||||
def get_result_table(config: dict, results: list, total_epochs: int, highlight_best: bool,
|
||||
print_colorized: bool, remove_header: int) -> str:
|
||||
def init_spaces(self):
|
||||
"""
|
||||
Log result table
|
||||
Assign the dimensions in the hyperoptimization space.
|
||||
"""
|
||||
if not results:
|
||||
return ''
|
||||
if HyperoptTools.has_space(self.config, 'protection'):
|
||||
# Protections can only be optimized when using the Parameter interface
|
||||
logger.debug("Hyperopt has 'protection' space")
|
||||
# Enable Protections if protection space is selected.
|
||||
self.config['enable_protections'] = True
|
||||
self.protection_space = self.custom_hyperopt.protection_space()
|
||||
|
||||
tabulate.PRESERVE_WHITESPACE = True
|
||||
|
||||
trials = json_normalize(results, max_level=1)
|
||||
trials['Best'] = ''
|
||||
if 'results_metrics.winsdrawslosses' not in trials.columns:
|
||||
# Ensure compatibility with older versions of hyperopt results
|
||||
trials['results_metrics.winsdrawslosses'] = 'N/A'
|
||||
|
||||
trials = trials[['Best', 'current_epoch', 'results_metrics.trade_count',
|
||||
'results_metrics.winsdrawslosses',
|
||||
'results_metrics.avg_profit', 'results_metrics.total_profit',
|
||||
'results_metrics.profit', 'results_metrics.duration',
|
||||
'loss', 'is_initial_point', 'is_best']]
|
||||
trials.columns = ['Best', 'Epoch', 'Trades', ' Win Draw Loss', 'Avg profit',
|
||||
'Total profit', 'Profit', 'Avg duration', 'Objective',
|
||||
'is_initial_point', 'is_best']
|
||||
trials['is_profit'] = False
|
||||
trials.loc[trials['is_initial_point'], 'Best'] = '* '
|
||||
trials.loc[trials['is_best'], 'Best'] = 'Best'
|
||||
trials.loc[trials['is_initial_point'] & trials['is_best'], 'Best'] = '* Best'
|
||||
trials.loc[trials['Total profit'] > 0, 'is_profit'] = True
|
||||
trials['Trades'] = trials['Trades'].astype(str)
|
||||
|
||||
trials['Epoch'] = trials['Epoch'].apply(
|
||||
lambda x: '{}/{}'.format(str(x).rjust(len(str(total_epochs)), ' '), total_epochs)
|
||||
)
|
||||
trials['Avg profit'] = trials['Avg profit'].apply(
|
||||
lambda x: '{:,.2f}%'.format(x).rjust(7, ' ') if not isna(x) else "--".rjust(7, ' ')
|
||||
)
|
||||
trials['Avg duration'] = trials['Avg duration'].apply(
|
||||
lambda x: '{:,.1f} m'.format(x).rjust(7, ' ') if not isna(x) else "--".rjust(7, ' ')
|
||||
)
|
||||
trials['Objective'] = trials['Objective'].apply(
|
||||
lambda x: '{:,.5f}'.format(x).rjust(8, ' ') if x != 100000 else "N/A".rjust(8, ' ')
|
||||
)
|
||||
|
||||
trials['Profit'] = trials.apply(
|
||||
lambda x: '{:,.8f} {} {}'.format(
|
||||
x['Total profit'], config['stake_currency'],
|
||||
'({:,.2f}%)'.format(x['Profit']).rjust(10, ' ')
|
||||
).rjust(25+len(config['stake_currency']))
|
||||
if x['Total profit'] != 0.0 else '--'.rjust(25+len(config['stake_currency'])),
|
||||
axis=1
|
||||
)
|
||||
trials = trials.drop(columns=['Total profit'])
|
||||
|
||||
if print_colorized:
|
||||
for i in range(len(trials)):
|
||||
if trials.loc[i]['is_profit']:
|
||||
for j in range(len(trials.loc[i])-3):
|
||||
trials.iat[i, j] = "{}{}{}".format(Fore.GREEN,
|
||||
str(trials.loc[i][j]), Fore.RESET)
|
||||
if trials.loc[i]['is_best'] and highlight_best:
|
||||
for j in range(len(trials.loc[i])-3):
|
||||
trials.iat[i, j] = "{}{}{}".format(Style.BRIGHT,
|
||||
str(trials.loc[i][j]), Style.RESET_ALL)
|
||||
|
||||
trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit'])
|
||||
if remove_header > 0:
|
||||
table = tabulate.tabulate(
|
||||
trials.to_dict(orient='list'), tablefmt='orgtbl',
|
||||
headers='keys', stralign="right"
|
||||
)
|
||||
|
||||
table = table.split("\n", remove_header)[remove_header]
|
||||
elif remove_header < 0:
|
||||
table = tabulate.tabulate(
|
||||
trials.to_dict(orient='list'), tablefmt='psql',
|
||||
headers='keys', stralign="right"
|
||||
)
|
||||
table = "\n".join(table.split("\n")[0:remove_header])
|
||||
else:
|
||||
table = tabulate.tabulate(
|
||||
trials.to_dict(orient='list'), tablefmt='psql',
|
||||
headers='keys', stralign="right"
|
||||
)
|
||||
return table
|
||||
|
||||
@staticmethod
|
||||
def export_csv_file(config: dict, results: list, total_epochs: int, highlight_best: bool,
|
||||
csv_file: str) -> None:
|
||||
"""
|
||||
Log result to csv-file
|
||||
"""
|
||||
if not results:
|
||||
return
|
||||
|
||||
# Verification for overwrite
|
||||
if Path(csv_file).is_file():
|
||||
logger.error(f"CSV file already exists: {csv_file}")
|
||||
return
|
||||
|
||||
try:
|
||||
io.open(csv_file, 'w+').close()
|
||||
except IOError:
|
||||
logger.error(f"Failed to create CSV file: {csv_file}")
|
||||
return
|
||||
|
||||
trials = json_normalize(results, max_level=1)
|
||||
trials['Best'] = ''
|
||||
trials['Stake currency'] = config['stake_currency']
|
||||
|
||||
base_metrics = ['Best', 'current_epoch', 'results_metrics.trade_count',
|
||||
'results_metrics.avg_profit', 'results_metrics.median_profit',
|
||||
'results_metrics.total_profit',
|
||||
'Stake currency', 'results_metrics.profit', 'results_metrics.duration',
|
||||
'loss', 'is_initial_point', 'is_best']
|
||||
param_metrics = [("params_dict."+param) for param in results[0]['params_dict'].keys()]
|
||||
trials = trials[base_metrics + param_metrics]
|
||||
|
||||
base_columns = ['Best', 'Epoch', 'Trades', 'Avg profit', 'Median profit', 'Total profit',
|
||||
'Stake currency', 'Profit', 'Avg duration', 'Objective',
|
||||
'is_initial_point', 'is_best']
|
||||
param_columns = list(results[0]['params_dict'].keys())
|
||||
trials.columns = base_columns + param_columns
|
||||
|
||||
trials['is_profit'] = False
|
||||
trials.loc[trials['is_initial_point'], 'Best'] = '*'
|
||||
trials.loc[trials['is_best'], 'Best'] = 'Best'
|
||||
trials.loc[trials['is_initial_point'] & trials['is_best'], 'Best'] = '* Best'
|
||||
trials.loc[trials['Total profit'] > 0, 'is_profit'] = True
|
||||
trials['Epoch'] = trials['Epoch'].astype(str)
|
||||
trials['Trades'] = trials['Trades'].astype(str)
|
||||
|
||||
trials['Total profit'] = trials['Total profit'].apply(
|
||||
lambda x: '{:,.8f}'.format(x) if x != 0.0 else ""
|
||||
)
|
||||
trials['Profit'] = trials['Profit'].apply(
|
||||
lambda x: '{:,.2f}'.format(x) if not isna(x) else ""
|
||||
)
|
||||
trials['Avg profit'] = trials['Avg profit'].apply(
|
||||
lambda x: '{:,.2f}%'.format(x) if not isna(x) else ""
|
||||
)
|
||||
trials['Avg duration'] = trials['Avg duration'].apply(
|
||||
lambda x: '{:,.1f} m'.format(x) if not isna(x) else ""
|
||||
)
|
||||
trials['Objective'] = trials['Objective'].apply(
|
||||
lambda x: '{:,.5f}'.format(x) if x != 100000 else ""
|
||||
)
|
||||
|
||||
trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit'])
|
||||
trials.to_csv(csv_file, index=False, header=True, mode='w', encoding='UTF-8')
|
||||
logger.info(f"CSV file created: {csv_file}")
|
||||
|
||||
def has_space(self, space: str) -> bool:
|
||||
"""
|
||||
Tell if the space value is contained in the configuration
|
||||
"""
|
||||
# The 'trailing' space is not included in the 'default' set of spaces
|
||||
if space == 'trailing':
|
||||
return any(s in self.config['spaces'] for s in [space, 'all'])
|
||||
else:
|
||||
return any(s in self.config['spaces'] for s in [space, 'all', 'default'])
|
||||
|
||||
def hyperopt_space(self, space: Optional[str] = None) -> List[Dimension]:
|
||||
"""
|
||||
Return the dimensions in the hyperoptimization space.
|
||||
:param space: Defines hyperspace to return dimensions for.
|
||||
If None, then the self.has_space() will be used to return dimensions
|
||||
for all hyperspaces used.
|
||||
"""
|
||||
spaces: List[Dimension] = []
|
||||
|
||||
if space == 'buy' or (space is None and self.has_space('buy')):
|
||||
if HyperoptTools.has_space(self.config, 'buy'):
|
||||
logger.debug("Hyperopt has 'buy' space")
|
||||
spaces += self.custom_hyperopt.indicator_space()
|
||||
self.buy_space = self.custom_hyperopt.buy_indicator_space()
|
||||
|
||||
if space == 'sell' or (space is None and self.has_space('sell')):
|
||||
if HyperoptTools.has_space(self.config, 'sell'):
|
||||
logger.debug("Hyperopt has 'sell' space")
|
||||
spaces += self.custom_hyperopt.sell_indicator_space()
|
||||
self.sell_space = self.custom_hyperopt.sell_indicator_space()
|
||||
|
||||
if space == 'roi' or (space is None and self.has_space('roi')):
|
||||
if HyperoptTools.has_space(self.config, 'roi'):
|
||||
logger.debug("Hyperopt has 'roi' space")
|
||||
spaces += self.custom_hyperopt.roi_space()
|
||||
self.roi_space = self.custom_hyperopt.roi_space()
|
||||
|
||||
if space == 'stoploss' or (space is None and self.has_space('stoploss')):
|
||||
if HyperoptTools.has_space(self.config, 'stoploss'):
|
||||
logger.debug("Hyperopt has 'stoploss' space")
|
||||
spaces += self.custom_hyperopt.stoploss_space()
|
||||
self.stoploss_space = self.custom_hyperopt.stoploss_space()
|
||||
|
||||
if space == 'trailing' or (space is None and self.has_space('trailing')):
|
||||
if HyperoptTools.has_space(self.config, 'trailing'):
|
||||
logger.debug("Hyperopt has 'trailing' space")
|
||||
spaces += self.custom_hyperopt.trailing_space()
|
||||
self.trailing_space = self.custom_hyperopt.trailing_space()
|
||||
|
||||
return spaces
|
||||
self.dimensions = (self.buy_space + self.sell_space + self.protection_space
|
||||
+ self.roi_space + self.stoploss_space + self.trailing_space)
|
||||
|
||||
def assign_params(self, params_dict: Dict, category: str) -> None:
|
||||
"""
|
||||
Assign hyperoptable parameters
|
||||
"""
|
||||
for attr_name, attr in self.backtesting.strategy.enumerate_parameters(category):
|
||||
if attr.optimize:
|
||||
# noinspection PyProtectedMember
|
||||
attr.value = params_dict[attr_name]
|
||||
|
||||
def generate_optimizer(self, raw_params: List[Any], iteration=None) -> Dict:
|
||||
"""
|
||||
Used Optimize function. Called once per epoch to optimize whatever is configured.
|
||||
Used Optimize function.
|
||||
Called once per epoch to optimize whatever is configured.
|
||||
Keep this function as optimized as possible!
|
||||
"""
|
||||
params_dict = self._get_params_dict(raw_params)
|
||||
params_details = self._get_params_details(params_dict)
|
||||
backtest_start_time = datetime.now(timezone.utc)
|
||||
params_dict = self._get_params_dict(self.dimensions, raw_params)
|
||||
|
||||
if self.has_space('roi'):
|
||||
# Apply parameters
|
||||
if HyperoptTools.has_space(self.config, 'buy'):
|
||||
self.assign_params(params_dict, 'buy')
|
||||
|
||||
if HyperoptTools.has_space(self.config, 'sell'):
|
||||
self.assign_params(params_dict, 'sell')
|
||||
|
||||
if HyperoptTools.has_space(self.config, 'protection'):
|
||||
self.assign_params(params_dict, 'protection')
|
||||
|
||||
if HyperoptTools.has_space(self.config, 'roi'):
|
||||
self.backtesting.strategy.minimal_roi = ( # type: ignore
|
||||
self.custom_hyperopt.generate_roi_table(params_dict))
|
||||
|
||||
if self.has_space('buy'):
|
||||
self.backtesting.strategy.advise_buy = ( # type: ignore
|
||||
self.custom_hyperopt.buy_strategy_generator(params_dict))
|
||||
|
||||
if self.has_space('sell'):
|
||||
self.backtesting.strategy.advise_sell = ( # type: ignore
|
||||
self.custom_hyperopt.sell_strategy_generator(params_dict))
|
||||
|
||||
if self.has_space('stoploss'):
|
||||
if HyperoptTools.has_space(self.config, 'stoploss'):
|
||||
self.backtesting.strategy.stoploss = params_dict['stoploss']
|
||||
|
||||
if self.has_space('trailing'):
|
||||
if HyperoptTools.has_space(self.config, 'trailing'):
|
||||
d = self.custom_hyperopt.generate_trailing_params(params_dict)
|
||||
self.backtesting.strategy.trailing_stop = d['trailing_stop']
|
||||
self.backtesting.strategy.trailing_stop_positive = d['trailing_stop_positive']
|
||||
@@ -536,30 +306,43 @@ class Hyperopt:
|
||||
self.backtesting.strategy.trailing_only_offset_is_reached = \
|
||||
d['trailing_only_offset_is_reached']
|
||||
|
||||
processed = load(self.data_pickle_file)
|
||||
|
||||
min_date, max_date = get_timerange(processed)
|
||||
|
||||
backtesting_results = self.backtesting.backtest(
|
||||
with self.data_pickle_file.open('rb') as f:
|
||||
processed = load(f, mmap_mode='r')
|
||||
bt_results = self.backtesting.backtest(
|
||||
processed=processed,
|
||||
start_date=min_date.datetime,
|
||||
end_date=max_date.datetime,
|
||||
start_date=self.min_date,
|
||||
end_date=self.max_date,
|
||||
max_open_trades=self.max_open_trades,
|
||||
position_stacking=self.position_stacking,
|
||||
enable_protections=self.config.get('enable_protections', False),
|
||||
|
||||
)
|
||||
return self._get_results_dict(backtesting_results, min_date, max_date,
|
||||
params_dict, params_details,
|
||||
backtest_end_time = datetime.now(timezone.utc)
|
||||
bt_results.update({
|
||||
'backtest_start_time': int(backtest_start_time.timestamp()),
|
||||
'backtest_end_time': int(backtest_end_time.timestamp()),
|
||||
})
|
||||
|
||||
return self._get_results_dict(bt_results, self.min_date, self.max_date,
|
||||
params_dict,
|
||||
processed=processed)
|
||||
|
||||
def _get_results_dict(self, backtesting_results, min_date, max_date,
|
||||
params_dict, params_details, processed: Dict[str, DataFrame]):
|
||||
results_metrics = self._calculate_results_metrics(backtesting_results)
|
||||
results_explanation = self._format_results_explanation_string(results_metrics)
|
||||
params_dict, processed: Dict[str, DataFrame]
|
||||
) -> Dict[str, Any]:
|
||||
params_details = self._get_params_details(params_dict)
|
||||
|
||||
trade_count = results_metrics['trade_count']
|
||||
total_profit = results_metrics['total_profit']
|
||||
strat_stats = generate_strategy_stats(
|
||||
processed, self.backtesting.strategy.get_strategy_name(),
|
||||
backtesting_results, min_date, max_date, market_change=0
|
||||
)
|
||||
results_explanation = HyperoptTools.format_results_explanation_string(
|
||||
strat_stats, self.config['stake_currency'])
|
||||
|
||||
not_optimized = self.backtesting.strategy.get_no_optimize_params()
|
||||
not_optimized = deep_merge_dicts(not_optimized, self._get_no_optimize_details())
|
||||
|
||||
trade_count = strat_stats['total_trades']
|
||||
total_profit = strat_stats['profit_total']
|
||||
|
||||
# If this evaluation contains too short amount of trades to be
|
||||
# interesting -- consider it as 'bad' (assigned max. loss value)
|
||||
@@ -567,55 +350,36 @@ class Hyperopt:
|
||||
# path. We do not want to optimize 'hodl' strategies.
|
||||
loss: float = MAX_LOSS
|
||||
if trade_count >= self.config['hyperopt_min_trades']:
|
||||
loss = self.calculate_loss(results=backtesting_results, trade_count=trade_count,
|
||||
min_date=min_date.datetime, max_date=max_date.datetime,
|
||||
config=self.config, processed=processed)
|
||||
loss = self.calculate_loss(results=backtesting_results['results'],
|
||||
trade_count=trade_count,
|
||||
min_date=min_date, max_date=max_date,
|
||||
config=self.config, processed=processed,
|
||||
backtest_stats=strat_stats)
|
||||
return {
|
||||
'loss': loss,
|
||||
'params_dict': params_dict,
|
||||
'params_details': params_details,
|
||||
'results_metrics': results_metrics,
|
||||
'params_not_optimized': not_optimized,
|
||||
'results_metrics': strat_stats,
|
||||
'results_explanation': results_explanation,
|
||||
'total_profit': total_profit,
|
||||
}
|
||||
|
||||
def _calculate_results_metrics(self, backtesting_results: DataFrame) -> Dict:
|
||||
wins = len(backtesting_results[backtesting_results['profit_ratio'] > 0])
|
||||
draws = len(backtesting_results[backtesting_results['profit_ratio'] == 0])
|
||||
losses = len(backtesting_results[backtesting_results['profit_ratio'] < 0])
|
||||
return {
|
||||
'trade_count': len(backtesting_results.index),
|
||||
'wins': wins,
|
||||
'draws': draws,
|
||||
'losses': losses,
|
||||
'winsdrawslosses': f"{wins:>4} {draws:>4} {losses:>4}",
|
||||
'avg_profit': backtesting_results['profit_ratio'].mean() * 100.0,
|
||||
'median_profit': backtesting_results['profit_ratio'].median() * 100.0,
|
||||
'total_profit': backtesting_results['profit_abs'].sum(),
|
||||
'profit': backtesting_results['profit_ratio'].sum() * 100.0,
|
||||
'duration': backtesting_results['trade_duration'].mean(),
|
||||
}
|
||||
|
||||
def _format_results_explanation_string(self, results_metrics: Dict) -> str:
|
||||
"""
|
||||
Return the formatted results explanation in a string
|
||||
"""
|
||||
stake_cur = self.config['stake_currency']
|
||||
return (f"{results_metrics['trade_count']:6d} trades. "
|
||||
f"{results_metrics['wins']}/{results_metrics['draws']}"
|
||||
f"/{results_metrics['losses']} Wins/Draws/Losses. "
|
||||
f"Avg profit {results_metrics['avg_profit']: 6.2f}%. "
|
||||
f"Median profit {results_metrics['median_profit']: 6.2f}%. "
|
||||
f"Total profit {results_metrics['total_profit']: 11.8f} {stake_cur} "
|
||||
f"({results_metrics['profit']: 7.2f}\N{GREEK CAPITAL LETTER SIGMA}%). "
|
||||
f"Avg duration {results_metrics['duration']:5.1f} min."
|
||||
).encode(locale.getpreferredencoding(), 'replace').decode('utf-8')
|
||||
|
||||
def get_optimizer(self, dimensions: List[Dimension], cpu_count) -> Optimizer:
|
||||
estimator = self.custom_hyperopt.generate_estimator()
|
||||
|
||||
acq_optimizer = "sampling"
|
||||
if isinstance(estimator, str):
|
||||
if estimator not in ("GP", "RF", "ET", "GBRT"):
|
||||
raise OperationalException(f"Estimator {estimator} not supported.")
|
||||
else:
|
||||
acq_optimizer = "auto"
|
||||
|
||||
logger.info(f"Using estimator {estimator}.")
|
||||
return Optimizer(
|
||||
dimensions,
|
||||
base_estimator="ET",
|
||||
acq_optimizer="auto",
|
||||
base_estimator=estimator,
|
||||
acq_optimizer=acq_optimizer,
|
||||
n_initial_points=INITIAL_POINTS,
|
||||
acq_optimizer_kwargs={'n_jobs': cpu_count},
|
||||
random_state=self.random_state,
|
||||
@@ -626,43 +390,33 @@ class Hyperopt:
|
||||
return parallel(delayed(
|
||||
wrap_non_picklable_objects(self.generate_optimizer))(v, i) for v in asked)
|
||||
|
||||
@staticmethod
|
||||
def load_previous_results(results_file: Path) -> List:
|
||||
"""
|
||||
Load data for epochs from the file if we have one
|
||||
"""
|
||||
epochs: List = []
|
||||
if results_file.is_file() and results_file.stat().st_size > 0:
|
||||
epochs = Hyperopt._read_results(results_file)
|
||||
# Detection of some old format, without 'is_best' field saved
|
||||
if epochs[0].get('is_best') is None:
|
||||
raise OperationalException(
|
||||
"The file with Hyperopt results is incompatible with this version "
|
||||
"of Freqtrade and cannot be loaded.")
|
||||
logger.info(f"Loaded {len(epochs)} previous evaluations from disk.")
|
||||
return epochs
|
||||
|
||||
def _set_random_state(self, random_state: Optional[int]) -> int:
|
||||
return random_state or random.randint(1, 2**16 - 1)
|
||||
|
||||
def prepare_hyperopt_data(self) -> None:
|
||||
data, timerange = self.backtesting.load_bt_data()
|
||||
logger.info("Dataload complete. Calculating indicators")
|
||||
|
||||
preprocessed = self.backtesting.strategy.advise_all_indicators(data)
|
||||
|
||||
# Trim startup period from analyzed dataframe to get correct dates for output.
|
||||
processed = trim_dataframes(preprocessed, timerange, self.backtesting.required_startup)
|
||||
self.min_date, self.max_date = get_timerange(processed)
|
||||
|
||||
logger.info(f'Hyperopting with data from {self.min_date.strftime(DATETIME_PRINT_FORMAT)} '
|
||||
f'up to {self.max_date.strftime(DATETIME_PRINT_FORMAT)} '
|
||||
f'({(self.max_date - self.min_date).days} days)..')
|
||||
# Store non-trimmed data - will be trimmed after signal generation.
|
||||
dump(preprocessed, self.data_pickle_file)
|
||||
|
||||
def start(self) -> None:
|
||||
self.random_state = self._set_random_state(self.config.get('hyperopt_random_state', None))
|
||||
logger.info(f"Using optimizer random state: {self.random_state}")
|
||||
self.hyperopt_table_header = -1
|
||||
data, timerange = self.backtesting.load_bt_data()
|
||||
# Initialize spaces ...
|
||||
self.init_spaces()
|
||||
|
||||
preprocessed = self.backtesting.strategy.ohlcvdata_to_dataframe(data)
|
||||
|
||||
# Trim startup period from analyzed dataframe
|
||||
for pair, df in preprocessed.items():
|
||||
preprocessed[pair] = trim_dataframe(df, timerange)
|
||||
min_date, max_date = get_timerange(preprocessed)
|
||||
|
||||
logger.info(f'Hyperopting with data from {min_date.strftime(DATETIME_PRINT_FORMAT)} '
|
||||
f'up to {max_date.strftime(DATETIME_PRINT_FORMAT)} '
|
||||
f'({(max_date - min_date).days} days)..')
|
||||
|
||||
dump(preprocessed, self.data_pickle_file)
|
||||
self.prepare_hyperopt_data()
|
||||
|
||||
# We don't need exchange instance anymore while running hyperopt
|
||||
self.backtesting.exchange.close()
|
||||
@@ -670,15 +424,12 @@ class Hyperopt:
|
||||
self.backtesting.exchange._api_async = None # type: ignore
|
||||
# self.backtesting.exchange = None # type: ignore
|
||||
self.backtesting.pairlists = None # type: ignore
|
||||
self.backtesting.strategy.dp = None # type: ignore
|
||||
IStrategy.dp = None # type: ignore
|
||||
|
||||
cpus = cpu_count()
|
||||
logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
|
||||
config_jobs = self.config.get('hyperopt_jobs', -1)
|
||||
logger.info(f'Number of parallel jobs set as: {config_jobs}')
|
||||
|
||||
self.dimensions: List[Dimension] = self.hyperopt_space()
|
||||
self.opt = self.get_optimizer(self.dimensions, config_jobs)
|
||||
|
||||
if self.print_colorized:
|
||||
@@ -711,9 +462,9 @@ class Hyperopt:
|
||||
' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
|
||||
]
|
||||
with progressbar.ProgressBar(
|
||||
max_value=self.total_epochs, redirect_stdout=False, redirect_stderr=False,
|
||||
widgets=widgets
|
||||
) as pbar:
|
||||
max_value=self.total_epochs, redirect_stdout=False, redirect_stderr=False,
|
||||
widgets=widgets
|
||||
) as pbar:
|
||||
EVALS = ceil(self.total_epochs / jobs)
|
||||
for i in range(EVALS):
|
||||
# Correct the number of epochs to be processed for the last
|
||||
@@ -734,7 +485,7 @@ class Hyperopt:
|
||||
|
||||
logger.debug(f"Optimizer epoch evaluated: {val}")
|
||||
|
||||
is_best = self.is_best_loss(val, self.current_best_loss)
|
||||
is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
|
||||
# This value is assigned here and not in the optimization method
|
||||
# to keep proper order in the list of results. That's because
|
||||
# evaluations can take different time. Here they are aligned in the
|
||||
@@ -744,25 +495,26 @@ class Hyperopt:
|
||||
|
||||
if is_best:
|
||||
self.current_best_loss = val['loss']
|
||||
self.epochs.append(val)
|
||||
self.current_best_epoch = val
|
||||
|
||||
# Save results after each best epoch and every 100 epochs
|
||||
if is_best or current % 100 == 0:
|
||||
self._save_results()
|
||||
self._save_result(val)
|
||||
|
||||
pbar.update(current)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print('User interrupted..')
|
||||
|
||||
self._save_results()
|
||||
logger.info(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
|
||||
f"saved to '{self.results_file}'.")
|
||||
|
||||
if self.epochs:
|
||||
sorted_epochs = sorted(self.epochs, key=itemgetter('loss'))
|
||||
best_epoch = sorted_epochs[0]
|
||||
self.print_epoch_details(best_epoch, self.total_epochs, self.print_json)
|
||||
if self.current_best_epoch:
|
||||
HyperoptTools.try_export_params(
|
||||
self.config,
|
||||
self.backtesting.strategy.get_strategy_name(),
|
||||
self.current_best_epoch)
|
||||
|
||||
HyperoptTools.show_epoch_details(self.current_best_epoch, self.total_epochs,
|
||||
self.print_json)
|
||||
else:
|
||||
# This is printed when Ctrl+C is pressed quickly, before first epochs have
|
||||
# a chance to be evaluated.
|
||||
|
freqtrade/optimize/hyperopt_auto.py (95 lines, normal file)
@@ -0,0 +1,95 @@
|
||||
"""
|
||||
HyperOptAuto class.
|
||||
This module implements a convenience auto-hyperopt class, which can be used together with strategies
|
||||
that implement the IHyperStrategy interface.
|
||||
"""
|
||||
import logging
|
||||
from contextlib import suppress
|
||||
from typing import Callable, Dict, List
|
||||
|
||||
from freqtrade.exceptions import OperationalException
|
||||
|
||||
|
||||
with suppress(ImportError):
|
||||
from skopt.space import Dimension
|
||||
|
||||
from freqtrade.optimize.hyperopt_interface import EstimatorType, IHyperOpt
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _format_exception_message(space: str, ignore_missing_space: bool) -> None:
|
||||
msg = (f"The '{space}' space is included into the hyperoptimization "
|
||||
f"but no parameter for this space was not found in your Strategy. "
|
||||
)
|
||||
if ignore_missing_space:
|
||||
logger.warning(msg + "This space will be ignored.")
|
||||
else:
|
||||
raise OperationalException(
|
||||
msg + f"Please make sure to have parameters for this space enabled for optimization "
|
||||
f"or remove the '{space}' space from hyperoptimization.")
|
||||
|
||||
|
||||
class HyperOptAuto(IHyperOpt):
|
||||
"""
|
||||
This class delegates functionality to Strategy(IHyperStrategy) and Strategy.HyperOpt classes.
|
||||
Most of the time, the Strategy.HyperOpt class will only implement the indicator_space and
sell_indicator_space methods, but other hyperopt methods can be overridden as well.
|
||||
"""
|
||||
|
||||
def _get_func(self, name) -> Callable:
|
||||
"""
|
||||
Return a function defined in Strategy.HyperOpt class, or one defined in super() class.
|
||||
:param name: function name.
|
||||
:return: a requested function.
|
||||
"""
|
||||
hyperopt_cls = getattr(self.strategy, 'HyperOpt', None)
|
||||
default_func = getattr(super(), name)
|
||||
if hyperopt_cls:
|
||||
return getattr(hyperopt_cls, name, default_func)
|
||||
else:
|
||||
return default_func
|
||||
|
||||
def _generate_indicator_space(self, category):
|
||||
for attr_name, attr in self.strategy.enumerate_parameters(category):
|
||||
if attr.optimize:
|
||||
yield attr.get_space(attr_name)
|
||||
|
||||
def _get_indicator_space(self, category) -> List:
|
||||
# TODO: is this necessary, or can we call "generate_space" directly?
|
||||
indicator_space = list(self._generate_indicator_space(category))
|
||||
if len(indicator_space) > 0:
|
||||
return indicator_space
|
||||
else:
|
||||
_format_exception_message(
|
||||
category,
|
||||
self.config.get("hyperopt_ignore_missing_space", False))
|
||||
return []
|
||||
|
||||
def buy_indicator_space(self) -> List['Dimension']:
|
||||
return self._get_indicator_space('buy')
|
||||
|
||||
def sell_indicator_space(self) -> List['Dimension']:
|
||||
return self._get_indicator_space('sell')
|
||||
|
||||
def protection_space(self) -> List['Dimension']:
|
||||
return self._get_indicator_space('protection')
|
||||
|
||||
def generate_roi_table(self, params: Dict) -> Dict[int, float]:
|
||||
return self._get_func('generate_roi_table')(params)
|
||||
|
||||
def roi_space(self) -> List['Dimension']:
|
||||
return self._get_func('roi_space')()
|
||||
|
||||
def stoploss_space(self) -> List['Dimension']:
|
||||
return self._get_func('stoploss_space')()
|
||||
|
||||
def generate_trailing_params(self, params: Dict) -> Dict:
|
||||
return self._get_func('generate_trailing_params')(params)
|
||||
|
||||
def trailing_space(self) -> List['Dimension']:
|
||||
return self._get_func('trailing_space')()
|
||||
|
||||
def generate_estimator(self) -> EstimatorType:
|
||||
return self._get_func('generate_estimator')()
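For illustration, a minimal sketch of the delegation described in the class docstring; the strategy name and parameter are hypothetical and mandatory strategy methods are omitted. Hyperoptable parameters declared on the strategy feed buy_indicator_space()/sell_indicator_space(), while an optional inner HyperOpt class can override other spaces, which _get_func() will return in place of the IHyperOpt defaults.

# Sketch only - names are hypothetical, not part of this change.
from freqtrade.optimize.space import SKDecimal
from freqtrade.strategy import IStrategy, IntParameter


class MyAwesomeStrategy(IStrategy):
    # Picked up by _generate_indicator_space('buy') because optimize=True.
    buy_rsi = IntParameter(10, 40, default=30, space='buy', optimize=True)

    class HyperOpt:
        # Overrides IHyperOpt.stoploss_space(); returned by _get_func('stoploss_space').
        def stoploss_space():
            return [SKDecimal(-0.25, -0.05, decimals=3, name='stoploss')]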
|
freqtrade/optimize/hyperopt_epoch_filters.py (128 lines, normal file)
@@ -0,0 +1,128 @@
|
||||
import logging
|
||||
from typing import List
|
||||
|
||||
from freqtrade.exceptions import OperationalException
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def hyperopt_filter_epochs(epochs: List, filteroptions: dict, log: bool = True) -> List:
|
||||
"""
|
||||
Filter out items from the list of hyperopt results
|
||||
"""
|
||||
if filteroptions['only_best']:
|
||||
epochs = [x for x in epochs if x['is_best']]
|
||||
if filteroptions['only_profitable']:
|
||||
epochs = [x for x in epochs
|
||||
if x['results_metrics'].get('profit_total', 0) > 0]
|
||||
|
||||
epochs = _hyperopt_filter_epochs_trade_count(epochs, filteroptions)
|
||||
|
||||
epochs = _hyperopt_filter_epochs_duration(epochs, filteroptions)
|
||||
|
||||
epochs = _hyperopt_filter_epochs_profit(epochs, filteroptions)
|
||||
|
||||
epochs = _hyperopt_filter_epochs_objective(epochs, filteroptions)
|
||||
if log:
|
||||
logger.info(f"{len(epochs)} " +
|
||||
("best " if filteroptions['only_best'] else "") +
|
||||
("profitable " if filteroptions['only_profitable'] else "") +
|
||||
"epochs found.")
|
||||
return epochs
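A hedged usage sketch of this filter; the epoch record and thresholds below are made up, and the filteroptions keys mirror the ones assembled in HyperoptTools.load_filtered_results() further down.

# Hypothetical data - illustrates the expected shapes only.
from freqtrade.optimize.hyperopt_epoch_filters import hyperopt_filter_epochs

filteroptions = {
    'only_best': False,
    'only_profitable': True,
    'filter_min_trades': 10,
    'filter_max_trades': 0,
    'filter_min_avg_time': None,
    'filter_max_avg_time': None,
    'filter_min_avg_profit': None,
    'filter_max_avg_profit': None,
    'filter_min_total_profit': None,
    'filter_max_total_profit': None,
    'filter_min_objective': None,
    'filter_max_objective': None,
}
epochs = [{
    'loss': 1.5, 'is_best': True,
    'results_metrics': {'total_trades': 25, 'profit_total': 0.04, 'profit_mean': 0.002,
                        'profit_total_abs': 40.0, 'holding_avg_s': 3600},
}]
filtered = hyperopt_filter_epochs(epochs, filteroptions, log=True)  # keeps the single epoch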
|
||||
|
||||
|
||||
def _hyperopt_filter_epochs_trade(epochs: List, trade_count: int):
|
||||
"""
|
||||
Keep epochs whose trade count is greater than trade_count
|
||||
"""
|
||||
return [
|
||||
x for x in epochs if x['results_metrics'].get('total_trades', 0) > trade_count
|
||||
]
|
||||
|
||||
|
||||
def _hyperopt_filter_epochs_trade_count(epochs: List, filteroptions: dict) -> List:
|
||||
|
||||
if filteroptions['filter_min_trades'] > 0:
|
||||
epochs = _hyperopt_filter_epochs_trade(epochs, filteroptions['filter_min_trades'])
|
||||
|
||||
if filteroptions['filter_max_trades'] > 0:
|
||||
epochs = [
|
||||
x for x in epochs
|
||||
if x['results_metrics'].get('total_trades') < filteroptions['filter_max_trades']
|
||||
]
|
||||
return epochs
|
||||
|
||||
|
||||
def _hyperopt_filter_epochs_duration(epochs: List, filteroptions: dict) -> List:
|
||||
|
||||
def get_duration_value(x):
|
||||
# Duration in minutes ...
|
||||
if 'holding_avg_s' in x['results_metrics']:
|
||||
avg = x['results_metrics']['holding_avg_s']
|
||||
return avg // 60
|
||||
raise OperationalException(
|
||||
"Holding-average not available. Please omit the filter on average time, "
|
||||
"or rerun hyperopt with this version")
|
||||
|
||||
if filteroptions['filter_min_avg_time'] is not None:
|
||||
epochs = _hyperopt_filter_epochs_trade(epochs, 0)
|
||||
epochs = [
|
||||
x for x in epochs
|
||||
if get_duration_value(x) > filteroptions['filter_min_avg_time']
|
||||
]
|
||||
if filteroptions['filter_max_avg_time'] is not None:
|
||||
epochs = _hyperopt_filter_epochs_trade(epochs, 0)
|
||||
epochs = [
|
||||
x for x in epochs
|
||||
if get_duration_value(x) < filteroptions['filter_max_avg_time']
|
||||
]
|
||||
|
||||
return epochs
|
||||
|
||||
|
||||
def _hyperopt_filter_epochs_profit(epochs: List, filteroptions: dict) -> List:
|
||||
|
||||
if filteroptions['filter_min_avg_profit'] is not None:
|
||||
epochs = _hyperopt_filter_epochs_trade(epochs, 0)
|
||||
epochs = [
|
||||
x for x in epochs
|
||||
if x['results_metrics'].get('profit_mean', 0) * 100
|
||||
> filteroptions['filter_min_avg_profit']
|
||||
]
|
||||
if filteroptions['filter_max_avg_profit'] is not None:
|
||||
epochs = _hyperopt_filter_epochs_trade(epochs, 0)
|
||||
epochs = [
|
||||
x for x in epochs
|
||||
if x['results_metrics'].get('profit_mean', 0) * 100
|
||||
< filteroptions['filter_max_avg_profit']
|
||||
]
|
||||
if filteroptions['filter_min_total_profit'] is not None:
|
||||
epochs = _hyperopt_filter_epochs_trade(epochs, 0)
|
||||
epochs = [
|
||||
x for x in epochs
|
||||
if x['results_metrics'].get('profit_total_abs', 0)
|
||||
> filteroptions['filter_min_total_profit']
|
||||
]
|
||||
if filteroptions['filter_max_total_profit'] is not None:
|
||||
epochs = _hyperopt_filter_epochs_trade(epochs, 0)
|
||||
epochs = [
|
||||
x for x in epochs
|
||||
if x['results_metrics'].get('profit_total_abs', 0)
|
||||
< filteroptions['filter_max_total_profit']
|
||||
]
|
||||
return epochs
|
||||
|
||||
|
||||
def _hyperopt_filter_epochs_objective(epochs: List, filteroptions: dict) -> List:
|
||||
|
||||
if filteroptions['filter_min_objective'] is not None:
|
||||
epochs = _hyperopt_filter_epochs_trade(epochs, 0)
|
||||
|
||||
epochs = [x for x in epochs if x['loss'] < filteroptions['filter_min_objective']]
|
||||
if filteroptions['filter_max_objective'] is not None:
|
||||
epochs = _hyperopt_filter_epochs_trade(epochs, 0)
|
||||
|
||||
epochs = [x for x in epochs if x['loss'] > filteroptions['filter_max_objective']]
|
||||
|
||||
return epochs
|
@@ -5,24 +5,20 @@ This module defines the interface to apply for hyperopt
|
||||
import logging
|
||||
import math
|
||||
from abc import ABC
|
||||
from typing import Any, Callable, Dict, List
|
||||
from typing import Dict, List, Union
|
||||
|
||||
from skopt.space import Categorical, Dimension, Integer, Real
|
||||
from sklearn.base import RegressorMixin
|
||||
from skopt.space import Categorical, Dimension, Integer
|
||||
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.exchange import timeframe_to_minutes
|
||||
from freqtrade.misc import round_dict
|
||||
from freqtrade.optimize.space import SKDecimal
|
||||
from freqtrade.strategy import IStrategy
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _format_exception_message(method: str, space: str) -> str:
|
||||
return (f"The '{space}' space is included into the hyperoptimization "
|
||||
f"but {method}() method is not found in your "
|
||||
f"custom Hyperopt class. You should either implement this "
|
||||
f"method or remove the '{space}' space from hyperoptimization.")
|
||||
EstimatorType = Union[RegressorMixin, str]
|
||||
|
||||
|
||||
class IHyperOpt(ABC):
|
||||
@@ -31,7 +27,7 @@ class IHyperOpt(ABC):
|
||||
Defines the mandatory structure that any custom hyperopt must follow
|
||||
|
||||
Class attributes you can use:
|
||||
ticker_interval -> int: value of the ticker interval to use for the strategy
|
||||
timeframe -> int: value of the timeframe to use for the strategy
|
||||
"""
|
||||
ticker_interval: str # DEPRECATED
|
||||
timeframe: str
|
||||
@@ -44,36 +40,15 @@ class IHyperOpt(ABC):
|
||||
IHyperOpt.ticker_interval = str(config['timeframe']) # DEPRECATED
|
||||
IHyperOpt.timeframe = str(config['timeframe'])
|
||||
|
||||
@staticmethod
|
||||
def buy_strategy_generator(params: Dict[str, Any]) -> Callable:
|
||||
def generate_estimator(self) -> EstimatorType:
|
||||
"""
|
||||
Create a buy strategy generator.
|
||||
Return base_estimator.
|
||||
Can be any of "GP", "RF", "ET", "GBRT" or an instance of a class
|
||||
inheriting from RegressorMixin (from sklearn).
|
||||
"""
|
||||
raise OperationalException(_format_exception_message('buy_strategy_generator', 'buy'))
|
||||
return 'ET'
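As a sketch, a custom hyperopt class could override this to hand skopt any sklearn regressor instead of one of the supported strings; the RandomForestRegressor and its parameters below are an illustrative choice, not part of this change.

# Sketch only - any RegressorMixin instance works here.
from sklearn.ensemble import RandomForestRegressor

from freqtrade.optimize.hyperopt_interface import EstimatorType, IHyperOpt


class MyHyperOpt(IHyperOpt):

    def generate_estimator(self) -> EstimatorType:
        # Returning one of the strings "GP", "RF", "ET", "GBRT" is equally valid.
        return RandomForestRegressor(n_estimators=100, n_jobs=-1)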
|
||||
|
||||
@staticmethod
|
||||
def sell_strategy_generator(params: Dict[str, Any]) -> Callable:
|
||||
"""
|
||||
Create a sell strategy generator.
|
||||
"""
|
||||
raise OperationalException(_format_exception_message('sell_strategy_generator', 'sell'))
|
||||
|
||||
@staticmethod
|
||||
def indicator_space() -> List[Dimension]:
|
||||
"""
|
||||
Create an indicator space.
|
||||
"""
|
||||
raise OperationalException(_format_exception_message('indicator_space', 'buy'))
|
||||
|
||||
@staticmethod
|
||||
def sell_indicator_space() -> List[Dimension]:
|
||||
"""
|
||||
Create a sell indicator space.
|
||||
"""
|
||||
raise OperationalException(_format_exception_message('sell_indicator_space', 'sell'))
|
||||
|
||||
@staticmethod
|
||||
def generate_roi_table(params: Dict) -> Dict[int, float]:
|
||||
def generate_roi_table(self, params: Dict) -> Dict[int, float]:
|
||||
"""
|
||||
Create a ROI table.
|
||||
|
||||
@@ -88,8 +63,7 @@ class IHyperOpt(ABC):
|
||||
|
||||
return roi_table
|
||||
|
||||
@staticmethod
|
||||
def roi_space() -> List[Dimension]:
|
||||
def roi_space(self) -> List[Dimension]:
|
||||
"""
|
||||
Create a ROI space.
|
||||
|
||||
@@ -97,7 +71,7 @@ class IHyperOpt(ABC):
|
||||
|
||||
This method implements adaptive roi hyperspace with varied
|
||||
ranges for parameters which automatically adapts to the
|
||||
ticker interval used.
|
||||
timeframe used.
|
||||
|
||||
It's used by Freqtrade by default, if no custom roi_space method is defined.
|
||||
"""
|
||||
@@ -109,7 +83,7 @@ class IHyperOpt(ABC):
|
||||
roi_t_alpha = 1.0
|
||||
roi_p_alpha = 1.0
|
||||
|
||||
timeframe_min = timeframe_to_minutes(IHyperOpt.ticker_interval)
|
||||
timeframe_min = timeframe_to_minutes(self.timeframe)
|
||||
|
||||
# We define here limits for the ROI space parameters automagically adapted to the
|
||||
# timeframe used by the bot:
|
||||
@@ -119,7 +93,7 @@ class IHyperOpt(ABC):
|
||||
# * 'roi_p' (limits for the ROI value steps) components are scaled logarithmically.
|
||||
#
|
||||
# The scaling is designed so that it maps exactly to the legacy Freqtrade roi_space()
|
||||
# method for the 5m ticker interval.
|
||||
# method for the 5m timeframe.
|
||||
roi_t_scale = timeframe_min / 5
|
||||
roi_p_scale = math.log1p(timeframe_min) / math.log1p(5)
|
||||
roi_limits = {
|
||||
@@ -145,7 +119,7 @@ class IHyperOpt(ABC):
|
||||
'roi_p2': roi_limits['roi_p2_min'],
|
||||
'roi_p3': roi_limits['roi_p3_min'],
|
||||
}
|
||||
logger.info(f"Min roi table: {round_dict(IHyperOpt.generate_roi_table(p), 5)}")
|
||||
logger.info(f"Min roi table: {round_dict(self.generate_roi_table(p), 3)}")
|
||||
p = {
|
||||
'roi_t1': roi_limits['roi_t1_max'],
|
||||
'roi_t2': roi_limits['roi_t2_max'],
|
||||
@@ -154,19 +128,21 @@ class IHyperOpt(ABC):
|
||||
'roi_p2': roi_limits['roi_p2_max'],
|
||||
'roi_p3': roi_limits['roi_p3_max'],
|
||||
}
|
||||
logger.info(f"Max roi table: {round_dict(IHyperOpt.generate_roi_table(p), 5)}")
|
||||
logger.info(f"Max roi table: {round_dict(self.generate_roi_table(p), 3)}")
|
||||
|
||||
return [
|
||||
Integer(roi_limits['roi_t1_min'], roi_limits['roi_t1_max'], name='roi_t1'),
|
||||
Integer(roi_limits['roi_t2_min'], roi_limits['roi_t2_max'], name='roi_t2'),
|
||||
Integer(roi_limits['roi_t3_min'], roi_limits['roi_t3_max'], name='roi_t3'),
|
||||
Real(roi_limits['roi_p1_min'], roi_limits['roi_p1_max'], name='roi_p1'),
|
||||
Real(roi_limits['roi_p2_min'], roi_limits['roi_p2_max'], name='roi_p2'),
|
||||
Real(roi_limits['roi_p3_min'], roi_limits['roi_p3_max'], name='roi_p3'),
|
||||
SKDecimal(roi_limits['roi_p1_min'], roi_limits['roi_p1_max'], decimals=3,
|
||||
name='roi_p1'),
|
||||
SKDecimal(roi_limits['roi_p2_min'], roi_limits['roi_p2_max'], decimals=3,
|
||||
name='roi_p2'),
|
||||
SKDecimal(roi_limits['roi_p3_min'], roi_limits['roi_p3_max'], decimals=3,
|
||||
name='roi_p3'),
|
||||
]
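A quick worked example of the scaling above, assuming a hypothetical 15m timeframe: the time limits scale linearly, the profit steps logarithmically, and both factors equal 1 for the legacy 5m case.

import math

timeframe_min = 15                                        # hypothetical timeframe
roi_t_scale = timeframe_min / 5                           # 3.0  -> ROI time limits triple
roi_p_scale = math.log1p(timeframe_min) / math.log1p(5)   # ~1.55 -> profit steps grow ~55%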
|
||||
|
||||
@staticmethod
|
||||
def stoploss_space() -> List[Dimension]:
|
||||
def stoploss_space(self) -> List[Dimension]:
|
||||
"""
|
||||
Create a stoploss space.
|
||||
|
||||
@@ -174,11 +150,10 @@ class IHyperOpt(ABC):
|
||||
You may override it in your custom Hyperopt class.
|
||||
"""
|
||||
return [
|
||||
Real(-0.35, -0.02, name='stoploss'),
|
||||
SKDecimal(-0.35, -0.02, decimals=3, name='stoploss'),
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def generate_trailing_params(params: Dict) -> Dict:
|
||||
def generate_trailing_params(self, params: Dict) -> Dict:
|
||||
"""
|
||||
Create dict with trailing stop parameters.
|
||||
"""
|
||||
@@ -190,8 +165,7 @@ class IHyperOpt(ABC):
|
||||
'trailing_only_offset_is_reached': params['trailing_only_offset_is_reached'],
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def trailing_space() -> List[Dimension]:
|
||||
def trailing_space(self) -> List[Dimension]:
|
||||
"""
|
||||
Create a trailing stoploss space.
|
||||
|
||||
@@ -206,14 +180,14 @@ class IHyperOpt(ABC):
|
||||
# other 'trailing' hyperspace parameters.
|
||||
Categorical([True], name='trailing_stop'),
|
||||
|
||||
Real(0.01, 0.35, name='trailing_stop_positive'),
|
||||
SKDecimal(0.01, 0.35, decimals=3, name='trailing_stop_positive'),
|
||||
|
||||
# 'trailing_stop_positive_offset' should be greater than 'trailing_stop_positive',
|
||||
# so this intermediate parameter is used as the value of the difference between
|
||||
# them. The value of the 'trailing_stop_positive_offset' is constructed in the
|
||||
# generate_trailing_params() method.
|
||||
# This is similar to the hyperspace dimensions used for constructing the ROI tables.
|
||||
Real(0.001, 0.1, name='trailing_stop_positive_offset_p1'),
|
||||
SKDecimal(0.001, 0.1, decimals=3, name='trailing_stop_positive_offset_p1'),
|
||||
|
||||
Categorical([True, False], name='trailing_only_offset_is_reached'),
|
||||
]
|
||||
|
@@ -5,7 +5,7 @@ This module defines the interface for the loss-function for hyperopt
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from datetime import datetime
|
||||
from typing import Dict
|
||||
from typing import Any, Dict
|
||||
|
||||
from pandas import DataFrame
|
||||
|
||||
@@ -22,6 +22,7 @@ class IHyperOptLoss(ABC):
|
||||
def hyperopt_loss_function(results: DataFrame, trade_count: int,
|
||||
min_date: datetime, max_date: datetime,
|
||||
config: Dict, processed: Dict[str, DataFrame],
|
||||
backtest_stats: Dict[str, Any],
|
||||
*args, **kwargs) -> float:
|
||||
"""
|
||||
Objective function, returns smaller number for better results
|
||||
|
freqtrade/optimize/hyperopt_loss_max_drawdown.py (41 lines, normal file)
@@ -0,0 +1,41 @@
|
||||
"""
|
||||
MaxDrawDownHyperOptLoss
|
||||
|
||||
This module defines the alternative HyperOptLoss class which can be used for
|
||||
Hyperoptimization.
|
||||
"""
|
||||
from datetime import datetime
|
||||
|
||||
from pandas import DataFrame
|
||||
|
||||
from freqtrade.data.btanalysis import calculate_max_drawdown
|
||||
from freqtrade.optimize.hyperopt import IHyperOptLoss
|
||||
|
||||
|
||||
class MaxDrawDownHyperOptLoss(IHyperOptLoss):
|
||||
|
||||
"""
|
||||
Defines the loss function for hyperopt.
|
||||
|
||||
This implementation optimizes for maximum drawdown and profit:
less maximum drawdown and more profit -> lower (better) return value.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def hyperopt_loss_function(results: DataFrame, trade_count: int,
|
||||
min_date: datetime, max_date: datetime,
|
||||
*args, **kwargs) -> float:
|
||||
|
||||
"""
|
||||
Objective function.
|
||||
|
||||
Uses absolute profit divided by maximum drawdown when a drawdown is available.
Otherwise optimizes absolute profit directly.
|
||||
"""
|
||||
total_profit = results['profit_abs'].sum()
|
||||
try:
|
||||
max_drawdown = calculate_max_drawdown(results, value_col='profit_abs')
|
||||
except ValueError:
|
||||
# No losing trade, therefore no drawdown.
|
||||
return -total_profit
|
||||
return -total_profit / max_drawdown[0]
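A small numeric sanity check with made-up figures in stake currency: for equal profit, the run with the smaller drawdown produces the lower (better) objective.

loss_a = -100 / 20   # profit 100, max drawdown 20 -> -5.0
loss_b = -100 / 50   # profit 100, max drawdown 50 -> -2.0 (worse, i.e. higher)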
|
@@ -9,23 +9,11 @@ from pandas import DataFrame
|
||||
from freqtrade.optimize.hyperopt import IHyperOptLoss
|
||||
|
||||
|
||||
# This is assumed to be expected avg profit * expected trade count.
|
||||
# For example, for 0.35% avg per trade (or 0.0035 as ratio) and 1100 trades,
|
||||
# expected max profit = 3.85
|
||||
#
|
||||
# Note, this is ratio. 3.85 stated above means 385Σ%, 3.0 means 300Σ%.
|
||||
#
|
||||
# In this implementation it's only used in calculation of the resulting value
|
||||
# of the objective function as a normalization coefficient and does not
|
||||
# represent any limit for profits as in the Freqtrade legacy default loss function.
|
||||
EXPECTED_MAX_PROFIT = 3.0
|
||||
|
||||
|
||||
class OnlyProfitHyperOptLoss(IHyperOptLoss):
|
||||
"""
|
||||
Defines the loss function for hyperopt.
|
||||
|
||||
This implementation takes only profit into account.
|
||||
This implementation takes only absolute profit into account, not looking at any other indicator.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
@@ -34,5 +22,5 @@ class OnlyProfitHyperOptLoss(IHyperOptLoss):
|
||||
"""
|
||||
Objective function, returns smaller number for better results.
|
||||
"""
|
||||
total_profit = results['profit_ratio'].sum()
|
||||
return 1 - total_profit / EXPECTED_MAX_PROFIT
|
||||
total_profit = results['profit_abs'].sum()
|
||||
return -1 * total_profit
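The new objective is simply the negated absolute profit, so it no longer depends on the removed EXPECTED_MAX_PROFIT normalization; with a hypothetical 150 units of profit:

loss = -1 * 150.0   # more absolute profit -> lower (better) objective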
|
||||
|
freqtrade/optimize/hyperopt_tools.py (502 lines, executable file)
@@ -0,0 +1,502 @@
|
||||
|
||||
import io
|
||||
import logging
|
||||
from copy import deepcopy
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Iterator, List, Optional, Tuple
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import rapidjson
|
||||
import tabulate
|
||||
from colorama import Fore, Style
|
||||
from pandas import isna, json_normalize
|
||||
|
||||
from freqtrade.constants import FTHYPT_FILEVERSION, USERPATH_STRATEGIES
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.misc import deep_merge_dicts, round_coin_value, round_dict, safe_value_fallback2
|
||||
from freqtrade.optimize.hyperopt_epoch_filters import hyperopt_filter_epochs
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
NON_OPT_PARAM_APPENDIX = " # value loaded from strategy"
|
||||
|
||||
|
||||
def hyperopt_serializer(x):
|
||||
if isinstance(x, np.integer):
|
||||
return int(x)
|
||||
if isinstance(x, np.bool_):
|
||||
return bool(x)
|
||||
|
||||
return str(x)
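A short sketch of why this helper exists: numpy scalars coming out of the optimizer are not JSON-serializable on their own, so the dump calls below hand them to rapidjson via the default hook. The dict content here is hypothetical.

import numpy as np
import rapidjson

from freqtrade.optimize.hyperopt_tools import hyperopt_serializer

rapidjson.dumps({'n': np.int64(5), 'flag': np.bool_(True)},
                default=hyperopt_serializer,
                number_mode=rapidjson.NM_NATIVE)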
|
||||
|
||||
|
||||
class HyperoptTools():
|
||||
|
||||
@staticmethod
|
||||
def get_strategy_filename(config: Dict, strategy_name: str) -> Optional[Path]:
|
||||
"""
|
||||
Get Strategy-location (filename) from strategy_name
|
||||
"""
|
||||
from freqtrade.resolvers.strategy_resolver import StrategyResolver
|
||||
directory = Path(config.get('strategy_path', config['user_data_dir'] / USERPATH_STRATEGIES))
|
||||
strategy_objs = StrategyResolver.search_all_objects(directory, False)
|
||||
strategies = [s for s in strategy_objs if s['name'] == strategy_name]
|
||||
if strategies:
|
||||
strategy = strategies[0]
|
||||
|
||||
return Path(strategy['location'])
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def export_params(params, strategy_name: str, filename: Path):
|
||||
"""
|
||||
Write the given parameters to the strategy parameter file
|
||||
"""
|
||||
final_params = deepcopy(params['params_not_optimized'])
|
||||
final_params = deep_merge_dicts(params['params_details'], final_params)
|
||||
final_params = {
|
||||
'strategy_name': strategy_name,
|
||||
'params': final_params,
|
||||
'ft_stratparam_v': 1,
|
||||
'export_time': datetime.now(timezone.utc),
|
||||
}
|
||||
logger.info(f"Dumping parameters to {filename}")
|
||||
rapidjson.dump(final_params, filename.open('w'), indent=2,
|
||||
default=hyperopt_serializer,
|
||||
number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def try_export_params(config: Dict[str, Any], strategy_name: str, params: Dict):
|
||||
if params.get(FTHYPT_FILEVERSION, 1) >= 2 and not config.get('disableparamexport', False):
|
||||
# Export parameters ...
|
||||
fn = HyperoptTools.get_strategy_filename(config, strategy_name)
|
||||
if fn:
|
||||
HyperoptTools.export_params(params, strategy_name, fn.with_suffix('.json'))
|
||||
else:
|
||||
logger.warning("Strategy not found, not exporting parameter file.")
|
||||
|
||||
@staticmethod
|
||||
def has_space(config: Dict[str, Any], space: str) -> bool:
|
||||
"""
|
||||
Tell if the space value is contained in the configuration
|
||||
"""
|
||||
# 'trailing' and 'protection' spaces are not included in the 'default' set of spaces
|
||||
if space in ('trailing', 'protection'):
|
||||
return any(s in config['spaces'] for s in [space, 'all'])
|
||||
else:
|
||||
return any(s in config['spaces'] for s in [space, 'all', 'default'])
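For illustration, with a hypothetical configuration of spaces = ['default']:

from freqtrade.optimize.hyperopt_tools import HyperoptTools

config = {'spaces': ['default']}                 # hypothetical config
HyperoptTools.has_space(config, 'buy')           # True  ('default' covers buy/sell/roi/stoploss)
HyperoptTools.has_space(config, 'trailing')      # False (needs 'trailing' or 'all' explicitly)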
|
||||
|
||||
@staticmethod
|
||||
def _read_results(results_file: Path, batch_size: int = 10) -> Iterator[List[Any]]:
|
||||
"""
|
||||
Stream hyperopt results from file
|
||||
"""
|
||||
import rapidjson
|
||||
logger.info(f"Reading epochs from '{results_file}'")
|
||||
with results_file.open('r') as f:
|
||||
data = []
|
||||
for line in f:
|
||||
data += [rapidjson.loads(line)]
|
||||
if len(data) >= batch_size:
|
||||
yield data
|
||||
data = []
|
||||
yield data
|
||||
|
||||
@staticmethod
|
||||
def _test_hyperopt_results_exist(results_file) -> bool:
|
||||
if results_file.is_file() and results_file.stat().st_size > 0:
|
||||
if results_file.suffix == '.pickle':
|
||||
raise OperationalException(
|
||||
"Legacy hyperopt results are no longer supported."
|
||||
"Please rerun hyperopt or use an older version to load this file."
|
||||
)
|
||||
return True
|
||||
else:
|
||||
# No file found.
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def load_filtered_results(results_file: Path, config: Dict[str, Any]) -> Tuple[List, int]:
|
||||
filteroptions = {
|
||||
'only_best': config.get('hyperopt_list_best', False),
|
||||
'only_profitable': config.get('hyperopt_list_profitable', False),
|
||||
'filter_min_trades': config.get('hyperopt_list_min_trades', 0),
|
||||
'filter_max_trades': config.get('hyperopt_list_max_trades', 0),
|
||||
'filter_min_avg_time': config.get('hyperopt_list_min_avg_time', None),
|
||||
'filter_max_avg_time': config.get('hyperopt_list_max_avg_time', None),
|
||||
'filter_min_avg_profit': config.get('hyperopt_list_min_avg_profit', None),
|
||||
'filter_max_avg_profit': config.get('hyperopt_list_max_avg_profit', None),
|
||||
'filter_min_total_profit': config.get('hyperopt_list_min_total_profit', None),
|
||||
'filter_max_total_profit': config.get('hyperopt_list_max_total_profit', None),
|
||||
'filter_min_objective': config.get('hyperopt_list_min_objective', None),
|
||||
'filter_max_objective': config.get('hyperopt_list_max_objective', None),
|
||||
}
|
||||
if not HyperoptTools._test_hyperopt_results_exist(results_file):
|
||||
# No file found.
|
||||
return [], 0
|
||||
|
||||
epochs = []
|
||||
total_epochs = 0
|
||||
for epochs_tmp in HyperoptTools._read_results(results_file):
|
||||
if total_epochs == 0 and epochs_tmp[0].get('is_best') is None:
|
||||
raise OperationalException(
|
||||
"The file with HyperoptTools results is incompatible with this version "
|
||||
"of Freqtrade and cannot be loaded.")
|
||||
total_epochs += len(epochs_tmp)
|
||||
epochs += hyperopt_filter_epochs(epochs_tmp, filteroptions, log=False)
|
||||
|
||||
logger.info(f"Loaded {total_epochs} previous evaluations from disk.")
|
||||
|
||||
# Final filter run ...
|
||||
epochs = hyperopt_filter_epochs(epochs, filteroptions, log=True)
|
||||
|
||||
return epochs, total_epochs
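A hedged usage sketch; the results path and config values are placeholders, and missing keys simply fall back to the defaults used above.

from pathlib import Path

from freqtrade.optimize.hyperopt_tools import HyperoptTools

epochs, total_epochs = HyperoptTools.load_filtered_results(
    Path('user_data/hyperopt_results/hyperopt_results.fthypt'),   # placeholder path
    {'hyperopt_list_profitable': True, 'hyperopt_list_min_trades': 10})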
|
||||
|
||||
@staticmethod
|
||||
def show_epoch_details(results, total_epochs: int, print_json: bool,
|
||||
no_header: bool = False, header_str: str = None) -> None:
|
||||
"""
|
||||
Display details of the hyperopt result
|
||||
"""
|
||||
params = results.get('params_details', {})
|
||||
non_optimized = results.get('params_not_optimized', {})
|
||||
|
||||
# Default header string
|
||||
if header_str is None:
|
||||
header_str = "Best result"
|
||||
|
||||
if not no_header:
|
||||
explanation_str = HyperoptTools._format_explanation_string(results, total_epochs)
|
||||
print(f"\n{header_str}:\n\n{explanation_str}\n")
|
||||
|
||||
if print_json:
|
||||
result_dict: Dict = {}
|
||||
for s in ['buy', 'sell', 'protection', 'roi', 'stoploss', 'trailing']:
|
||||
HyperoptTools._params_update_for_json(result_dict, params, non_optimized, s)
|
||||
print(rapidjson.dumps(result_dict, default=str, number_mode=rapidjson.NM_NATIVE))
|
||||
|
||||
else:
|
||||
HyperoptTools._params_pretty_print(params, 'buy', "Buy hyperspace params:",
|
||||
non_optimized)
|
||||
HyperoptTools._params_pretty_print(params, 'sell', "Sell hyperspace params:",
|
||||
non_optimized)
|
||||
HyperoptTools._params_pretty_print(params, 'protection',
|
||||
"Protection hyperspace params:", non_optimized)
|
||||
HyperoptTools._params_pretty_print(params, 'roi', "ROI table:", non_optimized)
|
||||
HyperoptTools._params_pretty_print(params, 'stoploss', "Stoploss:", non_optimized)
|
||||
HyperoptTools._params_pretty_print(params, 'trailing', "Trailing stop:", non_optimized)
|
||||
|
||||
@staticmethod
|
||||
def _params_update_for_json(result_dict, params, non_optimized, space: str) -> None:
|
||||
if (space in params) or (space in non_optimized):
|
||||
space_params = HyperoptTools._space_params(params, space)
|
||||
space_non_optimized = HyperoptTools._space_params(non_optimized, space)
|
||||
all_space_params = space_params
|
||||
|
||||
# Merge non optimized params if there are any
|
||||
if len(space_non_optimized) > 0:
|
||||
all_space_params = {**space_params, **space_non_optimized}
|
||||
|
||||
if space in ['buy', 'sell']:
|
||||
result_dict.setdefault('params', {}).update(all_space_params)
|
||||
elif space == 'roi':
|
||||
# Convert keys in min_roi dict to strings because
|
||||
# rapidjson cannot dump dicts with integer keys...
|
||||
result_dict['minimal_roi'] = {str(k): v for k, v in all_space_params.items()}
|
||||
else: # 'stoploss', 'trailing'
|
||||
result_dict.update(all_space_params)
|
||||
|
||||
@staticmethod
|
||||
def _params_pretty_print(params, space: str, header: str, non_optimized={}) -> None:
|
||||
if space in params or space in non_optimized:
|
||||
space_params = HyperoptTools._space_params(params, space, 5)
|
||||
no_params = HyperoptTools._space_params(non_optimized, space, 5)
|
||||
appendix = ''
|
||||
if not space_params and not no_params:
|
||||
# No parameters - don't print
|
||||
return
|
||||
if not space_params:
|
||||
# Not optimized parameters - append string
|
||||
appendix = NON_OPT_PARAM_APPENDIX
|
||||
|
||||
result = f"\n# {header}\n"
|
||||
if space == "stoploss":
|
||||
stoploss = safe_value_fallback2(space_params, no_params, space, space)
|
||||
result += (f"stoploss = {stoploss}{appendix}")
|
||||
|
||||
elif space == "roi":
|
||||
result = result[:-1] + f'{appendix}\n'
|
||||
minimal_roi_result = rapidjson.dumps({
|
||||
str(k): v for k, v in (space_params or no_params).items()
|
||||
}, default=str, indent=4, number_mode=rapidjson.NM_NATIVE)
|
||||
result += f"minimal_roi = {minimal_roi_result}"
|
||||
elif space == "trailing":
|
||||
for k, v in (space_params or no_params).items():
|
||||
result += f"{k} = {v}{appendix}\n"
|
||||
|
||||
else:
|
||||
# Buy / sell parameters
|
||||
|
||||
result += f"{space}_params = {HyperoptTools._pprint_dict(space_params, no_params)}"
|
||||
|
||||
result = result.replace("\n", "\n ")
|
||||
print(result)
|
||||
|
||||
@staticmethod
|
||||
def _space_params(params, space: str, r: int = None) -> Dict:
|
||||
d = params.get(space)
|
||||
if d:
|
||||
# Round floats to `r` digits after the decimal point if requested
|
||||
return round_dict(d, r) if r else d
|
||||
return {}
|
||||
|
||||
@staticmethod
|
||||
def _pprint_dict(params, non_optimized, indent: int = 4):
|
||||
"""
|
||||
Pretty-print hyperopt parameters from two dicts, appending a comment to non-optimized values
|
||||
"""
|
||||
p = params.copy()
|
||||
p.update(non_optimized)
|
||||
result = '{\n'
|
||||
|
||||
for k, param in p.items():
|
||||
result += " " * indent + f'"{k}": '
|
||||
result += f'"{param}",' if isinstance(param, str) else f'{param},'
|
||||
if k in non_optimized:
|
||||
result += NON_OPT_PARAM_APPENDIX
|
||||
result += "\n"
|
||||
result += '}'
|
||||
return result
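For example, with hypothetical parameters where 'buy_rsi' was optimized and 'buy_ema' was not, the non-optimized value picks up the loaded-from-strategy comment:

from freqtrade.optimize.hyperopt_tools import HyperoptTools

print(HyperoptTools._pprint_dict({'buy_rsi': 32}, {'buy_ema': 9}))
# {
#     "buy_rsi": 32,
#     "buy_ema": 9, # value loaded from strategy
# }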
|
||||
|
||||
@staticmethod
|
||||
def is_best_loss(results, current_best_loss: float) -> bool:
|
||||
return bool(results['loss'] < current_best_loss)
|
||||
|
||||
@staticmethod
|
||||
def format_results_explanation_string(results_metrics: Dict, stake_currency: str) -> str:
|
||||
"""
|
||||
Return the formatted results explanation in a string
|
||||
"""
|
||||
return (f"{results_metrics['total_trades']:6d} trades. "
|
||||
f"{results_metrics['wins']}/{results_metrics['draws']}"
|
||||
f"/{results_metrics['losses']} Wins/Draws/Losses. "
|
||||
f"Avg profit {results_metrics['profit_mean'] * 100: 6.2f}%. "
|
||||
f"Median profit {results_metrics['profit_median'] * 100: 6.2f}%. "
|
||||
f"Total profit {results_metrics['profit_total_abs']: 11.8f} {stake_currency} "
|
||||
f"({results_metrics['profit_total'] * 100: 7.2f}%). "
|
||||
f"Avg duration {results_metrics['holding_avg']} min."
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _format_explanation_string(results, total_epochs) -> str:
|
||||
return (("*" if results['is_initial_point'] else " ") +
|
||||
f"{results['current_epoch']:5d}/{total_epochs}: " +
|
||||
f"{results['results_explanation']} " +
|
||||
f"Objective: {results['loss']:.5f}")
|
||||
|
||||
@staticmethod
|
||||
def prepare_trials_columns(trials: pd.DataFrame, legacy_mode: bool,
|
||||
has_drawdown: bool) -> pd.DataFrame:
|
||||
trials['Best'] = ''
|
||||
|
||||
if 'results_metrics.winsdrawslosses' not in trials.columns:
|
||||
# Ensure compatibility with older versions of hyperopt results
|
||||
trials['results_metrics.winsdrawslosses'] = 'N/A'
|
||||
|
||||
if not has_drawdown:
|
||||
# Ensure compatibility with older versions of hyperopt results
|
||||
trials['results_metrics.max_drawdown_abs'] = None
|
||||
trials['results_metrics.max_drawdown'] = None
|
||||
|
||||
if not legacy_mode:
|
||||
# New mode, using backtest result for metrics
|
||||
trials['results_metrics.winsdrawslosses'] = trials.apply(
|
||||
lambda x: f"{x['results_metrics.wins']} {x['results_metrics.draws']:>4} "
|
||||
f"{x['results_metrics.losses']:>4}", axis=1)
|
||||
trials = trials[['Best', 'current_epoch', 'results_metrics.total_trades',
|
||||
'results_metrics.winsdrawslosses',
|
||||
'results_metrics.profit_mean', 'results_metrics.profit_total_abs',
|
||||
'results_metrics.profit_total', 'results_metrics.holding_avg',
|
||||
'results_metrics.max_drawdown', 'results_metrics.max_drawdown_abs',
|
||||
'loss', 'is_initial_point', 'is_best']]
|
||||
|
||||
else:
|
||||
# Legacy mode
|
||||
trials = trials[['Best', 'current_epoch', 'results_metrics.trade_count',
|
||||
'results_metrics.winsdrawslosses', 'results_metrics.avg_profit',
|
||||
'results_metrics.total_profit', 'results_metrics.profit',
|
||||
'results_metrics.duration', 'results_metrics.max_drawdown',
|
||||
'results_metrics.max_drawdown_abs', 'loss', 'is_initial_point',
|
||||
'is_best']]
|
||||
|
||||
trials.columns = ['Best', 'Epoch', 'Trades', ' Win Draw Loss', 'Avg profit',
|
||||
'Total profit', 'Profit', 'Avg duration', 'Max Drawdown',
|
||||
'max_drawdown_abs', 'Objective', 'is_initial_point', 'is_best']
|
||||
|
||||
return trials
|
||||
|
||||
@staticmethod
|
||||
def get_result_table(config: dict, results: list, total_epochs: int, highlight_best: bool,
|
||||
print_colorized: bool, remove_header: int) -> str:
|
||||
"""
|
||||
Generate the result table as a formatted string
|
||||
"""
|
||||
if not results:
|
||||
return ''
|
||||
|
||||
tabulate.PRESERVE_WHITESPACE = True
|
||||
trials = json_normalize(results, max_level=1)
|
||||
|
||||
legacy_mode = 'results_metrics.total_trades' not in trials
|
||||
has_drawdown = 'results_metrics.max_drawdown_abs' in trials.columns
|
||||
|
||||
trials = HyperoptTools.prepare_trials_columns(trials, legacy_mode, has_drawdown)
|
||||
|
||||
trials['is_profit'] = False
|
||||
trials.loc[trials['is_initial_point'], 'Best'] = '* '
|
||||
trials.loc[trials['is_best'], 'Best'] = 'Best'
|
||||
trials.loc[trials['is_initial_point'] & trials['is_best'], 'Best'] = '* Best'
|
||||
trials.loc[trials['Total profit'] > 0, 'is_profit'] = True
|
||||
trials['Trades'] = trials['Trades'].astype(str)
|
||||
perc_multi = 1 if legacy_mode else 100
|
||||
trials['Epoch'] = trials['Epoch'].apply(
|
||||
lambda x: '{}/{}'.format(str(x).rjust(len(str(total_epochs)), ' '), total_epochs)
|
||||
)
|
||||
trials['Avg profit'] = trials['Avg profit'].apply(
|
||||
lambda x: f'{x * perc_multi:,.2f}%'.rjust(7, ' ') if not isna(x) else "--".rjust(7, ' ')
|
||||
)
|
||||
trials['Avg duration'] = trials['Avg duration'].apply(
|
||||
lambda x: f'{x:,.1f} m'.rjust(7, ' ') if isinstance(x, float) else f"{x}"
|
||||
if not isna(x) else "--".rjust(7, ' ')
|
||||
)
|
||||
trials['Objective'] = trials['Objective'].apply(
|
||||
lambda x: f'{x:,.5f}'.rjust(8, ' ') if x != 100000 else "N/A".rjust(8, ' ')
|
||||
)
|
||||
|
||||
stake_currency = config['stake_currency']
|
||||
|
||||
if has_drawdown:
|
||||
trials['Max Drawdown'] = trials.apply(
|
||||
lambda x: '{} {}'.format(
|
||||
round_coin_value(x['max_drawdown_abs'], stake_currency),
|
||||
'({:,.2f}%)'.format(x['Max Drawdown'] * perc_multi).rjust(10, ' ')
|
||||
).rjust(25 + len(stake_currency))
|
||||
if x['Max Drawdown'] != 0.0 else '--'.rjust(25 + len(stake_currency)),
|
||||
axis=1
|
||||
)
|
||||
else:
|
||||
trials = trials.drop(columns=['Max Drawdown'])
|
||||
|
||||
trials = trials.drop(columns=['max_drawdown_abs'])
|
||||
|
||||
trials['Profit'] = trials.apply(
|
||||
lambda x: '{} {}'.format(
|
||||
round_coin_value(x['Total profit'], stake_currency),
|
||||
'({:,.2f}%)'.format(x['Profit'] * perc_multi).rjust(10, ' ')
|
||||
).rjust(25+len(stake_currency))
|
||||
if x['Total profit'] != 0.0 else '--'.rjust(25+len(stake_currency)),
|
||||
axis=1
|
||||
)
|
||||
trials = trials.drop(columns=['Total profit'])
|
||||
|
||||
if print_colorized:
|
||||
for i in range(len(trials)):
|
||||
if trials.loc[i]['is_profit']:
|
||||
for j in range(len(trials.loc[i])-3):
|
||||
trials.iat[i, j] = "{}{}{}".format(Fore.GREEN,
|
||||
str(trials.loc[i][j]), Fore.RESET)
|
||||
if trials.loc[i]['is_best'] and highlight_best:
|
||||
for j in range(len(trials.loc[i])-3):
|
||||
trials.iat[i, j] = "{}{}{}".format(Style.BRIGHT,
|
||||
str(trials.loc[i][j]), Style.RESET_ALL)
|
||||
|
||||
trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit'])
|
||||
if remove_header > 0:
|
||||
table = tabulate.tabulate(
|
||||
trials.to_dict(orient='list'), tablefmt='orgtbl',
|
||||
headers='keys', stralign="right"
|
||||
)
|
||||
|
||||
table = table.split("\n", remove_header)[remove_header]
|
||||
elif remove_header < 0:
|
||||
table = tabulate.tabulate(
|
||||
trials.to_dict(orient='list'), tablefmt='psql',
|
||||
headers='keys', stralign="right"
|
||||
)
|
||||
table = "\n".join(table.split("\n")[0:remove_header])
|
||||
else:
|
||||
table = tabulate.tabulate(
|
||||
trials.to_dict(orient='list'), tablefmt='psql',
|
||||
headers='keys', stralign="right"
|
||||
)
|
||||
return table
|
||||
|
||||
@staticmethod
|
||||
def export_csv_file(config: dict, results: list, csv_file: str) -> None:
|
||||
"""
|
||||
Export results to a CSV file
|
||||
"""
|
||||
if not results:
|
||||
return
|
||||
|
||||
# Verification for overwrite
|
||||
if Path(csv_file).is_file():
|
||||
logger.error(f"CSV file already exists: {csv_file}")
|
||||
return
|
||||
|
||||
try:
|
||||
io.open(csv_file, 'w+').close()
|
||||
except IOError:
|
||||
logger.error(f"Failed to create CSV file: {csv_file}")
|
||||
return
|
||||
|
||||
trials = json_normalize(results, max_level=1)
|
||||
trials['Best'] = ''
|
||||
trials['Stake currency'] = config['stake_currency']
|
||||
|
||||
base_metrics = ['Best', 'current_epoch', 'results_metrics.total_trades',
|
||||
'results_metrics.profit_mean', 'results_metrics.profit_median',
|
||||
'results_metrics.profit_total',
|
||||
'Stake currency',
|
||||
'results_metrics.profit_total_abs', 'results_metrics.holding_avg',
|
||||
'loss', 'is_initial_point', 'is_best']
|
||||
perc_multi = 100
|
||||
|
||||
param_metrics = [("params_dict."+param) for param in results[0]['params_dict'].keys()]
|
||||
trials = trials[base_metrics + param_metrics]
|
||||
|
||||
base_columns = ['Best', 'Epoch', 'Trades', 'Avg profit', 'Median profit', 'Total profit',
|
||||
'Stake currency', 'Profit', 'Avg duration', 'Objective',
|
||||
'is_initial_point', 'is_best']
|
||||
param_columns = list(results[0]['params_dict'].keys())
|
||||
trials.columns = base_columns + param_columns
|
||||
|
||||
trials['is_profit'] = False
|
||||
trials.loc[trials['is_initial_point'], 'Best'] = '*'
|
||||
trials.loc[trials['is_best'], 'Best'] = 'Best'
|
||||
trials.loc[trials['is_initial_point'] & trials['is_best'], 'Best'] = '* Best'
|
||||
trials.loc[trials['Total profit'] > 0, 'is_profit'] = True
|
||||
trials['Epoch'] = trials['Epoch'].astype(str)
|
||||
trials['Trades'] = trials['Trades'].astype(str)
|
||||
trials['Median profit'] = trials['Median profit'] * perc_multi
|
||||
|
||||
trials['Total profit'] = trials['Total profit'].apply(
|
||||
lambda x: f'{x:,.8f}' if x != 0.0 else ""
|
||||
)
|
||||
trials['Profit'] = trials['Profit'].apply(
|
||||
lambda x: f'{x:,.2f}' if not isna(x) else ""
|
||||
)
|
||||
trials['Avg profit'] = trials['Avg profit'].apply(
|
||||
lambda x: f'{x * perc_multi:,.2f}%' if not isna(x) else ""
|
||||
)
|
||||
trials['Objective'] = trials['Objective'].apply(
|
||||
lambda x: f'{x:,.5f}' if x != 100000 else ""
|
||||
)
|
||||
|
||||
trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit'])
|
||||
trials.to_csv(csv_file, index=False, header=True, mode='w', encoding='UTF-8')
|
||||
logger.info(f"CSV file created: {csv_file}")
|
@@ -3,7 +3,6 @@ from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
from arrow import Arrow
|
||||
from numpy import int64
|
||||
from pandas import DataFrame
|
||||
from tabulate import tabulate
|
||||
@@ -22,7 +21,7 @@ def store_backtest_stats(recordfilename: Path, stats: Dict[str, DataFrame]) -> N
|
||||
Stores backtest results
|
||||
:param recordfilename: Path object, which can either be a filename or a directory.
|
||||
Filenames will be appended with a timestamp right before the suffix
|
||||
while for diectories, <directory>/backtest-result-<datetime>.json will be used as filename
|
||||
while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
|
||||
:param stats: Dataframe containing the backtesting statistics
|
||||
"""
|
||||
if recordfilename.is_dir():
|
||||
@@ -32,7 +31,7 @@ def store_backtest_stats(recordfilename: Path, stats: Dict[str, DataFrame]) -> N
|
||||
filename = Path.joinpath(
|
||||
recordfilename.parent,
|
||||
f'{recordfilename.stem}-{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}'
|
||||
).with_suffix(recordfilename.suffix)
|
||||
).with_suffix(recordfilename.suffix)
|
||||
file_dump_json(filename, stats)
|
||||
|
||||
latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
|
||||
@@ -44,7 +43,7 @@ def _get_line_floatfmt(stake_currency: str) -> List[str]:
|
||||
Generate floatformat (goes in line with _generate_result_line())
|
||||
"""
|
||||
return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
|
||||
'.2f', 'd', 'd', 'd', 'd']
|
||||
'.2f', 'd', 's', 's']
|
||||
|
||||
|
||||
def _get_line_header(first_column: str, stake_currency: str) -> List[str]:
|
||||
@@ -53,7 +52,17 @@ def _get_line_header(first_column: str, stake_currency: str) -> List[str]:
|
||||
"""
|
||||
return [first_column, 'Buys', 'Avg Profit %', 'Cum Profit %',
|
||||
f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
|
||||
'Wins', 'Draws', 'Losses']
|
||||
'Win Draw Loss Win%']
|
||||
|
||||
|
||||
def _generate_wins_draws_losses(wins, draws, losses):
|
||||
if wins > 0 and losses == 0:
|
||||
wl_ratio = '100'
|
||||
elif wins == 0:
|
||||
wl_ratio = '0'
|
||||
else:
|
||||
wl_ratio = f'{100.0 / (wins + draws + losses) * wins:.1f}' if losses > 0 else '100'
|
||||
return f'{wins:>4} {draws:>4} {losses:>4} {wl_ratio:>4}'
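A quick worked example of the combined win/draw/loss column with hypothetical counts; the import path is assumed to be this module (freqtrade.optimize.optimize_reports).

from freqtrade.optimize.optimize_reports import _generate_wins_draws_losses

_generate_wins_draws_losses(10, 2, 4)   # -> '  10    2    4 62.5'  (100 * 10 / 16)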
|
||||
|
||||
|
||||
def _generate_result_line(result: DataFrame, starting_balance: int, first_column: str) -> Dict:
@@ -110,6 +119,9 @@ def generate_pair_metrics(data: Dict[str, Dict], stake_currency: str, starting_b

tabular_data.append(_generate_result_line(result, starting_balance, pair))

# Sort by total profit %:
tabular_data = sorted(tabular_data, key=lambda k: k['profit_total_abs'], reverse=True)

# Append Total
tabular_data.append(_generate_result_line(results, starting_balance, 'TOTAL'))
return tabular_data
@@ -150,7 +162,7 @@ def generate_sell_reason_stats(max_open_trades: int, results: DataFrame) -> List
return tabular_data


def generate_strategy_metrics(all_results: Dict) -> List[Dict]:
def generate_strategy_comparison(all_results: Dict) -> List[Dict]:
"""
Generate summary per strategy
:param all_results: Dict of <Strategyname: DataFrame> containing results for all strategies
@@ -162,6 +174,17 @@ def generate_strategy_metrics(all_results: Dict) -> List[Dict]:
tabular_data.append(_generate_result_line(
results['results'], results['config']['dry_run_wallet'], strategy)
)
try:
max_drawdown_per, _, _, _, _ = calculate_max_drawdown(results['results'],
value_col='profit_ratio')
max_drawdown_abs, _, _, _, _ = calculate_max_drawdown(results['results'],
value_col='profit_abs')
except ValueError:
max_drawdown_per = 0
max_drawdown_abs = 0
tabular_data[-1]['max_drawdown_per'] = round(max_drawdown_per * 100, 2)
tabular_data[-1]['max_drawdown_abs'] = \
round_coin_value(max_drawdown_abs, results['config']['stake_currency'], False)
return tabular_data


@@ -213,7 +236,44 @@ def generate_days_breakdown_stats(results: DataFrame, starting_balance: int) ->
return days_stats

def generate_trading_stats(results: DataFrame) -> Dict[str, Any]:
""" Generate overall trade statistics """
if len(results) == 0:
return {
'wins': 0,
'losses': 0,
'draws': 0,
'holding_avg': timedelta(),
'winner_holding_avg': timedelta(),
'loser_holding_avg': timedelta(),
}

winning_trades = results.loc[results['profit_ratio'] > 0]
draw_trades = results.loc[results['profit_ratio'] == 0]
losing_trades = results.loc[results['profit_ratio'] < 0]

holding_avg = (timedelta(minutes=round(results['trade_duration'].mean()))
if not results.empty else timedelta())
winner_holding_avg = (timedelta(minutes=round(winning_trades['trade_duration'].mean()))
if not winning_trades.empty else timedelta())
loser_holding_avg = (timedelta(minutes=round(losing_trades['trade_duration'].mean()))
if not losing_trades.empty else timedelta())

return {
'wins': len(winning_trades),
'losses': len(losing_trades),
'draws': len(draw_trades),
'holding_avg': holding_avg,
'holding_avg_s': holding_avg.total_seconds(),
'winner_holding_avg': winner_holding_avg,
'winner_holding_avg_s': winner_holding_avg.total_seconds(),
'loser_holding_avg': loser_holding_avg,
'loser_holding_avg_s': loser_holding_avg.total_seconds(),
}


def generate_daily_stats(results: DataFrame) -> Dict[str, Any]:
""" Generate daily statistics """
if len(results) == 0:
return {
'backtest_best_day': 0,
@@ -223,8 +283,7 @@ def generate_daily_stats(results: DataFrame) -> Dict[str, Any]:
'winning_days': 0,
'draw_days': 0,
'losing_days': 0,
'winner_holding_avg': timedelta(),
'loser_holding_avg': timedelta(),
'daily_profit_list': [],
}
daily_profit_rel = results.resample('1d', on='close_date')['profit_ratio'].sum()
daily_profit = results.resample('1d', on='close_date')['profit_abs'].sum().round(10)
@@ -235,9 +294,7 @@ def generate_daily_stats(results: DataFrame) -> Dict[str, Any]:
winning_days = sum(daily_profit > 0)
draw_days = sum(daily_profit == 0)
losing_days = sum(daily_profit < 0)

winning_trades = results.loc[results['profit_ratio'] > 0]
losing_trades = results.loc[results['profit_ratio'] < 0]
daily_profit_list = [(str(idx.date()), val) for idx, val in daily_profit.iteritems()]

return {
'backtest_best_day': best_rel,
@@ -247,16 +304,159 @@ def generate_daily_stats(results: DataFrame) -> Dict[str, Any]:
'winning_days': winning_days,
'draw_days': draw_days,
'losing_days': losing_days,
'winner_holding_avg': (timedelta(minutes=round(winning_trades['trade_duration'].mean()))
if not winning_trades.empty else timedelta()),
'loser_holding_avg': (timedelta(minutes=round(losing_trades['trade_duration'].mean()))
if not losing_trades.empty else timedelta()),
'daily_profit': daily_profit_list,
}

def generate_strategy_stats(btdata: Dict[str, DataFrame],
strategy: str,
content: Dict[str, Any],
min_date: datetime, max_date: datetime,
market_change: float
) -> Dict[str, Any]:
"""
:param btdata: Backtest data
:param strategy: Strategy name
:param content: Backtest result data in the format:
{'results: results, 'config: config}}.
:param min_date: Backtest start date
:param max_date: Backtest end date
:param market_change: float indicating the market change
:return: Dictionary containing results per strategy and a strategy summary.
"""
results: Dict[str, DataFrame] = content['results']
if not isinstance(results, DataFrame):
return {}
config = content['config']
max_open_trades = min(config['max_open_trades'], len(btdata.keys()))
starting_balance = config['dry_run_wallet']
stake_currency = config['stake_currency']

pair_results = generate_pair_metrics(btdata, stake_currency=stake_currency,
starting_balance=starting_balance,
results=results, skip_nan=False)
sell_reason_stats = generate_sell_reason_stats(max_open_trades=max_open_trades,
results=results)
left_open_results = generate_pair_metrics(btdata, stake_currency=stake_currency,
starting_balance=starting_balance,
results=results.loc[results['is_open']],
skip_nan=True)
days_breakdown_stats = generate_days_breakdown_stats(
results=results, starting_balance=starting_balance)
daily_stats = generate_daily_stats(results)
trade_stats = generate_trading_stats(results)
best_pair = max([pair for pair in pair_results if pair['key'] != 'TOTAL'],
key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None
worst_pair = min([pair for pair in pair_results if pair['key'] != 'TOTAL'],
key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None
if not results.empty:
results['open_timestamp'] = results['open_date'].view(int64) // 1e6
results['close_timestamp'] = results['close_date'].view(int64) // 1e6

backtest_days = (max_date - min_date).days
strat_stats = {
'trades': results.to_dict(orient='records'),
'locks': [lock.to_json() for lock in content['locks']],
'best_pair': best_pair,
'worst_pair': worst_pair,
'results_per_pair': pair_results,
'sell_reason_summary': sell_reason_stats,
'left_open_trades': left_open_results,
'days_breakdown_stats': days_breakdown_stats,

'total_trades': len(results),
'total_volume': float(results['stake_amount'].sum()),
'avg_stake_amount': results['stake_amount'].mean() if len(results) > 0 else 0,
'profit_mean': results['profit_ratio'].mean() if len(results) > 0 else 0,
'profit_median': results['profit_ratio'].median() if len(results) > 0 else 0,
'profit_total': results['profit_abs'].sum() / starting_balance,
'profit_total_abs': results['profit_abs'].sum(),
'backtest_start': min_date.strftime(DATETIME_PRINT_FORMAT),
'backtest_start_ts': int(min_date.timestamp() * 1000),
'backtest_end': max_date.strftime(DATETIME_PRINT_FORMAT),
'backtest_end_ts': int(max_date.timestamp() * 1000),
'backtest_days': backtest_days,

'backtest_run_start_ts': content['backtest_start_time'],
'backtest_run_end_ts': content['backtest_end_time'],

'trades_per_day': round(len(results) / backtest_days, 2) if backtest_days > 0 else 0,
'market_change': market_change,
'pairlist': list(btdata.keys()),
'stake_amount': config['stake_amount'],
'stake_currency': config['stake_currency'],
'stake_currency_decimals': decimals_per_coin(config['stake_currency']),
'starting_balance': starting_balance,
'dry_run_wallet': starting_balance,
'final_balance': content['final_balance'],
'rejected_signals': content['rejected_signals'],
'max_open_trades': max_open_trades,
'max_open_trades_setting': (config['max_open_trades']
if config['max_open_trades'] != float('inf') else -1),
'timeframe': config['timeframe'],
'timeframe_detail': config.get('timeframe_detail', ''),
'timerange': config.get('timerange', ''),
'enable_protections': config.get('enable_protections', False),
'strategy_name': strategy,
# Parameters relevant for backtesting
'stoploss': config['stoploss'],
'trailing_stop': config.get('trailing_stop', False),
'trailing_stop_positive': config.get('trailing_stop_positive'),
'trailing_stop_positive_offset': config.get('trailing_stop_positive_offset', 0.0),
'trailing_only_offset_is_reached': config.get('trailing_only_offset_is_reached', False),
'use_custom_stoploss': config.get('use_custom_stoploss', False),
'minimal_roi': config['minimal_roi'],
'use_sell_signal': config['use_sell_signal'],
'sell_profit_only': config['sell_profit_only'],
'sell_profit_offset': config['sell_profit_offset'],
'ignore_roi_if_buy_signal': config['ignore_roi_if_buy_signal'],
**daily_stats,
**trade_stats
}

try:
max_drawdown, _, _, _, _ = calculate_max_drawdown(
results, value_col='profit_ratio')
drawdown_abs, drawdown_start, drawdown_end, high_val, low_val = calculate_max_drawdown(
results, value_col='profit_abs')
strat_stats.update({
'max_drawdown': max_drawdown,
'max_drawdown_abs': drawdown_abs,
'drawdown_start': drawdown_start.strftime(DATETIME_PRINT_FORMAT),
'drawdown_start_ts': drawdown_start.timestamp() * 1000,
'drawdown_end': drawdown_end.strftime(DATETIME_PRINT_FORMAT),
'drawdown_end_ts': drawdown_end.timestamp() * 1000,

'max_drawdown_low': low_val,
'max_drawdown_high': high_val,
})

csum_min, csum_max = calculate_csum(results, starting_balance)
strat_stats.update({
'csum_min': csum_min,
'csum_max': csum_max
})

except ValueError:
strat_stats.update({
'max_drawdown': 0.0,
'max_drawdown_abs': 0.0,
'max_drawdown_low': 0.0,
'max_drawdown_high': 0.0,
'drawdown_start': datetime(1970, 1, 1, tzinfo=timezone.utc),
'drawdown_start_ts': 0,
'drawdown_end': datetime(1970, 1, 1, tzinfo=timezone.utc),
'drawdown_end_ts': 0,
'csum_min': 0,
'csum_max': 0
})

return strat_stats

def generate_backtest_stats(btdata: Dict[str, DataFrame],
all_results: Dict[str, Dict[str, Union[DataFrame, Dict]]],
min_date: Arrow, max_date: Arrow
min_date: datetime, max_date: datetime
) -> Dict[str, Any]:
"""
:param btdata: Backtest data
@@ -264,135 +464,17 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame],
{ Strategy: {'results: results, 'config: config}}.
:param min_date: Backtest start date
:param max_date: Backtest end date
:return:
Dictionary containing results per strategy and a stratgy summary.
:return: Dictionary containing results per strategy and a strategy summary.
"""
result: Dict[str, Any] = {'strategy': {}}
market_change = calculate_market_change(btdata, 'close')

for strategy, content in all_results.items():
results: Dict[str, DataFrame] = content['results']
if not isinstance(results, DataFrame):
continue
config = content['config']
max_open_trades = min(config['max_open_trades'], len(btdata.keys()))
starting_balance = config['dry_run_wallet']
stake_currency = config['stake_currency']

pair_results = generate_pair_metrics(btdata, stake_currency=stake_currency,
starting_balance=starting_balance,
results=results, skip_nan=False)
sell_reason_stats = generate_sell_reason_stats(max_open_trades=max_open_trades,
results=results)
left_open_results = generate_pair_metrics(btdata, stake_currency=stake_currency,
starting_balance=starting_balance,
results=results.loc[results['is_open']],
skip_nan=True)
days_breakdown_stats = generate_days_breakdown_stats(results=results,
starting_balance=starting_balance)
daily_stats = generate_daily_stats(results)
best_pair = max([pair for pair in pair_results if pair['key'] != 'TOTAL'],
key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None
worst_pair = min([pair for pair in pair_results if pair['key'] != 'TOTAL'],
key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None
results['open_timestamp'] = results['open_date'].astype(int64) // 1e6
results['close_timestamp'] = results['close_date'].astype(int64) // 1e6

backtest_days = (max_date - min_date).days
strat_stats = {
'trades': results.to_dict(orient='records'),
'locks': [lock.to_json() for lock in content['locks']],
'best_pair': best_pair,
'worst_pair': worst_pair,
'results_per_pair': pair_results,
'sell_reason_summary': sell_reason_stats,
'left_open_trades': left_open_results,
'days_breakdown_stats': days_breakdown_stats,
'total_trades': len(results),
'total_volume': float(results['stake_amount'].sum()),
'avg_stake_amount': results['stake_amount'].mean() if len(results) > 0 else 0,
'profit_mean': results['profit_ratio'].mean() if len(results) > 0 else 0,
'profit_total': results['profit_abs'].sum() / starting_balance,
'profit_total_abs': results['profit_abs'].sum(),
'backtest_start': min_date.datetime,
'backtest_start_ts': min_date.int_timestamp * 1000,
'backtest_end': max_date.datetime,
'backtest_end_ts': max_date.int_timestamp * 1000,
'backtest_days': backtest_days,

'backtest_run_start_ts': content['backtest_start_time'],
'backtest_run_end_ts': content['backtest_end_time'],

'trades_per_day': round(len(results) / backtest_days, 2) if backtest_days > 0 else 0,
'market_change': market_change,
'pairlist': list(btdata.keys()),
'stake_amount': config['stake_amount'],
'stake_currency': config['stake_currency'],
'stake_currency_decimals': decimals_per_coin(config['stake_currency']),
'starting_balance': starting_balance,
'dry_run_wallet': starting_balance,
'final_balance': content['final_balance'],
'max_open_trades': max_open_trades,
'max_open_trades_setting': (config['max_open_trades']
if config['max_open_trades'] != float('inf') else -1),
'timeframe': config['timeframe'],
'timerange': config.get('timerange', ''),
'enable_protections': config.get('enable_protections', False),
'strategy_name': strategy,
# Parameters relevant for backtesting
'stoploss': config['stoploss'],
'trailing_stop': config.get('trailing_stop', False),
'trailing_stop_positive': config.get('trailing_stop_positive'),
'trailing_stop_positive_offset': config.get('trailing_stop_positive_offset', 0.0),
'trailing_only_offset_is_reached': config.get('trailing_only_offset_is_reached', False),
'use_custom_stoploss': config.get('use_custom_stoploss', False),
'minimal_roi': config['minimal_roi'],
'use_sell_signal': config['ask_strategy']['use_sell_signal'],
'sell_profit_only': config['ask_strategy']['sell_profit_only'],
'sell_profit_offset': config['ask_strategy']['sell_profit_offset'],
'ignore_roi_if_buy_signal': config['ask_strategy']['ignore_roi_if_buy_signal'],
**daily_stats,
}
strat_stats = generate_strategy_stats(btdata, strategy, content,
min_date, max_date, market_change=market_change)
result['strategy'][strategy] = strat_stats

try:
max_drawdown, _, _, _, _ = calculate_max_drawdown(
results, value_col='profit_ratio')
drawdown_abs, drawdown_start, drawdown_end, high_val, low_val = calculate_max_drawdown(
results, value_col='profit_abs')
strat_stats.update({
'max_drawdown': max_drawdown,
'max_drawdown_abs': drawdown_abs,
'drawdown_start': drawdown_start,
'drawdown_start_ts': drawdown_start.timestamp() * 1000,
'drawdown_end': drawdown_end,
'drawdown_end_ts': drawdown_end.timestamp() * 1000,

'max_drawdown_low': low_val,
'max_drawdown_high': high_val,
})

csum_min, csum_max = calculate_csum(results, starting_balance)
strat_stats.update({
'csum_min': csum_min,
'csum_max': csum_max
})

except ValueError:
strat_stats.update({
'max_drawdown': 0.0,
'max_drawdown_abs': 0.0,
'max_drawdown_low': 0.0,
'max_drawdown_high': 0.0,
'drawdown_start': datetime(1970, 1, 1, tzinfo=timezone.utc),
'drawdown_start_ts': 0,
'drawdown_end': datetime(1970, 1, 1, tzinfo=timezone.utc),
'drawdown_end_ts': 0,
'csum_min': 0,
'csum_max': 0
})

strategy_results = generate_strategy_metrics(all_results=all_results)
strategy_results = generate_strategy_comparison(all_results=all_results)

result['strategy_comparison'] = strategy_results

@@ -415,7 +497,8 @@ def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: st
floatfmt = _get_line_floatfmt(stake_currency)
output = [[
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
t['profit_total_pct'], t['duration_avg'], t['wins'], t['draws'], t['losses']
t['profit_total_pct'], t['duration_avg'],
_generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
] for t in pair_results]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
@@ -432,9 +515,7 @@ def text_table_sell_reason(sell_reason_stats: List[Dict[str, Any]], stake_curren
headers = [
'Sell Reason',
'Sells',
'Wins',
'Draws',
'Losses',
'Win Draws Loss Win%',
'Avg Profit %',
'Cum Profit %',
f'Tot Profit {stake_currency}',
@@ -442,7 +523,8 @@ def text_table_sell_reason(sell_reason_stats: List[Dict[str, Any]], stake_curren
]

output = [[
t['sell_reason'], t['trades'], t['wins'], t['draws'], t['losses'],
t['sell_reason'], t['trades'],
_generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
t['profit_mean_pct'], t['profit_sum_pct'],
round_coin_value(t['profit_total_abs'], stake_currency, False),
t['profit_total_pct'],
@@ -450,7 +532,8 @@ def text_table_sell_reason(sell_reason_stats: List[Dict[str, Any]], stake_curren
return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")

def text_table_days_breakdown(days_breakdown_stats: List[Dict[str, Any]], stake_currency: str) -> str:
def text_table_days_breakdown(days_breakdown_stats: List[Dict[str, Any]],
stake_currency: str) -> str:
"""
Generate small table with Backtest results by days
:param days_breakdown_stats: Days breakdown metrics
@@ -475,18 +558,28 @@ def text_table_days_breakdown(days_breakdown_stats: List[Dict[str, Any]], stake_
def text_table_strategy(strategy_results, stake_currency: str) -> str:
"""
Generate summary table per strategy
:param strategy_results: Dict of <Strategyname: DataFrame> containing results for all strategies
:param stake_currency: stake-currency - used to correctly name headers
:param max_open_trades: Maximum allowed open trades used for backtest
:param all_results: Dict of <Strategyname: DataFrame> containing results for all strategies
:return: pretty printed table with tabulate as string
"""
floatfmt = _get_line_floatfmt(stake_currency)
headers = _get_line_header('Strategy', stake_currency)
# _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless
# therefore we slip this column in only for strategy summary here.
headers.append('Drawdown')

# Align drawdown string on the center two space separator.
drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]
dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
dd_pad_per = max([len(dd) for dd in drawdown])
drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
for t, dd in zip(strategy_results, drawdown)]

output = [[
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
t['profit_total_pct'], t['duration_avg'], t['wins'], t['draws'], t['losses']
] for t in strategy_results]
t['profit_total_pct'], t['duration_avg'],
_generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown]
for t, drawdown in zip(strategy_results, drawdown)]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
@@ -496,12 +589,17 @@ def text_table_add_metrics(strat_results: Dict) -> str:
if len(strat_results['trades']) > 0:
best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])

# Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
# command stores these results and newer version of freqtrade must be able to handle old
# results with missing new fields.
metrics = [
('Backtesting from', strat_results['backtest_start'].strftime(DATETIME_PRINT_FORMAT)),
('Backtesting to', strat_results['backtest_end'].strftime(DATETIME_PRINT_FORMAT)),
('Backtesting from', strat_results['backtest_start']),
('Backtesting to', strat_results['backtest_end']),
('Max open trades', strat_results['max_open_trades']),
('', ''), # Empty line to improve readability
('Total trades', strat_results['total_trades']),
('Total/Daily Avg Trades',
f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),
('Starting balance', round_coin_value(strat_results['starting_balance'],
strat_results['stake_currency'])),
('Final balance', round_coin_value(strat_results['final_balance'],
@@ -516,7 +614,6 @@ def text_table_add_metrics(strat_results: Dict) -> str:
strat_results['stake_currency'])),
('Total trade volume', round_coin_value(strat_results['total_volume'],
strat_results['stake_currency'])),

('', ''), # Empty line to improve readability
('Best Pair', f"{strat_results['best_pair']['key']} "
f"{round(strat_results['best_pair']['profit_sum_pct'], 2)}%"),
@@ -531,9 +628,10 @@ def text_table_add_metrics(strat_results: Dict) -> str:
('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
strat_results['stake_currency'])),
('Days win/draw/lose', f"{strat_results['winning_days']} / "
f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
('Avg. Duration Loser', f"{strat_results['loser_holding_avg']}"),
('Rejected Buy signals', strat_results.get('rejected_signals', 'N/A')),
('', ''), # Empty line to improve readability

('Min balance', round_coin_value(strat_results['csum_min'],
@@ -548,8 +646,8 @@ def text_table_add_metrics(strat_results: Dict) -> str:
strat_results['stake_currency'])),
('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
strat_results['stake_currency'])),
('Drawdown Start', strat_results['drawdown_start'].strftime(DATETIME_PRINT_FORMAT)),
('Drawdown End', strat_results['drawdown_end'].strftime(DATETIME_PRINT_FORMAT)),
('Drawdown Start', strat_results['drawdown_start']),
('Drawdown End', strat_results['drawdown_end']),
('Market change', f"{round(strat_results['market_change'] * 100, 2)}%"),
]

@@ -559,7 +657,7 @@ def text_table_add_metrics(strat_results: Dict) -> str:
strat_results['stake_currency'])
stake_amount = round_coin_value(
strat_results['stake_amount'], strat_results['stake_currency']
) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'
) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'

message = ("No trades made. "
f"Your starting balance was {start_balance}, "
@@ -568,49 +666,58 @@ def text_table_add_metrics(strat_results: Dict) -> str:
return message

def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
show_days=False):
"""
Print results for one strategy
"""
# Print results
print(f"Result for strategy {strategy}")
table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
if isinstance(table, str):
print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
print(table)

table = text_table_sell_reason(sell_reason_stats=results['sell_reason_summary'],
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' SELL REASON STATS '.center(len(table.splitlines()[0]), '='))
print(table)

table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
print(table)

if show_days:
table = text_table_days_breakdown(days_breakdown_stats=results['days_breakdown_stats'],
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' DAYS BREAKDOWN '.center(len(table.splitlines()[0]), '='))
print(table)

table = text_table_add_metrics(results)
if isinstance(table, str) and len(table) > 0:
print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
print(table)

if isinstance(table, str) and len(table) > 0:
print('=' * len(table.splitlines()[0]))
print()

def show_backtest_results(config: Dict, backtest_stats: Dict):
stake_currency = config['stake_currency']

for strategy, results in backtest_stats['strategy'].items():

# Print results
print(f"Result for strategy {strategy}")
table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
if isinstance(table, str):
print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
print(table)

table = text_table_sell_reason(sell_reason_stats=results['sell_reason_summary'],
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' SELL REASON STATS '.center(len(table.splitlines()[0]), '='))
print(table)

table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
print(table)

if config.get('show_days', False):
table = text_table_days_breakdown(days_breakdown_stats=results['days_breakdown_stats'],
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' DAYS BREAKDOWN '.center(len(table.splitlines()[0]), '='))
print(table)

table = text_table_add_metrics(results)
if isinstance(table, str) and len(table) > 0:
print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
print(table)

if isinstance(table, str) and len(table) > 0:
print('=' * len(table.splitlines()[0]))
print()
show_backtest_result(strategy, results, stake_currency, config.get('show_days', False))

if len(backtest_stats['strategy']) > 1:
# Print Strategy summary table

table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
print(f"{results['backtest_start']} -> {results['backtest_end']} |"
f" Max open trades : {results['max_open_trades']}")
print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
print(table)
print('=' * len(table.splitlines()[0]))
4 freqtrade/optimize/space/__init__.py Normal file
@@ -0,0 +1,4 @@
# flake8: noqa: F401
from skopt.space import Categorical, Dimension, Integer, Real

from .decimalspace import SKDecimal
33 freqtrade/optimize/space/decimalspace.py Normal file
@@ -0,0 +1,33 @@
import numpy as np
from skopt.space import Integer


class SKDecimal(Integer):

def __init__(self, low, high, decimals=3, prior="uniform", base=10, transform=None,
name=None, dtype=np.int64):
self.decimals = decimals
_low = int(low * pow(10, self.decimals))
_high = int(high * pow(10, self.decimals))
# trunc to precision to avoid points out of space
self.low_orig = round(_low * pow(0.1, self.decimals), self.decimals)
self.high_orig = round(_high * pow(0.1, self.decimals), self.decimals)

super().__init__(_low, _high, prior, base, transform, name, dtype)

def __repr__(self):
return "Decimal(low={}, high={}, decimals={}, prior='{}', transform='{}')".format(
self.low_orig, self.high_orig, self.decimals, self.prior, self.transform_)

def __contains__(self, point):
if isinstance(point, list):
point = np.array(point)
return self.low_orig <= point <= self.high_orig

def transform(self, Xt):
aa = [int(x * pow(10, self.decimals)) for x in Xt]
return super().transform(aa)

def inverse_transform(self, Xt):
res = super().inverse_transform(Xt)
return [round(x * pow(0.1, self.decimals), self.decimals) for x in res]
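The new SKDecimal dimension maps a float range onto an integer grid so the optimizer only proposes values with a fixed number of decimals. A minimal usage sketch (the bounds, decimals and the 'stoploss' name below are invented for the example, not taken from this commit):

from freqtrade.optimize.space import SKDecimal

# Behaves internally like an Integer dimension over the scaled bounds (roughly -350..-20);
# inverse_transform() converts sampled points back to 3-decimal floats.
stoploss_space = [SKDecimal(-0.35, -0.02, decimals=3, name='stoploss')]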