From 0a49dcb7124b3e4ba96bbbea1faeff27c6801994 Mon Sep 17 00:00:00 2001 From: orehunt Date: Fri, 21 Feb 2020 10:40:26 +0100 Subject: [PATCH 01/21] - batched hyperopt - auto epochs --- freqtrade/commands/optimize_commands.py | 5 +- freqtrade/optimize/hyperopt.py | 295 +++++++++++++++--------- freqtrade/optimize/hyperopt_backend.py | 30 +++ setup.cfg | 4 + 4 files changed, 229 insertions(+), 105 deletions(-) create mode 100644 freqtrade/optimize/hyperopt_backend.py diff --git a/freqtrade/commands/optimize_commands.py b/freqtrade/commands/optimize_commands.py index a2d1b4601..e1db5ec9f 100644 --- a/freqtrade/commands/optimize_commands.py +++ b/freqtrade/commands/optimize_commands.py @@ -54,6 +54,7 @@ def start_hyperopt(args: Dict[str, Any]) -> None: try: from filelock import FileLock, Timeout from freqtrade.optimize.hyperopt import Hyperopt + from freqtrade.optimize import hyperopt_backend as backend except ImportError as e: raise OperationalException( f"{e}. Please ensure that the hyperopt dependencies are installed.") from e @@ -72,8 +73,8 @@ def start_hyperopt(args: Dict[str, Any]) -> None: logging.getLogger('filelock').setLevel(logging.WARNING) # Initialize backtesting object - hyperopt = Hyperopt(config) - hyperopt.start() + backend.hyperopt = Hyperopt(config) + backend.hyperopt.start() except Timeout: logger.info("Another running instance of freqtrade Hyperopt detected.") diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 0f9076770..14a77cdf5 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -1,15 +1,17 @@ # pragma pylint: disable=too-many-instance-attributes, pointless-string-statement - """ This module contains the hyperopt logic """ +import os +import functools import locale import logging import random import sys import warnings from collections import OrderedDict +from math import factorial, log from operator import itemgetter from pathlib import Path from pprint import pprint @@ -18,9 +20,6 @@ from typing import Any, Dict, List, Optional import rapidjson from colorama import Fore, Style from colorama import init as colorama_init -from joblib import (Parallel, cpu_count, delayed, dump, load, - wrap_non_picklable_objects) -from pandas import DataFrame from freqtrade.data.converter import trim_dataframe from freqtrade.data.history import get_timerange @@ -28,10 +27,14 @@ from freqtrade.exceptions import OperationalException from freqtrade.misc import plural, round_dict from freqtrade.optimize.backtesting import Backtesting # Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules +from freqtrade.optimize.hyperopt_backend import CustomImmediateResultBackend from freqtrade.optimize.hyperopt_interface import IHyperOpt # noqa: F401 from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss # noqa: F401 -from freqtrade.resolvers.hyperopt_resolver import (HyperOptLossResolver, - HyperOptResolver) +from freqtrade.resolvers.hyperopt_resolver import (HyperOptLossResolver, HyperOptResolver) +from joblib import (Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects) +from joblib._parallel_backends import LokyBackend +from joblib import register_parallel_backend, parallel_backend +from pandas import DataFrame # Suppress scikit-learn FutureWarnings from skopt with warnings.catch_warnings(): @@ -39,10 +42,8 @@ with warnings.catch_warnings(): from skopt import Optimizer from skopt.space import Dimension - logger = logging.getLogger(__name__) - INITIAL_POINTS = 30 # Keep no more than 
2*SKOPT_MODELS_MAX_NUM models @@ -60,7 +61,6 @@ class Hyperopt: hyperopt = Hyperopt(config) hyperopt.start() """ - def __init__(self, config: Dict[str, Any]) -> None: self.config = config @@ -71,13 +71,21 @@ class Hyperopt: self.custom_hyperoptloss = HyperOptLossResolver.load_hyperoptloss(self.config) self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function - self.trials_file = (self.config['user_data_dir'] / - 'hyperopt_results' / 'hyperopt_results.pickle') - self.tickerdata_pickle = (self.config['user_data_dir'] / - 'hyperopt_results' / 'hyperopt_tickerdata.pkl') - self.total_epochs = config.get('epochs', 0) + self.trials_file = (self.config['user_data_dir'] / 'hyperopt_results' / + 'hyperopt_results.pickle') + self.tickerdata_pickle = (self.config['user_data_dir'] / 'hyperopt_results' / + 'hyperopt_tickerdata.pkl') + self.effort = config.get('epochs', 0) or 1 + self.total_epochs = 9999 + self.max_epoch = 9999 + self.search_space_size = 0 + self.max_epoch_reached = False + self.min_epochs = INITIAL_POINTS self.current_best_loss = 100 + self.current_best_epoch = 0 + self.epochs_since_last_best = [] + self.avg_best_occurrence = 0 if not self.config.get('hyperopt_continue'): self.clean_hyperopt() @@ -89,6 +97,10 @@ class Hyperopt: # Previous evaluations self.trials: List = [] + self.opt: Optimizer + self.opt = None + self.f_val: List = [] + # Populate functions here (hasattr is slow so should not be run during "regular" operations) if hasattr(self.custom_hyperopt, 'populate_indicators'): self.backtesting.strategy.advise_indicators = \ @@ -175,24 +187,27 @@ class Hyperopt: result: Dict = {} if self.has_space('buy'): - result['buy'] = {p.name: params.get(p.name) - for p in self.hyperopt_space('buy')} + result['buy'] = {p.name: params.get(p.name) for p in self.hyperopt_space('buy')} if self.has_space('sell'): - result['sell'] = {p.name: params.get(p.name) - for p in self.hyperopt_space('sell')} + result['sell'] = {p.name: params.get(p.name) for p in self.hyperopt_space('sell')} if self.has_space('roi'): result['roi'] = self.custom_hyperopt.generate_roi_table(params) if self.has_space('stoploss'): - result['stoploss'] = {p.name: params.get(p.name) - for p in self.hyperopt_space('stoploss')} + result['stoploss'] = { + p.name: params.get(p.name) + for p in self.hyperopt_space('stoploss') + } if self.has_space('trailing'): result['trailing'] = self.custom_hyperopt.generate_trailing_params(params) return result @staticmethod - def print_epoch_details(results, total_epochs: int, print_json: bool, - no_header: bool = False, header_str: str = None) -> None: + def print_epoch_details(results, + total_epochs: int, + print_json: bool, + no_header: bool = False, + header_str: str = None) -> None: """ Display details of the hyperopt result """ @@ -231,8 +246,7 @@ class Hyperopt: # OrderedDict is used to keep the numeric order of the items # in the dict. result_dict['minimal_roi'] = OrderedDict( - (str(k), v) for k, v in space_params.items() - ) + (str(k), v) for k, v in space_params.items()) else: # 'stoploss', 'trailing' result_dict.update(space_params) @@ -261,16 +275,7 @@ class Hyperopt: Log results if it is better than any previous evaluation """ is_best = results['is_best'] - if not self.print_all: - # Print '\n' after each 100th epoch to separate dots from the log messages. - # Otherwise output is messy on a terminal. 
- print('.', end='' if results['current_epoch'] % 100 != 0 else None) # type: ignore - sys.stdout.flush() - if self.print_all or is_best: - if not self.print_all: - # Separate the results explanation string from dots - print("\n") self.print_results_explanation(results, self.total_epochs, self.print_all, self.print_colorized) @@ -291,10 +296,9 @@ class Hyperopt: @staticmethod def _format_explanation_string(results, total_epochs) -> str: - return (("*" if results['is_initial_point'] else " ") + + return (("*" if 'is_initial_point' in results and results['is_initial_point'] else " ") + f"{results['current_epoch']:5d}/{total_epochs}: " + - f"{results['results_explanation']} " + - f"Objective: {results['loss']:.5f}") + f"{results['results_explanation']} " + f"Objective: {results['loss']:.5f}") def has_space(self, space: str) -> bool: """ @@ -381,11 +385,11 @@ class Hyperopt: max_open_trades=self.max_open_trades, position_stacking=self.position_stacking, ) - return self._get_results_dict(backtesting_results, min_date, max_date, - params_dict, params_details) + return self._get_results_dict(backtesting_results, min_date, max_date, params_dict, + params_details) - def _get_results_dict(self, backtesting_results, min_date, max_date, - params_dict, params_details): + def _get_results_dict(self, backtesting_results, min_date, max_date, params_dict, + params_details): results_metrics = self._calculate_results_metrics(backtesting_results) results_explanation = self._format_results_explanation_string(results_metrics) @@ -398,8 +402,10 @@ class Hyperopt: # path. We do not want to optimize 'hodl' strategies. loss: float = MAX_LOSS if trade_count >= self.config['hyperopt_min_trades']: - loss = self.calculate_loss(results=backtesting_results, trade_count=trade_count, - min_date=min_date.datetime, max_date=max_date.datetime) + loss = self.calculate_loss(results=backtesting_results, + trade_count=trade_count, + min_date=min_date.datetime, + max_date=max_date.datetime) return { 'loss': loss, 'params_dict': params_dict, @@ -427,39 +433,75 @@ class Hyperopt: f"Avg profit {results_metrics['avg_profit']: 6.2f}%. " f"Total profit {results_metrics['total_profit']: 11.8f} {stake_cur} " f"({results_metrics['profit']: 7.2f}\N{GREEK CAPITAL LETTER SIGMA}%). " - f"Avg duration {results_metrics['duration']:5.1f} min." 
- ).encode(locale.getpreferredencoding(), 'replace').decode('utf-8') + f"Avg duration {results_metrics['duration']:5.1f} min.").encode( + locale.getpreferredencoding(), 'replace').decode('utf-8') - def get_optimizer(self, dimensions: List[Dimension], cpu_count) -> Optimizer: + def get_optimizer(self, dimensions: List[Dimension], cpu_count, + n_initial_points=INITIAL_POINTS) -> Optimizer: return Optimizer( dimensions, base_estimator="ET", acq_optimizer="auto", - n_initial_points=INITIAL_POINTS, + n_initial_points=n_initial_points, acq_optimizer_kwargs={'n_jobs': cpu_count}, + model_queue_size=SKOPT_MODELS_MAX_NUM, random_state=self.random_state, ) - def fix_optimizer_models_list(self) -> None: - """ - WORKAROUND: Since skopt is not actively supported, this resolves problems with skopt - memory usage, see also: https://github.com/scikit-optimize/scikit-optimize/pull/746 + def run_optimizer_parallel(self, parallel, tries: int, first_try: int) -> List: + result = parallel( + delayed(wrap_non_picklable_objects(self.parallel_objective))(asked, i) + for asked, i in zip(self.opt_generator(), range(first_try, first_try + tries))) + return result - This may cease working when skopt updates if implementation of this intrinsic - part changes. - """ - n = len(self.opt.models) - SKOPT_MODELS_MAX_NUM - # Keep no more than 2*SKOPT_MODELS_MAX_NUM models in the skopt models list, - # remove the old ones. These are actually of no use, the current model - # from the estimator is the only one used in the skopt optimizer. - # Freqtrade code also does not inspect details of the models. - if n >= SKOPT_MODELS_MAX_NUM: - logger.debug(f"Fixing skopt models list, removing {n} old items...") - del self.opt.models[0:n] + def opt_generator(self): + while True: + if self.f_val: + # print("opt.tell(): ", + # [v['params_dict'] for v in self.f_val], [v['loss'] for v in self.f_val]) + functools.partial(self.opt.tell, + ([v['params_dict'] + for v in self.f_val], [v['loss'] for v in self.f_val])) + self.f_val = [] + yield self.opt.ask() - def run_optimizer_parallel(self, parallel, asked, i) -> List: - return parallel(delayed( - wrap_non_picklable_objects(self.generate_optimizer))(v, i) for v in asked) + def parallel_objective(self, asked, n): + self.log_results_immediate(n) + return self.generate_optimizer(asked) + + def parallel_callback(self, f_val): + self.f_val.extend(f_val) + + def log_results_immediate(self, n) -> None: + print('.', end='') + sys.stdout.flush() + + def log_results(self, f_val, frame_start, max_epoch) -> None: + """ + Log results if it is better than any previous evaluation + """ + for i, v in enumerate(f_val): + is_best = self.is_best_loss(v, self.current_best_loss) + current = frame_start + i + 1 + v['is_best'] = is_best + v['current_epoch'] = current + v['is_initial_point'] = current <= self.n_initial_points + logger.debug(f"Optimizer epoch evaluated: {v}") + if is_best: + self.current_best_loss = v['loss'] + self.update_max_epoch(v, current) + self.print_results(v) + self.trials.append(v) + # Save results after every batch + print('\n') + self.save_trials() + # give up if no best since max epochs + if current > self.max_epoch: + self.max_epoch_reached = True + # testing trapdoor + if os.getenv('FQT_HYPEROPT_TRAP'): + logger.debug('bypassing hyperopt loop') + self.max_epoch = 1 @staticmethod def load_previous_results(trials_file: Path) -> List: @@ -479,6 +521,55 @@ class Hyperopt: def _set_random_state(self, random_state: Optional[int]) -> int: return random_state or random.randint(1, 2**16 - 1) + 
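
(For context, a minimal standalone sketch of the batched ask/tell pattern that `run_optimizer_parallel` and `opt_generator` above implement: candidates are drawn from the optimizer one batch at a time, evaluated, and the losses are reported back with `tell()` before the next batch is asked. Only the `skopt.Optimizer` calls mirror this patch; the two-parameter space and the toy objective are invented for illustration.)

```python
from skopt import Optimizer
from skopt.space import Integer, Real

# Hypothetical two-parameter space standing in for a real hyperopt space.
opt = Optimizer([Integer(5, 50), Real(0.01, 0.10)],
                base_estimator="ET", n_initial_points=8)

def toy_loss(params):
    # Stand-in for generate_optimizer(): lower is better.
    period, stoploss = params
    return (period - 20) ** 2 + abs(stoploss - 0.05)

for _ in range(4):                   # four batches ("frames")
    asked = opt.ask(n_points=4)      # e.g. one candidate per parallel worker
    losses = [toy_loss(x) for x in asked]
    opt.tell(asked, losses)          # report results before asking again

print(f"best loss so far: {min(opt.yi):.5f}")
```
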
@staticmethod + def calc_epochs(dimensions: List[Dimension], config_jobs: int, effort: int): + """ Compute a reasonable number of initial points and + a minimum number of epochs to evaluate """ + n_dimensions = len(dimensions) + n_parameters = 0 + # sum all the dimensions discretely, granting minimum values + for d in dimensions: + if type(d).__name__ == 'Integer': + n_parameters += max(1, d.high - d.low) + elif type(d).__name__ == 'Real': + n_parameters += max(10, int(d.high - d.low)) + else: + n_parameters += len(d.bounds) + # guess the size of the search space as the count of the + # unordered combination of the dimensions entries + search_space_size = (factorial(n_parameters) / + (factorial(n_parameters - n_dimensions) * factorial(n_dimensions))) + # logger.info(f'Search space size: {search_space_size}') + if search_space_size < config_jobs: + # don't waste if the space is small + n_initial_points = config_jobs + else: + # extract coefficients from the search space and the jobs count + log_sss = int(log(search_space_size, 10)) + log_jobs = int(log(config_jobs, 2)) + log_jobs = 2 if log_jobs < 0 else log_jobs + jobs_ip = log_jobs * log_sss + # never waste + n_initial_points = log_sss if jobs_ip > search_space_size else jobs_ip + # it shall run for this much, I say + min_epochs = max(2 * n_initial_points, 3 * config_jobs) * effort + return n_initial_points, min_epochs, search_space_size + + def update_max_epoch(self, val: Dict, current: int): + """ calculate max epochs: store the number of non best epochs + between each best, and get the mean of that value """ + if val['is_initial_point'] is not True: + self.epochs_since_last_best.append(current - self.current_best_epoch) + self.avg_best_occurrence = (sum(self.epochs_since_last_best) // + len(self.epochs_since_last_best)) + self.current_best_epoch = current + self.max_epoch = (self.current_best_epoch + self.avg_best_occurrence + + self.min_epochs) * self.effort + if self.max_epoch > self.search_space_size: + self.max_epoch = self.search_space_size + print('\n') + logger.info(f'Max epochs set to: {self.max_epoch}') + def start(self) -> None: self.random_state = self._set_random_state(self.config.get('hyperopt_random_state', None)) logger.info(f"Using optimizer random state: {self.random_state}") @@ -492,10 +583,8 @@ class Hyperopt: preprocessed[pair] = trim_dataframe(df, timerange) min_date, max_date = get_timerange(data) - logger.info( - 'Hyperopting with data from %s up to %s (%s days)..', - min_date.isoformat(), max_date.isoformat(), (max_date - min_date).days - ) + logger.info('Hyperopting with data from %s up to %s (%s days)..', min_date.isoformat(), + max_date.isoformat(), (max_date - min_date).days) dump(preprocessed, self.tickerdata_pickle) # We don't need exchange instance anymore while running hyperopt @@ -509,46 +598,41 @@ class Hyperopt: logger.info(f'Number of parallel jobs set as: {config_jobs}') self.dimensions: List[Dimension] = self.hyperopt_space() - self.opt = self.get_optimizer(self.dimensions, config_jobs) + self.n_initial_points, self.min_epochs, self.search_space_size = self.calc_epochs( + self.dimensions, config_jobs, self.effort) + logger.info(f"Min epochs set to: {self.min_epochs}") + self.max_epoch = self.min_epochs + self.avg_best_occurrence = self.max_epoch + + logger.info(f'Initial points: {self.n_initial_points}') + self.opt = self.get_optimizer(self.dimensions, config_jobs, self.n_initial_points) + + # last_frame_len = (self.total_epochs - 1) % self.avg_best_occurrence if self.print_colorized: 
colorama_init(autoreset=True) - try: - with Parallel(n_jobs=config_jobs) as parallel: - jobs = parallel._effective_n_jobs() - logger.info(f'Effective number of parallel workers used: {jobs}') - EVALS = max(self.total_epochs // jobs, 1) - for i in range(EVALS): - asked = self.opt.ask(n_points=jobs) - f_val = self.run_optimizer_parallel(parallel, asked, i) - self.opt.tell(asked, [v['loss'] for v in f_val]) - self.fix_optimizer_models_list() - for j in range(jobs): - # Use human-friendly indexes here (starting from 1) - current = i * jobs + j + 1 - val = f_val[j] - val['current_epoch'] = current - val['is_initial_point'] = current <= INITIAL_POINTS - logger.debug(f"Optimizer epoch evaluated: {val}") + try: + register_parallel_backend('custom', CustomImmediateResultBackend) + with parallel_backend('custom'): + with Parallel(n_jobs=config_jobs, verbose=0) as parallel: + for frame in range(self.total_epochs): + epochs_so_far = len(self.trials) + # pad the frame length to the number of jobs to avoid desaturation + frame_len = (self.avg_best_occurrence + config_jobs - + self.avg_best_occurrence % config_jobs) + print( + f"{epochs_so_far+1}-{epochs_so_far+self.avg_best_occurrence}" + f"/{self.total_epochs}: ", + end='') + f_val = self.run_optimizer_parallel(parallel, frame_len, epochs_so_far) + self.log_results(f_val, epochs_so_far, self.total_epochs) + if self.max_epoch_reached: + logger.info("Max epoch reached, terminating.") + break - is_best = self.is_best_loss(val, self.current_best_loss) - # This value is assigned here and not in the optimization method - # to keep proper order in the list of results. That's because - # evaluations can take different time. Here they are aligned in the - # order they will be shown to the user. - val['is_best'] = is_best - - self.print_results(val) - - if is_best: - self.current_best_loss = val['loss'] - self.trials.append(val) - # Save results after each best epoch and every 100 epochs - if is_best or current % 100 == 0: - self.save_trials() - except KeyboardInterrupt: - print('User interrupted..') + except KeyboardInterrupt: + print("User interrupted..") self.save_trials(final=True) @@ -560,3 +644,8 @@ class Hyperopt: # This is printed when Ctrl+C is pressed quickly, before first epochs have # a chance to be evaluated. print("No epochs evaluated yet, no best result.") + + def __getstate__(self): + state = self.__dict__.copy() + del state['trials'] + return state diff --git a/freqtrade/optimize/hyperopt_backend.py b/freqtrade/optimize/hyperopt_backend.py new file mode 100644 index 000000000..d7a8544cc --- /dev/null +++ b/freqtrade/optimize/hyperopt_backend.py @@ -0,0 +1,30 @@ +from joblib._parallel_backends import LokyBackend + +hyperopt = None + + +class MultiCallback: + def __init__(self, *callbacks): + self.callbacks = [cb for cb in callbacks if cb] + + def __call__(self, out): + for cb in self.callbacks: + cb(out) + + +class CustomImmediateResultBackend(LokyBackend): + def callback(self, result): + """ + Our custom completion callback. Executed in the parent process. + Use it to run Optimizer.tell() with immediate results of the backtest() + evaluated in the joblib worker process. + """ + if not result.exception(): + # Fetch results from the Future object passed to us. + # Future object is assumed to be 'done' already. 
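+            # (The module-level `hyperopt` used below is the instance assigned
+            # in start_hyperopt(); since this callback runs in the parent
+            # process, the worker's return value can be handed to it directly
+            # and queued for the next Optimizer.tell() batch.)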
+ f_val = result.result().copy() + hyperopt.parallel_callback(f_val) + + def apply_async(self, func, callback=None): + cbs = MultiCallback(callback, self.callback) + return super().apply_async(func, cbs) diff --git a/setup.cfg b/setup.cfg index 34f25482b..9853c99d9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -13,3 +13,7 @@ ignore_missing_imports = True [mypy-tests.*] ignore_errors = True + +[yapf] +based_on_style = pep8 +column_limit = 100 \ No newline at end of file From d96e842a2169aa60aa69a4e951e543142f3107fb Mon Sep 17 00:00:00 2001 From: orehunt Date: Mon, 24 Feb 2020 13:31:46 +0100 Subject: [PATCH 02/21] added effort as new argument --- freqtrade/commands/arguments.py | 132 ++++---- freqtrade/commands/cli_options.py | 303 +++++++++++------- freqtrade/constants.py | 406 +++++++++++++++++++------ freqtrade/optimize/hyperopt.py | 146 +++++---- freqtrade/optimize/hyperopt_backend.py | 3 +- 5 files changed, 660 insertions(+), 330 deletions(-) diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py index 73e77d69d..d4866bd8c 100644 --- a/freqtrade/commands/arguments.py +++ b/freqtrade/commands/arguments.py @@ -15,18 +15,17 @@ ARGS_STRATEGY = ["strategy", "strategy_path"] ARGS_TRADE = ["db_url", "sd_notify", "dry_run"] -ARGS_COMMON_OPTIMIZE = ["ticker_interval", "timerange", - "max_open_trades", "stake_amount", "fee"] +ARGS_COMMON_OPTIMIZE = ["ticker_interval", "timerange", "max_open_trades", "stake_amount", "fee"] -ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + ["position_stacking", "use_max_market_positions", - "strategy_list", "export", "exportfilename"] +ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [ + "position_stacking", "use_max_market_positions", "strategy_list", "export", "exportfilename" +] -ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path", - "position_stacking", "epochs", "spaces", - "use_max_market_positions", "print_all", - "print_colorized", "print_json", "hyperopt_jobs", - "hyperopt_random_state", "hyperopt_min_trades", - "hyperopt_continue", "hyperopt_loss"] +ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [ + "hyperopt", "hyperopt_path", "position_stacking", "epochs", "spaces", + "use_max_market_positions", "print_all", "print_colorized", "print_json", "hyperopt_jobs", + "hyperopt_random_state", "hyperopt_min_trades", "hyperopt_continue", "hyperopt_loss", "effort" +] ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"] @@ -38,8 +37,10 @@ ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all"] ARGS_LIST_TIMEFRAMES = ["exchange", "print_one_column"] -ARGS_LIST_PAIRS = ["exchange", "print_list", "list_pairs_print_json", "print_one_column", - "print_csv", "base_currencies", "quote_currencies", "list_pairs_all"] +ARGS_LIST_PAIRS = [ + "exchange", "print_list", "list_pairs_print_json", "print_one_column", "print_csv", + "base_currencies", "quote_currencies", "list_pairs_all" +] ARGS_TEST_PAIRLIST = ["config", "quote_currencies", "print_one_column", "list_pairs_print_json"] @@ -54,30 +55,38 @@ ARGS_BUILD_HYPEROPT = ["user_data_dir", "hyperopt", "template"] ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase"] ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes"] -ARGS_DOWNLOAD_DATA = ["pairs", "pairs_file", "days", "download_trades", "exchange", - "timeframes", "erase", "dataformat_ohlcv", "dataformat_trades"] +ARGS_DOWNLOAD_DATA = [ + "pairs", "pairs_file", "days", "download_trades", "exchange", "timeframes", "erase", + "dataformat_ohlcv", "dataformat_trades" +] -ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", 
"plot_limit", - "db_url", "trade_source", "export", "exportfilename", - "timerange", "ticker_interval"] +ARGS_PLOT_DATAFRAME = [ + "pairs", "indicators1", "indicators2", "plot_limit", "db_url", "trade_source", "export", + "exportfilename", "timerange", "ticker_interval" +] -ARGS_PLOT_PROFIT = ["pairs", "timerange", "export", "exportfilename", "db_url", - "trade_source", "ticker_interval"] +ARGS_PLOT_PROFIT = [ + "pairs", "timerange", "export", "exportfilename", "db_url", "trade_source", "ticker_interval" +] -ARGS_HYPEROPT_LIST = ["hyperopt_list_best", "hyperopt_list_profitable", - "hyperopt_list_min_trades", "hyperopt_list_max_trades", - "hyperopt_list_min_avg_time", "hyperopt_list_max_avg_time", - "hyperopt_list_min_avg_profit", "hyperopt_list_max_avg_profit", - "hyperopt_list_min_total_profit", "hyperopt_list_max_total_profit", - "print_colorized", "print_json", "hyperopt_list_no_details"] +ARGS_HYPEROPT_LIST = [ + "hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_list_min_trades", + "hyperopt_list_max_trades", "hyperopt_list_min_avg_time", "hyperopt_list_max_avg_time", + "hyperopt_list_min_avg_profit", "hyperopt_list_max_avg_profit", + "hyperopt_list_min_total_profit", "hyperopt_list_max_total_profit", "print_colorized", + "print_json", "hyperopt_list_no_details" +] -ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_show_index", - "print_json", "hyperopt_show_no_header"] +ARGS_HYPEROPT_SHOW = [ + "hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_show_index", "print_json", + "hyperopt_show_no_header" +] -NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes", - "list-markets", "list-pairs", "list-strategies", - "list-hyperopts", "hyperopt-list", "hyperopt-show", - "plot-dataframe", "plot-profit"] +NO_CONF_REQURIED = [ + "convert-data", "convert-trade-data", "download-data", "list-timeframes", "list-markets", + "list-pairs", "list-strategies", "list-hyperopts", "hyperopt-list", "hyperopt-show", + "plot-dataframe", "plot-profit" +] NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-hyperopt", "new-strategy"] @@ -86,7 +95,6 @@ class Arguments: """ Arguments Class. 
Manage the arguments received by the cli """ - def __init__(self, args: Optional[List[str]]) -> None: self.args = args self._parsed_arg: Optional[argparse.Namespace] = None @@ -155,70 +163,70 @@ class Arguments: self.parser = argparse.ArgumentParser(description='Free, open source crypto trading bot') self._build_args(optionlist=['version'], parser=self.parser) - from freqtrade.commands import (start_create_userdir, start_convert_data, - start_download_data, - start_hyperopt_list, start_hyperopt_show, - start_list_exchanges, start_list_hyperopts, - start_list_markets, start_list_strategies, - start_list_timeframes, start_new_config, - start_new_hyperopt, start_new_strategy, - start_plot_dataframe, start_plot_profit, - start_backtesting, start_hyperopt, start_edge, - start_test_pairlist, start_trading) + from freqtrade.commands import ( + start_create_userdir, start_convert_data, start_download_data, start_hyperopt_list, + start_hyperopt_show, start_list_exchanges, start_list_hyperopts, start_list_markets, + start_list_strategies, start_list_timeframes, start_new_config, start_new_hyperopt, + start_new_strategy, start_plot_dataframe, start_plot_profit, start_backtesting, + start_hyperopt, start_edge, start_test_pairlist, start_trading) - subparsers = self.parser.add_subparsers(dest='command', - # Use custom message when no subhandler is added - # shown from `main.py` - # required=True - ) + subparsers = self.parser.add_subparsers( + dest='command', + # Use custom message when no subhandler is added + # shown from `main.py` + # required=True + ) # Add trade subcommand - trade_cmd = subparsers.add_parser('trade', help='Trade module.', + trade_cmd = subparsers.add_parser('trade', + help='Trade module.', parents=[_common_parser, _strategy_parser]) trade_cmd.set_defaults(func=start_trading) self._build_args(optionlist=ARGS_TRADE, parser=trade_cmd) # Add backtesting subcommand - backtesting_cmd = subparsers.add_parser('backtesting', help='Backtesting module.', + backtesting_cmd = subparsers.add_parser('backtesting', + help='Backtesting module.', parents=[_common_parser, _strategy_parser]) backtesting_cmd.set_defaults(func=start_backtesting) self._build_args(optionlist=ARGS_BACKTEST, parser=backtesting_cmd) # Add edge subcommand - edge_cmd = subparsers.add_parser('edge', help='Edge module.', + edge_cmd = subparsers.add_parser('edge', + help='Edge module.', parents=[_common_parser, _strategy_parser]) edge_cmd.set_defaults(func=start_edge) self._build_args(optionlist=ARGS_EDGE, parser=edge_cmd) # Add hyperopt subcommand - hyperopt_cmd = subparsers.add_parser('hyperopt', help='Hyperopt module.', - parents=[_common_parser, _strategy_parser], - ) + hyperopt_cmd = subparsers.add_parser( + 'hyperopt', + help='Hyperopt module.', + parents=[_common_parser, _strategy_parser], + ) hyperopt_cmd.set_defaults(func=start_hyperopt) self._build_args(optionlist=ARGS_HYPEROPT, parser=hyperopt_cmd) # add create-userdir subcommand - create_userdir_cmd = subparsers.add_parser('create-userdir', - help="Create user-data directory.", - ) + create_userdir_cmd = subparsers.add_parser( + 'create-userdir', + help="Create user-data directory.", + ) create_userdir_cmd.set_defaults(func=start_create_userdir) self._build_args(optionlist=ARGS_CREATE_USERDIR, parser=create_userdir_cmd) # add new-config subcommand - build_config_cmd = subparsers.add_parser('new-config', - help="Create new config") + build_config_cmd = subparsers.add_parser('new-config', help="Create new config") build_config_cmd.set_defaults(func=start_new_config) 
self._build_args(optionlist=ARGS_BUILD_CONFIG, parser=build_config_cmd) # add new-strategy subcommand - build_strategy_cmd = subparsers.add_parser('new-strategy', - help="Create new strategy") + build_strategy_cmd = subparsers.add_parser('new-strategy', help="Create new strategy") build_strategy_cmd.set_defaults(func=start_new_strategy) self._build_args(optionlist=ARGS_BUILD_STRATEGY, parser=build_strategy_cmd) # add new-hyperopt subcommand - build_hyperopt_cmd = subparsers.add_parser('new-hyperopt', - help="Create new hyperopt") + build_hyperopt_cmd = subparsers.add_parser('new-hyperopt', help="Create new hyperopt") build_hyperopt_cmd.set_defaults(func=start_new_hyperopt) self._build_args(optionlist=ARGS_BUILD_HYPEROPT, parser=build_hyperopt_cmd) diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index a8d4bc198..c5a4c10d5 100644 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -13,8 +13,7 @@ def check_int_positive(value: str) -> int: raise ValueError except ValueError: raise ArgumentTypeError( - f"{value} is invalid for this parameter, should be a positive integer value" - ) + f"{value} is invalid for this parameter, should be a positive integer value") return uint @@ -25,8 +24,7 @@ def check_int_nonzero(value: str) -> int: raise ValueError except ValueError: raise ArgumentTypeError( - f"{value} is invalid for this parameter, should be a non-zero integer value" - ) + f"{value} is invalid for this parameter, should be a non-zero integer value") return uint @@ -40,25 +38,32 @@ class Arg: # List of available command line options AVAILABLE_CLI_OPTIONS = { # Common options - "verbosity": Arg( - '-v', '--verbose', + "verbosity": + Arg( + '-v', + '--verbose', help='Verbose mode (-vv for more, -vvv to get all messages).', action='count', default=0, ), - "logfile": Arg( + "logfile": + Arg( '--logfile', help="Log to the file specified. Special values are: 'syslog', 'journald'. " - "See the documentation for more details.", + "See the documentation for more details.", metavar='FILE', ), - "version": Arg( - '-V', '--version', + "version": + Arg( + '-V', + '--version', action='version', version=f'%(prog)s {__version__}', ), - "config": Arg( - '-c', '--config', + "config": + Arg( + '-c', + '--config', help=f'Specify configuration file (default: `userdir/{constants.DEFAULT_CONFIG}` ' f'or `config.json` whichever exists). ' f'Multiple --config options may be used. 
' @@ -66,84 +71,105 @@ AVAILABLE_CLI_OPTIONS = { action='append', metavar='PATH', ), - "datadir": Arg( - '-d', '--datadir', + "datadir": + Arg( + '-d', + '--datadir', help='Path to directory with historical backtesting data.', metavar='PATH', ), - "user_data_dir": Arg( - '--userdir', '--user-data-dir', + "user_data_dir": + Arg( + '--userdir', + '--user-data-dir', help='Path to userdata directory.', metavar='PATH', ), - "reset": Arg( + "reset": + Arg( '--reset', help='Reset sample files to their original state.', action='store_true', ), # Main options - "strategy": Arg( - '-s', '--strategy', + "strategy": + Arg( + '-s', + '--strategy', help='Specify strategy class name which will be used by the bot.', metavar='NAME', ), - "strategy_path": Arg( + "strategy_path": + Arg( '--strategy-path', help='Specify additional strategy lookup path.', metavar='PATH', ), - "db_url": Arg( + "db_url": + Arg( '--db-url', help=f'Override trades database URL, this is useful in custom deployments ' f'(default: `{constants.DEFAULT_DB_PROD_URL}` for Live Run mode, ' f'`{constants.DEFAULT_DB_DRYRUN_URL}` for Dry Run).', metavar='PATH', ), - "sd_notify": Arg( + "sd_notify": + Arg( '--sd-notify', help='Notify systemd service manager.', action='store_true', ), - "dry_run": Arg( + "dry_run": + Arg( '--dry-run', help='Enforce dry-run for trading (removes Exchange secrets and simulates trades).', action='store_true', ), # Optimize common - "ticker_interval": Arg( - '-i', '--ticker-interval', + "ticker_interval": + Arg( + '-i', + '--ticker-interval', help='Specify ticker interval (`1m`, `5m`, `30m`, `1h`, `1d`).', ), - "timerange": Arg( + "timerange": + Arg( '--timerange', help='Specify what timerange of data to use.', ), - "max_open_trades": Arg( + "max_open_trades": + Arg( '--max-open-trades', help='Override the value of the `max_open_trades` configuration setting.', type=int, metavar='INT', ), - "stake_amount": Arg( + "stake_amount": + Arg( '--stake-amount', help='Override the value of the `stake_amount` configuration setting.', type=float, ), # Backtesting - "position_stacking": Arg( - '--eps', '--enable-position-stacking', + "position_stacking": + Arg( + '--eps', + '--enable-position-stacking', help='Allow buying the same pair multiple times (position stacking).', action='store_true', default=False, ), - "use_max_market_positions": Arg( - '--dmmp', '--disable-max-market-positions', + "use_max_market_positions": + Arg( + '--dmmp', + '--disable-max-market-positions', help='Disable applying `max_open_trades` during backtest ' '(same as setting `max_open_trades` to a very high number).', action='store_false', default=True, ), - "strategy_list": Arg( + "strategy_list": + Arg( '--strategy-list', help='Provide a space-separated list of strategies to backtest. ' 'Please note that ticker-interval needs to be set either in config ' @@ -152,77 +178,100 @@ AVAILABLE_CLI_OPTIONS = { '(so `backtest-data.json` becomes `backtest-data-DefaultStrategy.json`', nargs='+', ), - "export": Arg( + "export": + Arg( '--export', help='Export backtest results, argument are: trades. ' 'Example: `--export=trades`', ), - "exportfilename": Arg( + "exportfilename": + Arg( '--export-filename', help='Save backtest results to the file with this filename. ' 'Requires `--export` to be set as well. ' 'Example: `--export-filename=user_data/backtest_results/backtest_today.json`', metavar='PATH', ), - "fee": Arg( + "fee": + Arg( '--fee', help='Specify fee ratio. 
        Will be applied twice (on trade entry and exit).',
        type=float,
        metavar='FLOAT',
    ),
    # Edge
-    "stoploss_range": Arg(
+    "stoploss_range":
+    Arg(
        '--stoplosses',
        help='Defines a range of stoploss values against which edge will assess the strategy. '
        'The format is "min,max,step" (without any space). '
        'Example: `--stoplosses=-0.01,-0.1,-0.001`',
    ),
    # Hyperopt
-    "hyperopt": Arg(
+    "hyperopt":
+    Arg(
        '--hyperopt',
        help='Specify hyperopt class name which will be used by the bot.',
        metavar='NAME',
    ),
-    "hyperopt_path": Arg(
+    "hyperopt_path":
+    Arg(
        '--hyperopt-path',
        help='Specify additional lookup path for Hyperopt and Hyperopt Loss functions.',
        metavar='PATH',
    ),
-    "epochs": Arg(
-        '-e', '--epochs',
+    "epochs":
+    Arg(
+        '-e',
+        '--epochs',
        help='Specify number of epochs (default: %(default)d).',
        type=check_int_positive,
        metavar='INT',
        default=constants.HYPEROPT_EPOCH,
    ),
-    "spaces": Arg(
+    "effort":
+    Arg(
+        '--effort',
+        help=('The higher the number, the longer the search will run '
+              'if no epochs are defined (default: %(default)d).'),
+        type=check_int_positive,
+        metavar='INT',
+        default=constants.HYPEROPT_EFFORT,
+    ),
+    "spaces":
+    Arg(
        '--spaces',
        help='Specify which parameters to hyperopt. Space-separated list.',
        choices=['all', 'buy', 'sell', 'roi', 'stoploss', 'trailing', 'default'],
        nargs='+',
        default='default',
    ),
-    "print_all": Arg(
+    "print_all":
+    Arg(
        '--print-all',
        help='Print all results, not only the best ones.',
        action='store_true',
        default=False,
    ),
-    "print_colorized": Arg(
+    "print_colorized":
+    Arg(
        '--no-color',
        help='Disable colorization of hyperopt results. May be useful if you are '
        'redirecting output to a file.',
        action='store_false',
        default=True,
    ),
-    "print_json": Arg(
+    "print_json":
+    Arg(
        '--print-json',
        help='Print best result detailization in JSON format.',
        action='store_true',
        default=False,
    ),
-    "hyperopt_jobs": Arg(
-        '-j', '--job-workers',
+    "hyperopt_jobs":
+    Arg(
+        '-j',
+        '--job-workers',
        help='The number of concurrently running jobs for hyperoptimization '
        '(hyperopt worker processes). '
        'If -1 (default), all CPUs are used, for -2, all CPUs but one are used, etc. '
        metavar='JOBS',
        default=-1,
    ),
-    "hyperopt_random_state": Arg(
+    "hyperopt_random_state":
+    Arg(
        '--random-state',
        help='Set random state to some positive integer for reproducible hyperopt results.',
        type=check_int_positive,
        metavar='INT',
    ),
-    "hyperopt_min_trades": Arg(
+    "hyperopt_min_trades":
+    Arg(
        '--min-trades',
        help="Set minimal desired number of trades for evaluations in the hyperopt "
        "optimization path (default: 1).",
        type=check_int_positive,
        metavar='INT',
        default=1,
    ),
-    "hyperopt_continue": Arg(
+    "hyperopt_continue":
+    Arg(
        "--continue",
        help="Continue hyperopt from previous runs. "
        "By default, temporary files will be removed and hyperopt will start from scratch.",
        default=False,
        action='store_true',
    ),
-    "hyperopt_loss": Arg(
+    "hyperopt_loss":
+    Arg(
        '--hyperopt-loss',
        help='Specify the class name of the hyperopt loss function class (IHyperOptLoss). 
' 'Different functions can generate completely different results, ' @@ -263,121 +316,143 @@ AVAILABLE_CLI_OPTIONS = { default=constants.DEFAULT_HYPEROPT_LOSS, ), # List exchanges - "print_one_column": Arg( - '-1', '--one-column', + "print_one_column": + Arg( + '-1', + '--one-column', help='Print output in one column.', action='store_true', ), - "list_exchanges_all": Arg( - '-a', '--all', + "list_exchanges_all": + Arg( + '-a', + '--all', help='Print all exchanges known to the ccxt library.', action='store_true', ), # List pairs / markets - "list_pairs_all": Arg( - '-a', '--all', + "list_pairs_all": + Arg( + '-a', + '--all', help='Print all pairs or market symbols. By default only active ' - 'ones are shown.', + 'ones are shown.', action='store_true', ), - "print_list": Arg( + "print_list": + Arg( '--print-list', help='Print list of pairs or market symbols. By default data is ' - 'printed in the tabular format.', + 'printed in the tabular format.', action='store_true', ), - "list_pairs_print_json": Arg( + "list_pairs_print_json": + Arg( '--print-json', help='Print list of pairs or market symbols in JSON format.', action='store_true', default=False, ), - "print_csv": Arg( + "print_csv": + Arg( '--print-csv', help='Print exchange pair or market data in the csv format.', action='store_true', ), - "quote_currencies": Arg( + "quote_currencies": + Arg( '--quote', help='Specify quote currency(-ies). Space-separated list.', nargs='+', metavar='QUOTE_CURRENCY', ), - "base_currencies": Arg( + "base_currencies": + Arg( '--base', help='Specify base currency(-ies). Space-separated list.', nargs='+', metavar='BASE_CURRENCY', ), # Script options - "pairs": Arg( - '-p', '--pairs', + "pairs": + Arg( + '-p', + '--pairs', help='Show profits for only these pairs. Pairs are space-separated.', nargs='+', ), # Download data - "pairs_file": Arg( + "pairs_file": + Arg( '--pairs-file', help='File containing a list of pairs to download.', metavar='FILE', ), - "days": Arg( + "days": + Arg( '--days', help='Download data for given number of days.', type=check_int_positive, metavar='INT', ), - "download_trades": Arg( + "download_trades": + Arg( '--dl-trades', help='Download trades instead of OHLCV data. The bot will resample trades to the ' - 'desired timeframe as specified as --timeframes/-t.', + 'desired timeframe as specified as --timeframes/-t.', action='store_true', ), - "format_from": Arg( + "format_from": + Arg( '--format-from', help='Source format for data conversion.', choices=constants.AVAILABLE_DATAHANDLERS, required=True, ), - "format_to": Arg( + "format_to": + Arg( '--format-to', help='Destination format for data conversion.', choices=constants.AVAILABLE_DATAHANDLERS, required=True, ), - "dataformat_ohlcv": Arg( - '--data-format-ohlcv', + "dataformat_ohlcv": + Arg('--data-format-ohlcv', help='Storage format for downloaded ohlcv data. (default: `%(default)s`).', choices=constants.AVAILABLE_DATAHANDLERS, - default='json' - ), - "dataformat_trades": Arg( - '--data-format-trades', + default='json'), + "dataformat_trades": + Arg('--data-format-trades', help='Storage format for downloaded trades data. (default: `%(default)s`).', choices=constants.AVAILABLE_DATAHANDLERS, - default='jsongz' - ), - "exchange": Arg( + default='jsongz'), + "exchange": + Arg( '--exchange', help=f'Exchange name (default: `{constants.DEFAULT_EXCHANGE}`). ' f'Only valid if no config is provided.', ), - "timeframes": Arg( - '-t', '--timeframes', + "timeframes": + Arg( + '-t', + '--timeframes', help=f'Specify which tickers to download. 
Space-separated list. ' f'Default: `1m 5m`.', - choices=['1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h', - '6h', '8h', '12h', '1d', '3d', '1w'], + choices=[ + '1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h', '6h', '8h', '12h', '1d', '3d', '1w' + ], default=['1m', '5m'], nargs='+', ), - "erase": Arg( + "erase": + Arg( '--erase', help='Clean all existing data for the selected exchange/pairs/timeframes.', action='store_true', ), # Templating options - "template": Arg( + "template": + Arg( '--template', help='Use a template which is either `minimal` or ' '`full` (containing multiple sample indicators). Default: `%(default)s`.', @@ -385,19 +460,22 @@ AVAILABLE_CLI_OPTIONS = { default='full', ), # Plot dataframe - "indicators1": Arg( + "indicators1": + Arg( '--indicators1', help='Set indicators from your strategy you want in the first row of the graph. ' "Space-separated list. Example: `ema3 ema5`. Default: `['sma', 'ema3', 'ema5']`.", nargs='+', ), - "indicators2": Arg( + "indicators2": + Arg( '--indicators2', help='Set indicators from your strategy you want in the third row of the graph. ' "Space-separated list. Example: `fastd fastk`. Default: `['macd', 'macdsignal']`.", nargs='+', ), - "plot_limit": Arg( + "plot_limit": + Arg( '--plot-limit', help='Specify tick limit for plotting. Notice: too high values cause huge files. ' 'Default: %(default)s.', @@ -405,7 +483,8 @@ AVAILABLE_CLI_OPTIONS = { metavar='INT', default=750, ), - "trade_source": Arg( + "trade_source": + Arg( '--trade-source', help='Specify the source for trades (Can be DB or file (backtest file)) ' 'Default: %(default)s', @@ -413,76 +492,90 @@ AVAILABLE_CLI_OPTIONS = { default="file", ), # hyperopt-list, hyperopt-show - "hyperopt_list_profitable": Arg( + "hyperopt_list_profitable": + Arg( '--profitable', help='Select only profitable epochs.', action='store_true', ), - "hyperopt_list_best": Arg( + "hyperopt_list_best": + Arg( '--best', help='Select only best epochs.', action='store_true', ), - "hyperopt_list_min_trades": Arg( + "hyperopt_list_min_trades": + Arg( '--min-trades', help='Select epochs with more than INT trades.', type=check_int_positive, metavar='INT', ), - "hyperopt_list_max_trades": Arg( + "hyperopt_list_max_trades": + Arg( '--max-trades', help='Select epochs with less than INT trades.', type=check_int_positive, metavar='INT', ), - "hyperopt_list_min_avg_time": Arg( + "hyperopt_list_min_avg_time": + Arg( '--min-avg-time', help='Select epochs on above average time.', type=float, metavar='FLOAT', ), - "hyperopt_list_max_avg_time": Arg( + "hyperopt_list_max_avg_time": + Arg( '--max-avg-time', help='Select epochs on under average time.', type=float, metavar='FLOAT', ), - "hyperopt_list_min_avg_profit": Arg( + "hyperopt_list_min_avg_profit": + Arg( '--min-avg-profit', help='Select epochs on above average profit.', type=float, metavar='FLOAT', ), - "hyperopt_list_max_avg_profit": Arg( + "hyperopt_list_max_avg_profit": + Arg( '--max-avg-profit', help='Select epochs on below average profit.', type=float, metavar='FLOAT', ), - "hyperopt_list_min_total_profit": Arg( + "hyperopt_list_min_total_profit": + Arg( '--min-total-profit', help='Select epochs on above total profit.', type=float, metavar='FLOAT', ), - "hyperopt_list_max_total_profit": Arg( + "hyperopt_list_max_total_profit": + Arg( '--max-total-profit', help='Select epochs on below total profit.', type=float, metavar='FLOAT', ), - "hyperopt_list_no_details": Arg( + "hyperopt_list_no_details": + Arg( '--no-details', help='Do not print best epoch details.', 
action='store_true', ), - "hyperopt_show_index": Arg( - '-n', '--index', + "hyperopt_show_index": + Arg( + '-n', + '--index', help='Specify the index of the epoch to print details for.', type=check_int_nonzero, metavar='INT', ), - "hyperopt_show_no_header": Arg( + "hyperopt_show_no_header": + Arg( '--no-header', help='Do not print epoch details header.', action='store_true', diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 105cd6b53..f8fa7bc70 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -1,12 +1,12 @@ # pragma pylint: disable=too-few-public-methods - """ bot constants """ DEFAULT_CONFIG = 'config.json' DEFAULT_EXCHANGE = 'bittrex' PROCESS_THROTTLE_SECS = 5 # sec -HYPEROPT_EPOCH = 100 # epochs +HYPEROPT_EPOCH = 0 # epochs +HYPEROPT_EFFORT = 0 # /10 RETRY_TIMEOUT = 30 # sec DEFAULT_HYPEROPT_LOSS = 'DefaultHyperOptLoss' DEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite' @@ -17,8 +17,9 @@ REQUIRED_ORDERTIF = ['buy', 'sell'] REQUIRED_ORDERTYPES = ['buy', 'sell', 'stoploss', 'stoploss_on_exchange'] ORDERTYPE_POSSIBILITIES = ['limit', 'market'] ORDERTIF_POSSIBILITIES = ['gtc', 'fok', 'ioc'] -AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', - 'PrecisionFilter', 'PriceFilter', 'SpreadFilter'] +AVAILABLE_PAIRLISTS = [ + 'StaticPairList', 'VolumePairList', 'PrecisionFilter', 'PriceFilter', 'SpreadFilter' +] AVAILABLE_DATAHANDLERS = ['json', 'jsongz'] DRY_RUN_WALLET = 1000 MATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons @@ -38,11 +39,9 @@ USER_DATA_FILES = { } SUPPORTED_FIAT = [ - "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", - "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", - "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", - "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR", "USD", - "BTC", "XBT", "ETH", "XRP", "LTC", "BCH", "USDT" + "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", + "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", + "SGD", "THB", "TRY", "TWD", "ZAR", "USD", "BTC", "XBT", "ETH", "XRP", "LTC", "BCH", "USDT" ] MINIMAL_CONFIG = { @@ -63,9 +62,16 @@ MINIMAL_CONFIG = { CONF_SCHEMA = { 'type': 'object', 'properties': { - 'max_open_trades': {'type': ['integer', 'number'], 'minimum': -1}, - 'ticker_interval': {'type': 'string'}, - 'stake_currency': {'type': 'string'}, + 'max_open_trades': { + 'type': ['integer', 'number'], + 'minimum': -1 + }, + 'ticker_interval': { + 'type': 'string' + }, + 'stake_currency': { + 'type': 'string' + }, 'stake_amount': { 'type': ['number', 'string'], 'minimum': 0.0001, @@ -77,32 +83,76 @@ CONF_SCHEMA = { 'maximum': 1, 'default': 0.99 }, - 'amend_last_stake_amount': {'type': 'boolean', 'default': False}, - 'last_stake_amount_min_ratio': { - 'type': 'number', 'minimum': 0.0, 'maximum': 1.0, 'default': 0.5 + 'amend_last_stake_amount': { + 'type': 'boolean', + 'default': False + }, + 'last_stake_amount_min_ratio': { + 'type': 'number', + 'minimum': 0.0, + 'maximum': 1.0, + 'default': 0.5 + }, + 'fiat_display_currency': { + 'type': 'string', + 'enum': SUPPORTED_FIAT + }, + 'dry_run': { + 'type': 'boolean' + }, + 'dry_run_wallet': { + 'type': 'number', + 'default': DRY_RUN_WALLET + }, + 'process_only_new_candles': { + 'type': 'boolean' }, - 'fiat_display_currency': {'type': 'string', 'enum': SUPPORTED_FIAT}, - 'dry_run': {'type': 'boolean'}, - 'dry_run_wallet': {'type': 'number', 'default': DRY_RUN_WALLET}, - 'process_only_new_candles': {'type': 'boolean'}, 'minimal_roi': { 'type': 'object', 
'patternProperties': { - '^[0-9.]+$': {'type': 'number'} + '^[0-9.]+$': { + 'type': 'number' + } }, 'minProperties': 1 }, - 'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5}, - 'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True}, - 'trailing_stop': {'type': 'boolean'}, - 'trailing_stop_positive': {'type': 'number', 'minimum': 0, 'maximum': 1}, - 'trailing_stop_positive_offset': {'type': 'number', 'minimum': 0, 'maximum': 1}, - 'trailing_only_offset_is_reached': {'type': 'boolean'}, + 'amount_reserve_percent': { + 'type': 'number', + 'minimum': 0.0, + 'maximum': 0.5 + }, + 'stoploss': { + 'type': 'number', + 'maximum': 0, + 'exclusiveMaximum': True + }, + 'trailing_stop': { + 'type': 'boolean' + }, + 'trailing_stop_positive': { + 'type': 'number', + 'minimum': 0, + 'maximum': 1 + }, + 'trailing_stop_positive_offset': { + 'type': 'number', + 'minimum': 0, + 'maximum': 1 + }, + 'trailing_only_offset_is_reached': { + 'type': 'boolean' + }, 'unfilledtimeout': { 'type': 'object', 'properties': { - 'buy': {'type': 'number', 'minimum': 1}, - 'sell': {'type': 'number', 'minimum': 1} + 'buy': { + 'type': 'number', + 'minimum': 1 + }, + 'sell': { + 'type': 'number', + 'minimum': 1 + } } }, 'bid_strategy': { @@ -113,13 +163,24 @@ CONF_SCHEMA = { 'minimum': 0, 'maximum': 1, 'exclusiveMaximum': False, - 'use_order_book': {'type': 'boolean'}, - 'order_book_top': {'type': 'integer', 'maximum': 20, 'minimum': 1}, + 'use_order_book': { + 'type': 'boolean' + }, + 'order_book_top': { + 'type': 'integer', + 'maximum': 20, + 'minimum': 1 + }, 'check_depth_of_market': { 'type': 'object', 'properties': { - 'enabled': {'type': 'boolean'}, - 'bids_to_ask_delta': {'type': 'number', 'minimum': 0}, + 'enabled': { + 'type': 'boolean' + }, + 'bids_to_ask_delta': { + 'type': 'number', + 'minimum': 0 + }, } }, }, @@ -129,43 +190,92 @@ CONF_SCHEMA = { 'ask_strategy': { 'type': 'object', 'properties': { - 'use_order_book': {'type': 'boolean'}, - 'order_book_min': {'type': 'integer', 'minimum': 1}, - 'order_book_max': {'type': 'integer', 'minimum': 1, 'maximum': 50}, - 'use_sell_signal': {'type': 'boolean'}, - 'sell_profit_only': {'type': 'boolean'}, - 'ignore_roi_if_buy_signal': {'type': 'boolean'} + 'use_order_book': { + 'type': 'boolean' + }, + 'order_book_min': { + 'type': 'integer', + 'minimum': 1 + }, + 'order_book_max': { + 'type': 'integer', + 'minimum': 1, + 'maximum': 50 + }, + 'use_sell_signal': { + 'type': 'boolean' + }, + 'sell_profit_only': { + 'type': 'boolean' + }, + 'ignore_roi_if_buy_signal': { + 'type': 'boolean' + } } }, 'order_types': { 'type': 'object', 'properties': { - 'buy': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, - 'sell': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, - 'emergencysell': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, - 'stoploss': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, - 'stoploss_on_exchange': {'type': 'boolean'}, - 'stoploss_on_exchange_interval': {'type': 'number'} + 'buy': { + 'type': 'string', + 'enum': ORDERTYPE_POSSIBILITIES + }, + 'sell': { + 'type': 'string', + 'enum': ORDERTYPE_POSSIBILITIES + }, + 'emergencysell': { + 'type': 'string', + 'enum': ORDERTYPE_POSSIBILITIES + }, + 'stoploss': { + 'type': 'string', + 'enum': ORDERTYPE_POSSIBILITIES + }, + 'stoploss_on_exchange': { + 'type': 'boolean' + }, + 'stoploss_on_exchange_interval': { + 'type': 'number' + } }, 'required': ['buy', 'sell', 'stoploss', 'stoploss_on_exchange'] }, 'order_time_in_force': { 'type': 'object', 'properties': { - 
'buy': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES}, - 'sell': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES} + 'buy': { + 'type': 'string', + 'enum': ORDERTIF_POSSIBILITIES + }, + 'sell': { + 'type': 'string', + 'enum': ORDERTIF_POSSIBILITIES + } }, 'required': ['buy', 'sell'] }, - 'exchange': {'$ref': '#/definitions/exchange'}, - 'edge': {'$ref': '#/definitions/edge'}, + 'exchange': { + '$ref': '#/definitions/exchange' + }, + 'edge': { + '$ref': '#/definitions/edge' + }, 'experimental': { 'type': 'object', 'properties': { - 'use_sell_signal': {'type': 'boolean'}, - 'sell_profit_only': {'type': 'boolean'}, - 'ignore_roi_if_buy_signal': {'type': 'boolean'}, - 'block_bad_exchanges': {'type': 'boolean'} + 'use_sell_signal': { + 'type': 'boolean' + }, + 'sell_profit_only': { + 'type': 'boolean' + }, + 'ignore_roi_if_buy_signal': { + 'type': 'boolean' + }, + 'block_bad_exchanges': { + 'type': 'boolean' + } } }, 'pairlists': { @@ -173,8 +283,13 @@ CONF_SCHEMA = { 'items': { 'type': 'object', 'properties': { - 'method': {'type': 'string', 'enum': AVAILABLE_PAIRLISTS}, - 'config': {'type': 'object'} + 'method': { + 'type': 'string', + 'enum': AVAILABLE_PAIRLISTS + }, + 'config': { + 'type': 'object' + } }, 'required': ['method'], } @@ -182,71 +297,126 @@ CONF_SCHEMA = { 'telegram': { 'type': 'object', 'properties': { - 'enabled': {'type': 'boolean'}, - 'token': {'type': 'string'}, - 'chat_id': {'type': 'string'}, + 'enabled': { + 'type': 'boolean' + }, + 'token': { + 'type': 'string' + }, + 'chat_id': { + 'type': 'string' + }, }, 'required': ['enabled', 'token', 'chat_id'] }, 'webhook': { 'type': 'object', 'properties': { - 'enabled': {'type': 'boolean'}, - 'webhookbuy': {'type': 'object'}, - 'webhookbuycancel': {'type': 'object'}, - 'webhooksell': {'type': 'object'}, - 'webhooksellcancel': {'type': 'object'}, - 'webhookstatus': {'type': 'object'}, + 'enabled': { + 'type': 'boolean' + }, + 'webhookbuy': { + 'type': 'object' + }, + 'webhookbuycancel': { + 'type': 'object' + }, + 'webhooksell': { + 'type': 'object' + }, + 'webhooksellcancel': { + 'type': 'object' + }, + 'webhookstatus': { + 'type': 'object' + }, }, }, 'api_server': { 'type': 'object', 'properties': { - 'enabled': {'type': 'boolean'}, - 'listen_ip_address': {'format': 'ipv4'}, + 'enabled': { + 'type': 'boolean' + }, + 'listen_ip_address': { + 'format': 'ipv4' + }, 'listen_port': { 'type': 'integer', 'minimum': 1024, 'maximum': 65535 }, - 'username': {'type': 'string'}, - 'password': {'type': 'string'}, + 'username': { + 'type': 'string' + }, + 'password': { + 'type': 'string' + }, }, 'required': ['enabled', 'listen_ip_address', 'listen_port', 'username', 'password'] }, - 'db_url': {'type': 'string'}, - 'initial_state': {'type': 'string', 'enum': ['running', 'stopped']}, - 'forcebuy_enable': {'type': 'boolean'}, + 'db_url': { + 'type': 'string' + }, + 'initial_state': { + 'type': 'string', + 'enum': ['running', 'stopped'] + }, + 'forcebuy_enable': { + 'type': 'boolean' + }, 'internals': { 'type': 'object', 'default': {}, 'properties': { - 'process_throttle_secs': {'type': 'integer'}, - 'interval': {'type': 'integer'}, - 'sd_notify': {'type': 'boolean'}, + 'process_throttle_secs': { + 'type': 'integer' + }, + 'interval': { + 'type': 'integer' + }, + 'sd_notify': { + 'type': 'boolean' + }, } }, 'dataformat_ohlcv': { 'type': 'string', - 'enum': AVAILABLE_DATAHANDLERS, - 'default': 'json' + 'enum': AVAILABLE_DATAHANDLERS, + 'default': 'json' }, 'dataformat_trades': { 'type': 'string', - 'enum': AVAILABLE_DATAHANDLERS, - 
'default': 'jsongz' + 'enum': AVAILABLE_DATAHANDLERS, + 'default': 'jsongz' } }, 'definitions': { 'exchange': { 'type': 'object', 'properties': { - 'name': {'type': 'string'}, - 'sandbox': {'type': 'boolean', 'default': False}, - 'key': {'type': 'string', 'default': ''}, - 'secret': {'type': 'string', 'default': ''}, - 'password': {'type': 'string', 'default': ''}, - 'uid': {'type': 'string'}, + 'name': { + 'type': 'string' + }, + 'sandbox': { + 'type': 'boolean', + 'default': False + }, + 'key': { + 'type': 'string', + 'default': '' + }, + 'secret': { + 'type': 'string', + 'default': '' + }, + 'password': { + 'type': 'string', + 'default': '' + }, + 'uid': { + 'type': 'string' + }, 'pair_whitelist': { 'type': 'array', 'items': { @@ -263,29 +433,65 @@ CONF_SCHEMA = { }, 'uniqueItems': True }, - 'outdated_offset': {'type': 'integer', 'minimum': 1}, - 'markets_refresh_interval': {'type': 'integer'}, - 'ccxt_config': {'type': 'object'}, - 'ccxt_async_config': {'type': 'object'} + 'outdated_offset': { + 'type': 'integer', + 'minimum': 1 + }, + 'markets_refresh_interval': { + 'type': 'integer' + }, + 'ccxt_config': { + 'type': 'object' + }, + 'ccxt_async_config': { + 'type': 'object' + } }, 'required': ['name'] }, 'edge': { 'type': 'object', 'properties': { - 'enabled': {'type': 'boolean'}, - 'process_throttle_secs': {'type': 'integer', 'minimum': 600}, - 'calculate_since_number_of_days': {'type': 'integer'}, - 'allowed_risk': {'type': 'number'}, - 'capital_available_percentage': {'type': 'number'}, - 'stoploss_range_min': {'type': 'number'}, - 'stoploss_range_max': {'type': 'number'}, - 'stoploss_range_step': {'type': 'number'}, - 'minimum_winrate': {'type': 'number'}, - 'minimum_expectancy': {'type': 'number'}, - 'min_trade_number': {'type': 'number'}, - 'max_trade_duration_minute': {'type': 'integer'}, - 'remove_pumps': {'type': 'boolean'} + 'enabled': { + 'type': 'boolean' + }, + 'process_throttle_secs': { + 'type': 'integer', + 'minimum': 600 + }, + 'calculate_since_number_of_days': { + 'type': 'integer' + }, + 'allowed_risk': { + 'type': 'number' + }, + 'capital_available_percentage': { + 'type': 'number' + }, + 'stoploss_range_min': { + 'type': 'number' + }, + 'stoploss_range_max': { + 'type': 'number' + }, + 'stoploss_range_step': { + 'type': 'number' + }, + 'minimum_winrate': { + 'type': 'number' + }, + 'minimum_expectancy': { + 'type': 'number' + }, + 'min_trade_number': { + 'type': 'number' + }, + 'max_trade_duration_minute': { + 'type': 'integer' + }, + 'remove_pumps': { + 'type': 'boolean' + } }, 'required': ['process_throttle_secs', 'allowed_risk'] } diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 14a77cdf5..6b9a7a559 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -2,7 +2,6 @@ """ This module contains the hyperopt logic """ - import os import functools import locale @@ -10,12 +9,12 @@ import logging import random import sys import warnings -from collections import OrderedDict +from collections import OrderedDict, deque from math import factorial, log from operator import itemgetter from pathlib import Path from pprint import pprint -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Callable import rapidjson from colorama import Fore, Style @@ -32,7 +31,6 @@ from freqtrade.optimize.hyperopt_interface import IHyperOpt # noqa: F401 from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss # noqa: F401 from freqtrade.resolvers.hyperopt_resolver import 
(HyperOptLossResolver, HyperOptResolver) from joblib import (Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects) -from joblib._parallel_backends import LokyBackend from joblib import register_parallel_backend, parallel_backend from pandas import DataFrame @@ -62,6 +60,7 @@ class Hyperopt: hyperopt.start() """ def __init__(self, config: Dict[str, Any]) -> None: + self.config = config self.backtesting = Backtesting(self.config) @@ -75,16 +74,16 @@ class Hyperopt: 'hyperopt_results.pickle') self.tickerdata_pickle = (self.config['user_data_dir'] / 'hyperopt_results' / 'hyperopt_tickerdata.pkl') - self.effort = config.get('epochs', 0) or 1 - self.total_epochs = 9999 - self.max_epoch = 9999 + self.total_epochs = config['epochs'] if 'epochs' in config else 0 + self.effort = config['effort'] if 'effort' in config else -1 + self.max_epoch = 0 self.search_space_size = 0 self.max_epoch_reached = False self.min_epochs = INITIAL_POINTS self.current_best_loss = 100 self.current_best_epoch = 0 - self.epochs_since_last_best = [] + self.epochs_since_last_best: List = [] self.avg_best_occurrence = 0 if not self.config.get('hyperopt_continue'): @@ -100,6 +99,10 @@ class Hyperopt: self.opt: Optimizer self.opt = None self.f_val: List = [] + self.to_ask: deque + self.to_ask = deque() + self.tell: Callable + self.tell = None # Populate functions here (hasattr is slow so should not be run during "regular" operations) if hasattr(self.custom_hyperopt, 'populate_indicators'): @@ -163,6 +166,7 @@ class Hyperopt: Save hyperopt trials to file """ num_trials = len(self.trials) + print() if num_trials > self.num_trials_saved: logger.info(f"Saving {num_trials} {plural(num_trials, 'epoch')}.") dump(self.trials, self.trials_file) @@ -276,8 +280,8 @@ class Hyperopt: """ is_best = results['is_best'] if self.print_all or is_best: - self.print_results_explanation(results, self.total_epochs, self.print_all, - self.print_colorized) + self.print_results_explanation(results, self.total_epochs or self.max_epoch, + self.print_all, self.print_colorized) @staticmethod def print_results_explanation(results, total_epochs, highlight_best: bool, @@ -386,10 +390,10 @@ class Hyperopt: position_stacking=self.position_stacking, ) return self._get_results_dict(backtesting_results, min_date, max_date, params_dict, - params_details) + params_details, raw_params) def _get_results_dict(self, backtesting_results, min_date, max_date, params_dict, - params_details): + params_details, raw_params): results_metrics = self._calculate_results_metrics(backtesting_results) results_explanation = self._format_results_explanation_string(results_metrics) @@ -413,6 +417,7 @@ class Hyperopt: 'results_metrics': results_metrics, 'results_explanation': results_explanation, 'total_profit': total_profit, + 'asked': raw_params, } def _calculate_results_metrics(self, backtesting_results: DataFrame) -> Dict: @@ -448,38 +453,51 @@ class Hyperopt: random_state=self.random_state, ) - def run_optimizer_parallel(self, parallel, tries: int, first_try: int) -> List: + def run_optimizer_parallel(self, parallel: Parallel, tries: int, first_try: int, + jobs: int) -> List: result = parallel( delayed(wrap_non_picklable_objects(self.parallel_objective))(asked, i) - for asked, i in zip(self.opt_generator(), range(first_try, first_try + tries))) + for asked, i in zip(self.opt_generator(jobs, tries), range( + first_try, first_try + tries))) return result - def opt_generator(self): + def opt_generator(self, jobs: int, tries: int): while True: if self.f_val: - # 
print("opt.tell(): ", - # [v['params_dict'] for v in self.f_val], [v['loss'] for v in self.f_val]) - functools.partial(self.opt.tell, - ([v['params_dict'] - for v in self.f_val], [v['loss'] for v in self.f_val])) + # print("opt.tell(): ", [v['asked'] for v in self.f_val], + # [v['loss'] for v in self.f_val]) + self.tell = functools.partial(self.opt.tell, [v['asked'] for v in self.f_val], + [v['loss'] for v in self.f_val]) self.f_val = [] - yield self.opt.ask() + + if not self.to_ask: + self.opt.update_next() + self.to_ask.extend(self.opt.ask(n_points=tries)) + self.fit = True + yield self.to_ask.popleft() + # yield self.opt.ask() def parallel_objective(self, asked, n): self.log_results_immediate(n) return self.generate_optimizer(asked) def parallel_callback(self, f_val): + if self.tell: + self.tell(fit=self.fit) + self.tell = None + self.fit = False self.f_val.extend(f_val) def log_results_immediate(self, n) -> None: print('.', end='') sys.stdout.flush() - def log_results(self, f_val, frame_start, max_epoch) -> None: + def log_results(self, f_val, frame_start, total_epochs: int) -> None: """ Log results if it is better than any previous evaluation """ + print() + current = frame_start + 1 for i, v in enumerate(f_val): is_best = self.is_best_loss(v, self.current_best_loss) current = frame_start + i + 1 @@ -493,15 +511,10 @@ class Hyperopt: self.print_results(v) self.trials.append(v) # Save results after every batch - print('\n') self.save_trials() # give up if no best since max epochs - if current > self.max_epoch: + if current + 1 > (total_epochs or self.max_epoch): self.max_epoch_reached = True - # testing trapdoor - if os.getenv('FQT_HYPEROPT_TRAP'): - logger.debug('bypassing hyperopt loop') - self.max_epoch = 1 @staticmethod def load_previous_results(trials_file: Path) -> List: @@ -522,7 +535,7 @@ class Hyperopt: return random_state or random.randint(1, 2**16 - 1) @staticmethod - def calc_epochs(dimensions: List[Dimension], config_jobs: int, effort: int): + def calc_epochs(dimensions: List[Dimension], config_jobs: int, effort: int, total_epochs: int): """ Compute a reasonable number of initial points and a minimum number of epochs to evaluate """ n_dimensions = len(dimensions) @@ -543,16 +556,18 @@ class Hyperopt: if search_space_size < config_jobs: # don't waste if the space is small n_initial_points = config_jobs + elif total_epochs > 0: + n_initial_points = total_epochs // 3 if total_epochs > config_jobs * 3 else config_jobs + min_epochs = n_initial_points else: # extract coefficients from the search space and the jobs count log_sss = int(log(search_space_size, 10)) - log_jobs = int(log(config_jobs, 2)) - log_jobs = 2 if log_jobs < 0 else log_jobs + log_jobs = int(log(config_jobs, 2)) if config_jobs > 4 else 2 jobs_ip = log_jobs * log_sss # never waste n_initial_points = log_sss if jobs_ip > search_space_size else jobs_ip - # it shall run for this much, I say - min_epochs = max(2 * n_initial_points, 3 * config_jobs) * effort + # it shall run for this much, I say + min_epochs = int(max(2 * n_initial_points, 3 * config_jobs) * (1 + effort / 10)) return n_initial_points, min_epochs, search_space_size def update_max_epoch(self, val: Dict, current: int): @@ -563,11 +578,12 @@ class Hyperopt: self.avg_best_occurrence = (sum(self.epochs_since_last_best) // len(self.epochs_since_last_best)) self.current_best_epoch = current - self.max_epoch = (self.current_best_epoch + self.avg_best_occurrence + - self.min_epochs) * self.effort + self.max_epoch = int( + (self.current_best_epoch + 
self.avg_best_occurrence + self.min_epochs) * + (1 + self.effort / 10)) if self.max_epoch > self.search_space_size: self.max_epoch = self.search_space_size - print('\n') + print() logger.info(f'Max epochs set to: {self.max_epoch}') def start(self) -> None: @@ -599,47 +615,53 @@ class Hyperopt: self.dimensions: List[Dimension] = self.hyperopt_space() self.n_initial_points, self.min_epochs, self.search_space_size = self.calc_epochs( - self.dimensions, config_jobs, self.effort) + self.dimensions, config_jobs, self.effort, self.total_epochs) logger.info(f"Min epochs set to: {self.min_epochs}") - self.max_epoch = self.min_epochs - self.avg_best_occurrence = self.max_epoch + if self.total_epochs < 1: + self.max_epoch = int(self.min_epochs + len(self.trials)) + else: + self.max_epoch = self.n_initial_points + self.avg_best_occurrence = self.min_epochs logger.info(f'Initial points: {self.n_initial_points}') self.opt = self.get_optimizer(self.dimensions, config_jobs, self.n_initial_points) - # last_frame_len = (self.total_epochs - 1) % self.avg_best_occurrence - if self.print_colorized: colorama_init(autoreset=True) - try: - register_parallel_backend('custom', CustomImmediateResultBackend) - with parallel_backend('custom'): - with Parallel(n_jobs=config_jobs, verbose=0) as parallel: - for frame in range(self.total_epochs): - epochs_so_far = len(self.trials) - # pad the frame length to the number of jobs to avoid desaturation - frame_len = (self.avg_best_occurrence + config_jobs - - self.avg_best_occurrence % config_jobs) - print( - f"{epochs_so_far+1}-{epochs_so_far+self.avg_best_occurrence}" - f"/{self.total_epochs}: ", - end='') - f_val = self.run_optimizer_parallel(parallel, frame_len, epochs_so_far) - self.log_results(f_val, epochs_so_far, self.total_epochs) - if self.max_epoch_reached: - logger.info("Max epoch reached, terminating.") - break + try: + register_parallel_backend('custom', CustomImmediateResultBackend) + with parallel_backend('custom'): + with Parallel(n_jobs=config_jobs, verbose=0) as parallel: + while True: + # update epochs count + epochs_so_far = len(self.trials) + # pad the frame length to the number of jobs to avoid desaturation + frame_len = (self.avg_best_occurrence + config_jobs - + self.avg_best_occurrence % config_jobs) + # don't go over the limit + if epochs_so_far + frame_len > (self.total_epochs or self.max_epoch): + frame_len = (self.total_epochs or self.max_epoch) - epochs_so_far + print( + f"{epochs_so_far+1}-{epochs_so_far+frame_len}" + f"/{self.total_epochs}: ", + end='') + f_val = self.run_optimizer_parallel(parallel, frame_len, epochs_so_far, + config_jobs) + self.log_results(f_val, epochs_so_far, self.total_epochs or self.max_epoch) + if self.max_epoch_reached: + logger.info("Max epoch reached, terminating.") + break - except KeyboardInterrupt: - print("User interrupted..") + except KeyboardInterrupt: + print("User interrupted..") self.save_trials(final=True) if self.trials: sorted_trials = sorted(self.trials, key=itemgetter('loss')) results = sorted_trials[0] - self.print_epoch_details(results, self.total_epochs, self.print_json) + self.print_epoch_details(results, self.max_epoch, self.print_json) else: # This is printed when Ctrl+C is pressed quickly, before first epochs have # a chance to be evaluated. 
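The loop above is the core of the batching change: ask the optimizer for a batch of candidate points, backtest them in parallel, then tell the losses back so the surrogate model is refitted before the next ask (the custom joblib backend collects each worker's result in a completion callback and the generator tells them to the optimizer before asking again). A minimal sketch of that ask/tell cycle with plain skopt and joblib follows; the `objective` function, the two dimensions and the batch counts are invented placeholders, not freqtrade code.

    # Minimal sketch of the batched ask/tell cycle (placeholders throughout,
    # assuming a toy 2-dimensional search space instead of a real strategy).
    from joblib import Parallel, delayed
    from skopt import Optimizer
    from skopt.space import Integer, Real

    def objective(point):
        buy_rsi, stoploss = point
        # a real evaluation would run a backtest here and return its loss
        return (buy_rsi - 30) ** 2 + (stoploss + 0.1) ** 2

    opt = Optimizer([Integer(10, 50, name='buy_rsi'),
                     Real(-0.35, -0.02, name='stoploss')],
                    n_initial_points=10)

    jobs = 4
    with Parallel(n_jobs=jobs) as parallel:
        for _ in range(5):  # a few batches, each padded to the jobs count
            asked = opt.ask(n_points=jobs)  # one candidate per worker
            losses = parallel(delayed(objective)(p) for p in asked)
            opt.tell(asked, losses)  # fold results back between batches

    print('best loss:', min(opt.yi))

Telling between batches rather than after every single point is what keeps the workers saturated; the trade-off is that each batch is generated from a slightly stale model.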
diff --git a/freqtrade/optimize/hyperopt_backend.py b/freqtrade/optimize/hyperopt_backend.py
index d7a8544cc..4d75ec88b 100644
--- a/freqtrade/optimize/hyperopt_backend.py
+++ b/freqtrade/optimize/hyperopt_backend.py
@@ -1,6 +1,7 @@
 from joblib._parallel_backends import LokyBackend
+from typing import Any
 
-hyperopt = None
+hyperopt: Any = None
 
 
 class MultiCallback:

From f797413c806aa197ab2c828d73ec9dea42ab65f4 Mon Sep 17 00:00:00 2001
From: orehunt
Date: Mon, 2 Mar 2020 07:47:34 +0100
Subject: [PATCH 03/21] - added multi optimizer mode - tweaked optimizer config - dump optimizer state on disk

---
 freqtrade/commands/arguments.py          |   3 +-
 freqtrade/commands/cli_options.py        |  18 +-
 freqtrade/configuration/configuration.py | 181 +++--
 freqtrade/constants.py                   |  11 +-
 freqtrade/optimize/hyperopt.py           | 349 ++++++---
 freqtrade/optimize/hyperopt_backend.py   |  38 +-
 tests/optimize/test_hyperopt.py          | 875 +++++++++++++----------
 7 files changed, 897 insertions(+), 578 deletions(-)

diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py
index d4866bd8c..da5015f73 100644
--- a/freqtrade/commands/arguments.py
+++ b/freqtrade/commands/arguments.py
@@ -24,7 +24,8 @@ ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [
 ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [
     "hyperopt", "hyperopt_path", "position_stacking", "epochs", "spaces",
     "use_max_market_positions", "print_all", "print_colorized", "print_json", "hyperopt_jobs",
-    "hyperopt_random_state", "hyperopt_min_trades", "hyperopt_continue", "hyperopt_loss", "effort"
+    "hyperopt_random_state", "hyperopt_min_trades", "hyperopt_continue", "hyperopt_loss", "effort",
+    "multi_opt", "points_per_opt"
 ]
 
 ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"]
diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py
index c5a4c10d5..bccd387b9 100644
--- a/freqtrade/commands/cli_options.py
+++ b/freqtrade/commands/cli_options.py
@@ -234,10 +234,24 @@ AVAILABLE_CLI_OPTIONS = {
         '--effort',
         help=('The higher the number, the longer the search will be if '
               'no epochs are defined (default: %(default)d).'),
-        type=check_int_positive,
-        metavar='INT',
+        type=float,
+        metavar='FLOAT',
         default=constants.HYPEROPT_EFFORT,
     ),
+    "multi_opt":
+    Arg('--multi',
+        help=('Switches hyperopt to use one optimizer per job; use it '
+              'when backtesting iterations are cheap (default: %(default)s).'),
+        action='store_true',
+        default=False),
+    "points_per_opt":
+    Arg('--points-per-opt',
+        help=('Controls how many points to ask at each job dispatch to each '
+              'optimizer in multi opt mode; increase if cpu usage of each core '
+              'appears low (default: %(default)d).'),
+        type=int,
+        metavar='INT',
+        default=constants.HYPEROPT_POINTS_PER_OPT),
     "spaces":
     Arg(
         '--spaces',
diff --git a/freqtrade/configuration/configuration.py b/freqtrade/configuration/configuration.py
index 21b3e3bd3..810840e49 100644
--- a/freqtrade/configuration/configuration.py
+++ b/freqtrade/configuration/configuration.py
@@ -10,8 +10,7 @@ from typing import Any, Callable, Dict, List, Optional
 from freqtrade import constants
 from freqtrade.configuration.check_exchange import check_exchange
 from freqtrade.configuration.deprecated_settings import process_temporary_deprecated_settings
-from freqtrade.configuration.directory_operations import (create_datadir,
-                                                          create_userdata_dir)
+from freqtrade.configuration.directory_operations import (create_datadir, create_userdata_dir)
 from freqtrade.configuration.load_config import load_config_file
 from freqtrade.exceptions import OperationalException
 from
freqtrade.loggers import setup_logging @@ -26,7 +25,6 @@ class Configuration: Class to read and init the bot configuration Reuse this class for the bot, backtesting, hyperopt and every script that required configuration """ - def __init__(self, args: Dict[str, Any], runmode: RunMode = None) -> None: self.args = args self.config: Optional[Dict[str, Any]] = None @@ -152,11 +150,12 @@ class Configuration: if self.args.get("strategy") or not config.get('strategy'): config.update({'strategy': self.args.get("strategy")}) - self._args_to_config(config, argname='strategy_path', + self._args_to_config(config, + argname='strategy_path', logstring='Using additional Strategy lookup path: {}') - if ('db_url' in self.args and self.args["db_url"] and - self.args["db_url"] != constants.DEFAULT_DB_PROD_URL): + if ('db_url' in self.args and self.args["db_url"] + and self.args["db_url"] != constants.DEFAULT_DB_PROD_URL): config.update({'db_url': self.args["db_url"]}) logger.info('Parameter --db-url detected ...') @@ -167,7 +166,8 @@ class Configuration: if 'sd_notify' in self.args and self.args["sd_notify"]: config['internals'].update({'sd_notify': True}) - self._args_to_config(config, argname='dry_run', + self._args_to_config(config, + argname='dry_run', logstring='Parameter --dry-run detected, ' 'overriding dry_run to: {} ...') @@ -198,20 +198,23 @@ class Configuration: logger.info('Using data directory: %s ...', config.get('datadir')) if self.args.get('exportfilename'): - self._args_to_config(config, argname='exportfilename', + self._args_to_config(config, + argname='exportfilename', logstring='Storing backtest results to {} ...') else: - config['exportfilename'] = (config['user_data_dir'] - / 'backtest_results/backtest-result.json') + config['exportfilename'] = (config['user_data_dir'] / + 'backtest_results/backtest-result.json') def _process_optimize_options(self, config: Dict[str, Any]) -> None: # This will override the strategy configuration - self._args_to_config(config, argname='ticker_interval', + self._args_to_config(config, + argname='ticker_interval', logstring='Parameter -i/--ticker-interval detected ... 
'
                              'Using ticker_interval: {} ...')

-        self._args_to_config(config, argname='position_stacking',
+        self._args_to_config(config,
+                             argname='position_stacking',
                              logstring='Parameter --enable-position-stacking detected ...')

         # Setting max_open_trades to infinite if -1
@@ -224,31 +227,39 @@
             logger.info('max_open_trades set to unlimited ...')
         elif 'max_open_trades' in self.args and self.args["max_open_trades"]:
             config.update({'max_open_trades': self.args["max_open_trades"]})
-            logger.info('Parameter --max-open-trades detected, '
-                        'overriding max_open_trades to: %s ...', config.get('max_open_trades'))
+            logger.info(
+                'Parameter --max-open-trades detected, '
+                'overriding max_open_trades to: %s ...', config.get('max_open_trades'))
         elif config['runmode'] in NON_UTIL_MODES:
             logger.info('Using max_open_trades: %s ...', config.get('max_open_trades'))

-        self._args_to_config(config, argname='stake_amount',
+        self._args_to_config(config,
+                             argname='stake_amount',
                              logstring='Parameter --stake-amount detected, '
                              'overriding stake_amount to: {} ...')

-        self._args_to_config(config, argname='fee',
+        self._args_to_config(config,
+                             argname='fee',
                              logstring='Parameter --fee detected, '
                              'setting fee to: {} ...')

-        self._args_to_config(config, argname='timerange',
+        self._args_to_config(config,
+                             argname='timerange',
                              logstring='Parameter --timerange detected: {} ...')

         self._process_datadir_options(config)

-        self._args_to_config(config, argname='strategy_list',
-                             logstring='Using strategy list of {} strategies', logfun=len)
+        self._args_to_config(config,
+                             argname='strategy_list',
+                             logstring='Using strategy list of {} strategies',
+                             logfun=len)

-        self._args_to_config(config, argname='ticker_interval',
+        self._args_to_config(config,
+                             argname='ticker_interval',
                              logstring='Overriding ticker interval with Command line argument')

-        self._args_to_config(config, argname='export',
+        self._args_to_config(config,
+                             argname='export',
                              logstring='Parameter --export detected: {} ...')

         # Edge section:
@@ -260,21 +271,32 @@
             logger.info('Parameter --stoplosses detected: %s ...', self.args["stoploss_range"])

         # Hyperopt section
-        self._args_to_config(config, argname='hyperopt',
-                             logstring='Using Hyperopt class name: {}')
+        self._args_to_config(config, argname='hyperopt', logstring='Using Hyperopt class name: {}')

-        self._args_to_config(config, argname='hyperopt_path',
+        self._args_to_config(config,
+                             argname='hyperopt_path',
                              logstring='Using additional Hyperopt lookup path: {}')

-        self._args_to_config(config, argname='epochs',
+        self._args_to_config(config,
+                             argname='epochs',
                              logstring='Parameter --epochs detected ... '
-                             'Will run Hyperopt with for {} epochs ...'
-                             )
-
-        self._args_to_config(config, argname='spaces',
+                             'Will run Hyperopt for {} epochs ...')
+        self._args_to_config(config,
+                             argname='effort',
+                             logstring='Parameter --effort detected: {}')
+        self._args_to_config(config,
+                             argname='multi_opt',
+                             logstring='Hyperopt will use multiple optimizers ...')
+        self._args_to_config(config,
+                             argname='points_per_opt',
+                             logstring='Optimizers will be asked for {} points ...')
+        self._args_to_config(config,
+                             argname='spaces',
                              logstring='Parameter -s/--spaces detected: {}')

-        self._args_to_config(config, argname='print_all',
+        self._args_to_config(config,
+                             argname='print_all',
                              logstring='Parameter --print-all detected ...')

         if 'print_colorized' in self.args and not self.args["print_colorized"]:
@@ -283,95 +305,109 @@
         else:
             config.update({'print_colorized': True})

-        self._args_to_config(config, argname='print_json',
+        self._args_to_config(config,
+                             argname='print_json',
                              logstring='Parameter --print-json detected ...')

-        self._args_to_config(config, argname='hyperopt_jobs',
+        self._args_to_config(config,
+                             argname='hyperopt_jobs',
                              logstring='Parameter -j/--job-workers detected: {}')

-        self._args_to_config(config, argname='hyperopt_random_state',
+        self._args_to_config(config,
+                             argname='hyperopt_random_state',
                              logstring='Parameter --random-state detected: {}')

-        self._args_to_config(config, argname='hyperopt_min_trades',
+        self._args_to_config(config,
+                             argname='hyperopt_min_trades',
                              logstring='Parameter --min-trades detected: {}')

-        self._args_to_config(config, argname='hyperopt_continue',
-                             logstring='Hyperopt continue: {}')
+        self._args_to_config(config, argname='hyperopt_continue', logstring='Hyperopt continue: {}')

-        self._args_to_config(config, argname='hyperopt_loss',
+        self._args_to_config(config,
+                             argname='hyperopt_loss',
                              logstring='Using Hyperopt loss class name: {}')

-        self._args_to_config(config, argname='hyperopt_show_index',
+        self._args_to_config(config,
+                             argname='hyperopt_show_index',
                              logstring='Parameter -n/--index detected: {}')

-        self._args_to_config(config, argname='hyperopt_list_best',
+        self._args_to_config(config,
+                             argname='hyperopt_list_best',
                              logstring='Parameter --best detected: {}')

-        self._args_to_config(config, argname='hyperopt_list_profitable',
+        self._args_to_config(config,
+                             argname='hyperopt_list_profitable',
                              logstring='Parameter --profitable detected: {}')

-        self._args_to_config(config, argname='hyperopt_list_min_trades',
+        self._args_to_config(config,
+                             argname='hyperopt_list_min_trades',
                              logstring='Parameter --min-trades detected: {}')

-        self._args_to_config(config, argname='hyperopt_list_max_trades',
+        self._args_to_config(config,
+                             argname='hyperopt_list_max_trades',
                              logstring='Parameter --max-trades detected: {}')

-        self._args_to_config(config, argname='hyperopt_list_min_avg_time',
+        self._args_to_config(config,
+                             argname='hyperopt_list_min_avg_time',
                              logstring='Parameter --min-avg-time detected: {}')

-        self._args_to_config(config, argname='hyperopt_list_max_avg_time',
+        self._args_to_config(config,
+                             argname='hyperopt_list_max_avg_time',
                              logstring='Parameter --max-avg-time detected: {}')

-        self._args_to_config(config, argname='hyperopt_list_min_avg_profit',
+        self._args_to_config(config,
+                             argname='hyperopt_list_min_avg_profit',
                              logstring='Parameter --min-avg-profit detected: {}')

-        self._args_to_config(config, argname='hyperopt_list_max_avg_profit',
+        self._args_to_config(config,
+                             argname='hyperopt_list_max_avg_profit',
                              logstring='Parameter --max-avg-profit detected: {}')

-        self._args_to_config(config, argname='hyperopt_list_min_total_profit',
+        self._args_to_config(config,
+                             argname='hyperopt_list_min_total_profit',
logstring='Parameter --min-total-profit detected: {}') - self._args_to_config(config, argname='hyperopt_list_max_total_profit', + self._args_to_config(config, + argname='hyperopt_list_max_total_profit', logstring='Parameter --max-total-profit detected: {}') - self._args_to_config(config, argname='hyperopt_list_no_details', + self._args_to_config(config, + argname='hyperopt_list_no_details', logstring='Parameter --no-details detected: {}') - self._args_to_config(config, argname='hyperopt_show_no_header', + self._args_to_config(config, + argname='hyperopt_show_no_header', logstring='Parameter --no-header detected: {}') def _process_plot_options(self, config: Dict[str, Any]) -> None: - self._args_to_config(config, argname='pairs', - logstring='Using pairs {}') + self._args_to_config(config, argname='pairs', logstring='Using pairs {}') - self._args_to_config(config, argname='indicators1', - logstring='Using indicators1: {}') + self._args_to_config(config, argname='indicators1', logstring='Using indicators1: {}') - self._args_to_config(config, argname='indicators2', - logstring='Using indicators2: {}') + self._args_to_config(config, argname='indicators2', logstring='Using indicators2: {}') - self._args_to_config(config, argname='plot_limit', - logstring='Limiting plot to: {}') - self._args_to_config(config, argname='trade_source', - logstring='Using trades from: {}') + self._args_to_config(config, argname='plot_limit', logstring='Limiting plot to: {}') + self._args_to_config(config, argname='trade_source', logstring='Using trades from: {}') - self._args_to_config(config, argname='erase', + self._args_to_config(config, + argname='erase', logstring='Erase detected. Deleting existing data.') - self._args_to_config(config, argname='timeframes', - logstring='timeframes --timeframes: {}') + self._args_to_config(config, argname='timeframes', logstring='timeframes --timeframes: {}') - self._args_to_config(config, argname='days', - logstring='Detected --days: {}') + self._args_to_config(config, argname='days', logstring='Detected --days: {}') - self._args_to_config(config, argname='download_trades', + self._args_to_config(config, + argname='download_trades', logstring='Detected --dl-trades: {}') - self._args_to_config(config, argname='dataformat_ohlcv', + self._args_to_config(config, + argname='dataformat_ohlcv', logstring='Using "{}" to store OHLCV data.') - self._args_to_config(config, argname='dataformat_trades', + self._args_to_config(config, + argname='dataformat_trades', logstring='Using "{}" to store trades data.') def _process_runmode(self, config: Dict[str, Any]) -> None: @@ -383,8 +419,11 @@ class Configuration: config.update({'runmode': self.runmode}) - def _args_to_config(self, config: Dict[str, Any], argname: str, - logstring: str, logfun: Optional[Callable] = None, + def _args_to_config(self, + config: Dict[str, Any], + argname: str, + logstring: str, + logfun: Optional[Callable] = None, deprecated_msg: Optional[str] = None) -> None: """ :param config: Configuration dictionary @@ -396,7 +435,7 @@ class Configuration: configuration instead of the content) """ if (argname in self.args and self.args[argname] is not None - and self.args[argname] is not False): + and self.args[argname] is not False): config.update({argname: self.args[argname]}) if logfun: diff --git a/freqtrade/constants.py b/freqtrade/constants.py index f8fa7bc70..3672de5bd 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -4,10 +4,11 @@ bot constants """ DEFAULT_CONFIG = 'config.json' DEFAULT_EXCHANGE = 
'bittrex' -PROCESS_THROTTLE_SECS = 5 # sec -HYPEROPT_EPOCH = 0 # epochs -HYPEROPT_EFFORT = 0 # /10 -RETRY_TIMEOUT = 30 # sec +PROCESS_THROTTLE_SECS = 5 # sec +HYPEROPT_EPOCH = 0 # epochs +HYPEROPT_EFFORT = 0. # tune max epoch count +HYPEROPT_POINTS_PER_OPT = 2 # tune iterations between estimations +RETRY_TIMEOUT = 30 # sec DEFAULT_HYPEROPT_LOSS = 'DefaultHyperOptLoss' DEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite' DEFAULT_DB_DRYRUN_URL = 'sqlite:///tradesv3.dryrun.sqlite' @@ -22,7 +23,7 @@ AVAILABLE_PAIRLISTS = [ ] AVAILABLE_DATAHANDLERS = ['json', 'jsongz'] DRY_RUN_WALLET = 1000 -MATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons +MATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons DEFAULT_DATAFRAME_COLUMNS = ['date', 'open', 'high', 'low', 'close', 'volume'] USERPATH_HYPEROPTS = 'hyperopts' diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 6b9a7a559..354c8ab58 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -2,7 +2,7 @@ """ This module contains the hyperopt logic """ -import os + import functools import locale import logging @@ -10,11 +10,11 @@ import random import sys import warnings from collections import OrderedDict, deque -from math import factorial, log +from math import factorial, log, inf from operator import itemgetter from pathlib import Path from pprint import pprint -from typing import Any, Dict, List, Optional, Callable +from typing import Any, Dict, List, Optional import rapidjson from colorama import Fore, Style @@ -26,19 +26,28 @@ from freqtrade.exceptions import OperationalException from freqtrade.misc import plural, round_dict from freqtrade.optimize.backtesting import Backtesting # Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules -from freqtrade.optimize.hyperopt_backend import CustomImmediateResultBackend +import freqtrade.optimize.hyperopt_backend as backend from freqtrade.optimize.hyperopt_interface import IHyperOpt # noqa: F401 from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss # noqa: F401 from freqtrade.resolvers.hyperopt_resolver import (HyperOptLossResolver, HyperOptResolver) from joblib import (Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects) -from joblib import register_parallel_backend, parallel_backend +from joblib import parallel_backend +from multiprocessing import Manager +from queue import Queue + from pandas import DataFrame +from numpy import iinfo, int32 # Suppress scikit-learn FutureWarnings from skopt with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) from skopt import Optimizer from skopt.space import Dimension +# Additional regressors already pluggable into the optimizer +# from sklearn.linear_model import ARDRegression, BayesianRidge +# possibly interesting regressors that need predict method override +# from sklearn.ensemble import HistGradientBoostingRegressor +# from xgboost import XGBoostRegressor logger = logging.getLogger(__name__) @@ -48,6 +57,10 @@ INITIAL_POINTS = 30 # in the skopt models list SKOPT_MODELS_MAX_NUM = 10 +# supported strategies when asking for multiple points to the optimizer +NEXT_POINT_METHODS = ["cl_min", "cl_mean", "cl_max"] +NEXT_POINT_METHODS_LENGTH = 3 + MAX_LOSS = 100000 # just a big enough number to be bad result in loss optimization @@ -72,16 +85,24 @@ class Hyperopt: self.trials_file = (self.config['user_data_dir'] / 'hyperopt_results' / 'hyperopt_results.pickle') + self.opts_file = 
(self.config['user_data_dir'] / 'hyperopt_results' / + 'hyperopt_optimizers.pickle') self.tickerdata_pickle = (self.config['user_data_dir'] / 'hyperopt_results' / 'hyperopt_tickerdata.pkl') - self.total_epochs = config['epochs'] if 'epochs' in config else 0 - self.effort = config['effort'] if 'effort' in config else -1 + self.n_jobs = self.config.get('hyperopt_jobs', -1) + self.effort = self.config['effort'] if 'effort' in self.config else 0 + self.total_epochs = self.config['epochs'] if 'epochs' in self.config else 0 self.max_epoch = 0 - self.search_space_size = 0 self.max_epoch_reached = False + self.min_epochs = 0 + self.epochs_limit = lambda: self.total_epochs or self.max_epoch - self.min_epochs = INITIAL_POINTS - self.current_best_loss = 100 + # a guessed number extracted by the space dimensions + self.search_space_size = 0 + # total number of candles being backtested + self.n_samples = 0 + + self.current_best_loss = inf self.current_best_epoch = 0 self.epochs_since_last_best: List = [] self.avg_best_occurrence = 0 @@ -93,16 +114,35 @@ class Hyperopt: self.num_trials_saved = 0 - # Previous evaluations + # evaluations self.trials: List = [] + # optimizers + self.opts: List[Optimizer] = [] + self.opt: Optimizer = None - self.opt: Optimizer - self.opt = None - self.f_val: List = [] - self.to_ask: deque - self.to_ask = deque() - self.tell: Callable - self.tell = None + if 'multi_opt' in self.config and self.config['multi_opt']: + self.multi = True + backend.manager = Manager() + backend.optimizers = backend.manager.Queue() + backend.results_board = backend.manager.Queue(maxsize=1) + backend.results_board.put([]) + self.opt_base_estimator = 'GBRT' + self.opt_acq_optimizer = 'sampling' + default_n_points = 2 + else: + backend.manager = Manager() + backend.results = backend.manager.Queue() + self.multi = False + self.opt_base_estimator = 'GP' + self.opt_acq_optimizer = 'lbfgs' + default_n_points = 1 + + # in single opt assume runs are expensive so default to 1 point per ask + self.n_points = self.config.get('points_per_opt', default_n_points) + if self.n_points < 1: + self.n_points = 1 + self.opt_base_estimator = 'DUMMY' + self.opt_acq_optimizer = 'sampling' # Populate functions here (hasattr is slow so should not be run during "regular" operations) if hasattr(self.custom_hyperopt, 'populate_indicators'): @@ -142,7 +182,7 @@ class Hyperopt: """ Remove hyperopt pickle files to restart hyperopt. """ - for f in [self.tickerdata_pickle, self.trials_file]: + for f in [self.tickerdata_pickle, self.trials_file, self.opts_file]: p = Path(f) if p.is_file(): logger.info(f"Removing `{p}`.") @@ -171,10 +211,30 @@ class Hyperopt: logger.info(f"Saving {num_trials} {plural(num_trials, 'epoch')}.") dump(self.trials, self.trials_file) self.num_trials_saved = num_trials + self.save_opts() if final: logger.info(f"{num_trials} {plural(num_trials, 'epoch')} " f"saved to '{self.trials_file}'.") + def save_opts(self) -> None: + """ Save optimizers state to disk. 
The minimum required state could also be constructed + from the attributes [ models, space, rng ] with Xi, yi loaded from trials """ + # synchronize with saved trials + opts = [] + n_opts = 0 + if self.multi: + while not backend.optimizers.empty(): + opts.append(backend.optimizers.get()) + n_opts = len(opts) + for opt in opts: + backend.optimizers.put(opt) + else: + if self.opt: + n_opts = 1 + opts = [self.opt] + logger.info(f"Saving {n_opts} {plural(n_opts, 'optimizer')}.") + dump(opts, self.opts_file) + @staticmethod def _read_trials(trials_file: Path) -> List: """ @@ -280,8 +340,8 @@ class Hyperopt: """ is_best = results['is_best'] if self.print_all or is_best: - self.print_results_explanation(results, self.total_epochs or self.max_epoch, - self.print_all, self.print_colorized) + self.print_results_explanation(results, self.epochs_limit(), self.print_all, + self.print_colorized) @staticmethod def print_results_explanation(results, total_epochs, highlight_best: bool, @@ -345,7 +405,7 @@ class Hyperopt: return spaces - def generate_optimizer(self, raw_params: List[Any], iteration=None) -> Dict: + def backtest_params(self, raw_params: List[Any], iteration=None) -> Dict: """ Used Optimize function. Called once per epoch to optimize whatever is configured. Keep this function as optimized as possible! @@ -417,7 +477,6 @@ class Hyperopt: 'results_metrics': results_metrics, 'results_explanation': results_explanation, 'total_profit': total_profit, - 'asked': raw_params, } def _calculate_results_metrics(self, backtesting_results: DataFrame) -> Dict: @@ -441,54 +500,134 @@ class Hyperopt: f"Avg duration {results_metrics['duration']:5.1f} min.").encode( locale.getpreferredencoding(), 'replace').decode('utf-8') - def get_optimizer(self, dimensions: List[Dimension], cpu_count, + def get_next_point_strategy(self): + """ Choose a strategy randomly among the supported ones, used in multi opt mode + to increase the diversion of the searches of each optimizer """ + return NEXT_POINT_METHODS[random.randrange(0, NEXT_POINT_METHODS_LENGTH)] + + def get_optimizer(self, + dimensions: List[Dimension], + n_jobs: int, n_initial_points=INITIAL_POINTS) -> Optimizer: + " Construct an optimizer object " + # https://github.com/scikit-learn/scikit-learn/issues/14265 + # lbfgs uses joblib threading backend so n_jobs has to be reduced + # to avoid oversubscription + if self.opt_acq_optimizer == 'lbfgs': + n_jobs = 1 return Optimizer( dimensions, - base_estimator="ET", - acq_optimizer="auto", + base_estimator=self.opt_base_estimator, + acq_optimizer=self.opt_acq_optimizer, n_initial_points=n_initial_points, - acq_optimizer_kwargs={'n_jobs': cpu_count}, + acq_optimizer_kwargs={'n_jobs': n_jobs}, + acq_func_kwargs={ + 'xi': 0.00001, + 'kappa': 0.00001 + }, model_queue_size=SKOPT_MODELS_MAX_NUM, random_state=self.random_state, ) - def run_optimizer_parallel(self, parallel: Parallel, tries: int, first_try: int, - jobs: int) -> List: + def run_backtest_parallel(self, parallel: Parallel, tries: int, first_try: int, + jobs: int) -> List: result = parallel( - delayed(wrap_non_picklable_objects(self.parallel_objective))(asked, i) - for asked, i in zip(self.opt_generator(jobs, tries), range( - first_try, first_try + tries))) + delayed(wrap_non_picklable_objects(self.parallel_objective))(asked, backend.results, i) + for asked, i in zip(self.opt_ask_and_tell(jobs, tries), + range(first_try, first_try + tries))) return result - def opt_generator(self, jobs: int, tries: int): - while True: - if self.f_val: - # print("opt.tell(): ", 
[v['asked'] for v in self.f_val],
-                # [v['loss'] for v in self.f_val])
-                self.tell = functools.partial(self.opt.tell, [v['asked'] for v in self.f_val],
-                                              [v['loss'] for v in self.f_val])
-                self.f_val = []
+
+    def run_multi_backtest_parallel(self, parallel: Parallel, tries: int, first_try: int,
+                                    jobs: int) -> List:
+        results = parallel(
+            delayed(wrap_non_picklable_objects(self.parallel_opt_objective))(
+                i, backend.optimizers, jobs, backend.results_board)
+            for i in range(first_try, first_try + tries))
+        return functools.reduce(lambda x, y: [*x, *y], results)

-            if not to_ask:
+    def opt_ask_and_tell(self, jobs: int, tries: int):
+        """ loop to manage optimizer state in single optimizer mode """
+        vals = []
+        to_ask: deque = deque()
+        evald: List[List] = []
+        fit = False
+        for r in range(tries):
+            while not backend.results.empty():
+                vals.append(backend.results.get())
+            if vals:
+                self.opt.tell([list(v['params_dict'].values()) for v in vals],
+                              [v['loss'] for v in vals],
+                              fit=fit)
+                if fit:
+                    fit = False
+                vals = []
+
+            if not to_ask:
                 self.opt.update_next()
-                self.to_ask.extend(self.opt.ask(n_points=tries))
-                self.fit = True
-            yield self.to_ask.popleft()
-            # yield self.opt.ask()
+                to_ask.extend(self.opt.ask(n_points=self.n_points))
+                fit = True
+            a = to_ask.popleft()
+            if a in evald:
+                logger.info('this point was evaluated before...')
+            evald.append(a)
+            yield a

-    def parallel_objective(self, asked, n):
+    def parallel_opt_objective(self, n: int, optimizers: Queue, jobs: int, results_board: Queue):
         self.log_results_immediate(n)
-        return self.generate_optimizer(asked)
+        # fetch an optimizer instance
+        opt = optimizers.get()
+        # tell new points if any
+        results = results_board.get()
+        past_Xi = []
+        past_yi = []
+        for idx, res in enumerate(results):
+            unsubscribe = False
+            vals = res[0]  # res[1] is the counter
+            for v in vals:
+                if list(v['params_dict'].values()) not in opt.Xi:
+                    past_Xi.append(list(v['params_dict'].values()))
+                    past_yi.append(v['loss'])
+                    # decrease counter
+                    if not unsubscribe:
+                        unsubscribe = True
+            if unsubscribe:
+                results[idx][1] -= 1
+                if results[idx][1] < 1:
+                    del results[idx]
+        # put back the updated results
+        results_board.put(results)
+        if len(past_Xi) > 0:
+            opt.tell(past_Xi, past_yi, fit=False)
+            opt.update_next()
+
+        # ask for points according to config
+        asked = opt.ask(n_points=self.n_points, strategy=self.get_next_point_strategy())
+        # run the backtest for each point
+        f_val = [self.backtest_params(e) for e in asked]
+        # tell the optimizer the results
+        Xi = [list(v['params_dict'].values()) for v in f_val]
+        yi = [v['loss'] for v in f_val]
+        opt.tell(Xi, yi, fit=False)
+        # update the board with the new results
+        results = results_board.get()
+        results.append([f_val, jobs - 1])
+        results_board.put(results)
+        # send back the updated optimizer
+        optimizers.put(opt)
+        return f_val
+
+    def parallel_objective(self, asked, results: Queue, n=0):
+        self.log_results_immediate(n)
+        v = self.backtest_params(asked)
+        results.put(v)
+        return v

     def parallel_callback(self, f_val):
-        if self.tell:
-            self.tell(fit=self.fit)
-            self.tell = None
-            self.fit = False
+        """ Executed after each epoch evaluation to collect the results """
         self.f_val.extend(f_val)

     def log_results_immediate(self, n) -> None:
+        """ Signals that a new job has been scheduled """
         print('.', end='')
         sys.stdout.flush()

@@ -510,10 +649,10 @@
             self.update_max_epoch(v, current)
             self.print_results(v)
             self.trials.append(v)
-            # Save results after every batch
+            # Save results and optimizers after 
every batch self.save_trials() # give up if no best since max epochs - if current + 1 > (total_epochs or self.max_epoch): + if current + 1 > self.epochs_limit(): self.max_epoch_reached = True @staticmethod @@ -531,11 +670,25 @@ class Hyperopt: logger.info(f"Loaded {len(trials)} previous evaluations from disk.") return trials + @staticmethod + def load_previous_optimizers(opts_file: Path) -> List: + """ Load the state of previous optimizers from file """ + opts: List[Optimizer] = [] + if opts_file.is_file() and opts_file.stat().st_size > 0: + opts = load(opts_file) + n_opts = len(opts) + if n_opts > 0 and type(opts[-1]) != Optimizer: + raise OperationalException("The file storing optimizers state might be corrupted " + "and cannot be loaded.") + else: + logger.info(f"Loaded {n_opts} previous {plural(n_opts, 'optimizer')} from disk.") + return opts + def _set_random_state(self, random_state: Optional[int]) -> int: return random_state or random.randint(1, 2**16 - 1) @staticmethod - def calc_epochs(dimensions: List[Dimension], config_jobs: int, effort: int, total_epochs: int): + def calc_epochs(dimensions: List[Dimension], n_jobs: int, effort: float, total_epochs: int): """ Compute a reasonable number of initial points and a minimum number of epochs to evaluate """ n_dimensions = len(dimensions) @@ -553,21 +706,21 @@ class Hyperopt: search_space_size = (factorial(n_parameters) / (factorial(n_parameters - n_dimensions) * factorial(n_dimensions))) # logger.info(f'Search space size: {search_space_size}') - if search_space_size < config_jobs: + if search_space_size < n_jobs: # don't waste if the space is small - n_initial_points = config_jobs + n_initial_points = n_jobs elif total_epochs > 0: - n_initial_points = total_epochs // 3 if total_epochs > config_jobs * 3 else config_jobs + n_initial_points = total_epochs // 3 if total_epochs > n_jobs * 3 else n_jobs min_epochs = n_initial_points else: # extract coefficients from the search space and the jobs count log_sss = int(log(search_space_size, 10)) - log_jobs = int(log(config_jobs, 2)) if config_jobs > 4 else 2 + log_jobs = int(log(n_jobs, 2)) if n_jobs > 4 else 2 jobs_ip = log_jobs * log_sss # never waste n_initial_points = log_sss if jobs_ip > search_space_size else jobs_ip # it shall run for this much, I say - min_epochs = int(max(2 * n_initial_points, 3 * config_jobs) * (1 + effort / 10)) + min_epochs = int(max(n_initial_points, n_jobs) * (1 + effort) + n_initial_points) return n_initial_points, min_epochs, search_space_size def update_max_epoch(self, val: Dict, current: int): @@ -580,11 +733,40 @@ class Hyperopt: self.current_best_epoch = current self.max_epoch = int( (self.current_best_epoch + self.avg_best_occurrence + self.min_epochs) * - (1 + self.effort / 10)) + (1 + self.effort)) if self.max_epoch > self.search_space_size: self.max_epoch = self.search_space_size print() - logger.info(f'Max epochs set to: {self.max_epoch}') + logger.info(f'Max epochs set to: {self.epochs_limit()}') + + def setup_optimizers(self): + """ Setup the optimizers objects, try to load from disk, or create new ones """ + # try to load previous optimizers + self.opts = self.load_previous_optimizers(self.opts_file) + + if self.multi: + if len(self.opts) == self.n_jobs: + # put the restored optimizers in the queue and clear them from the object + for opt in self.opts: + backend.optimizers.put(opt) + else: # or generate new optimizers + opt = self.get_optimizer(self.dimensions, self.n_jobs, self.n_initial_points) + # reduce random points by the number of optimizers + 
self.n_initial_points = self.n_initial_points // self.n_jobs + for _ in range(self.n_jobs): # generate optimizers + # random state is preserved + backend.optimizers.put( + opt.copy(random_state=opt.rng.randint(0, + iinfo(int32).max))) + del opt + else: + # if we have more than 1 optimizer but are using single opt, + # pick one discard the rest... + if len(self.opts) > 0: + self.opt = self.opts[-1] + del self.opts + else: + self.opt = self.get_optimizer(self.dimensions, self.n_jobs, self.n_initial_points) def start(self) -> None: self.random_state = self._set_random_state(self.config.get('hyperopt_random_state', None)) @@ -597,6 +779,7 @@ class Hyperopt: # Trim startup period from analyzed dataframe for pair, df in preprocessed.items(): preprocessed[pair] = trim_dataframe(df, timerange) + self.n_samples += len(preprocessed[pair]) min_date, max_date = get_timerange(data) logger.info('Hyperopting with data from %s up to %s (%s days)..', min_date.isoformat(), @@ -608,47 +791,49 @@ class Hyperopt: self.trials = self.load_previous_results(self.trials_file) - cpus = cpu_count() - logger.info(f"Found {cpus} CPU cores. Let's make them scream!") - config_jobs = self.config.get('hyperopt_jobs', -1) - logger.info(f'Number of parallel jobs set as: {config_jobs}') + logger.info(f"Found {cpu_count()} CPU cores. Let's make them scream!") + logger.info(f'Number of parallel jobs set as: {self.n_jobs}') self.dimensions: List[Dimension] = self.hyperopt_space() self.n_initial_points, self.min_epochs, self.search_space_size = self.calc_epochs( - self.dimensions, config_jobs, self.effort, self.total_epochs) + self.dimensions, self.n_jobs, self.effort, self.total_epochs) logger.info(f"Min epochs set to: {self.min_epochs}") if self.total_epochs < 1: self.max_epoch = int(self.min_epochs + len(self.trials)) else: self.max_epoch = self.n_initial_points - self.avg_best_occurrence = self.min_epochs + self.avg_best_occurrence = self.min_epochs // self.n_jobs logger.info(f'Initial points: {self.n_initial_points}') - self.opt = self.get_optimizer(self.dimensions, config_jobs, self.n_initial_points) - if self.print_colorized: colorama_init(autoreset=True) + self.setup_optimizers() try: - register_parallel_backend('custom', CustomImmediateResultBackend) - with parallel_backend('custom'): - with Parallel(n_jobs=config_jobs, verbose=0) as parallel: + if self.multi: + jobs_scheduler = self.run_multi_backtest_parallel + else: + jobs_scheduler = self.run_backtest_parallel + with parallel_backend('loky', inner_max_num_threads=2): + with Parallel(n_jobs=self.n_jobs, verbose=0, backend='loky') as parallel: while True: # update epochs count epochs_so_far = len(self.trials) - # pad the frame length to the number of jobs to avoid desaturation - frame_len = (self.avg_best_occurrence + config_jobs - - self.avg_best_occurrence % config_jobs) + # pad the batch length to the number of jobs to avoid desaturation + batch_len = (self.avg_best_occurrence + self.n_jobs - + self.avg_best_occurrence % self.n_jobs) + # when using multiple optimizers each worker performs + # n_points (epochs) in 1 dispatch but this reduces the batch len too much + # if self.multi: batch_len = batch_len // self.n_points # don't go over the limit - if epochs_so_far + frame_len > (self.total_epochs or self.max_epoch): - frame_len = (self.total_epochs or self.max_epoch) - epochs_so_far + if epochs_so_far + batch_len > self.epochs_limit(): + batch_len = self.epochs_limit() - epochs_so_far print( - f"{epochs_so_far+1}-{epochs_so_far+frame_len}" - f"/{self.total_epochs}: 
", + f"{epochs_so_far+1}-{epochs_so_far+batch_len}" + f"/{self.epochs_limit()}: ", end='') - f_val = self.run_optimizer_parallel(parallel, frame_len, epochs_so_far, - config_jobs) - self.log_results(f_val, epochs_so_far, self.total_epochs or self.max_epoch) + f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, self.n_jobs) + self.log_results(f_val, epochs_so_far, self.epochs_limit()) if self.max_epoch_reached: logger.info("Max epoch reached, terminating.") break diff --git a/freqtrade/optimize/hyperopt_backend.py b/freqtrade/optimize/hyperopt_backend.py index 4d75ec88b..4871c44ca 100644 --- a/freqtrade/optimize/hyperopt_backend.py +++ b/freqtrade/optimize/hyperopt_backend.py @@ -1,31 +1,13 @@ -from joblib._parallel_backends import LokyBackend from typing import Any +from queue import Queue +from multiprocessing.managers import SyncManager hyperopt: Any = None - - -class MultiCallback: - def __init__(self, *callbacks): - self.callbacks = [cb for cb in callbacks if cb] - - def __call__(self, out): - for cb in self.callbacks: - cb(out) - - -class CustomImmediateResultBackend(LokyBackend): - def callback(self, result): - """ - Our custom completion callback. Executed in the parent process. - Use it to run Optimizer.tell() with immediate results of the backtest() - evaluated in the joblib worker process. - """ - if not result.exception(): - # Fetch results from the Future object passed to us. - # Future object is assumed to be 'done' already. - f_val = result.result().copy() - hyperopt.parallel_callback(f_val) - - def apply_async(self, func, callback=None): - cbs = MultiCallback(callback, self.callback) - return super().apply_async(func, cbs) +manager: SyncManager +# stores the optimizers in multi opt mode +optimizers: Queue +# stores a list of the results to share between optimizers +# each result is a tuple of the params_dict and a decreasing counter +results_board: Queue +# store the results in single opt mode +results: Queue diff --git a/tests/optimize/test_hyperopt.py b/tests/optimize/test_hyperopt.py index e3212e0cd..d0f09f74c 100644 --- a/tests/optimize/test_hyperopt.py +++ b/tests/optimize/test_hyperopt.py @@ -10,15 +10,13 @@ import pytest from arrow import Arrow from filelock import Timeout -from freqtrade.commands.optimize_commands import (setup_optimize_configuration, - start_hyperopt) +from freqtrade.commands.optimize_commands import (setup_optimize_configuration, start_hyperopt) from freqtrade.data.history import load_data from freqtrade.exceptions import OperationalException from freqtrade.optimize.default_hyperopt import DefaultHyperOpt from freqtrade.optimize.default_hyperopt_loss import DefaultHyperOptLoss from freqtrade.optimize.hyperopt import Hyperopt -from freqtrade.resolvers.hyperopt_resolver import (HyperOptLossResolver, - HyperOptResolver) +from freqtrade.resolvers.hyperopt_resolver import (HyperOptLossResolver, HyperOptResolver) from freqtrade.state import RunMode from freqtrade.strategy.interface import SellType from tests.conftest import (get_args, log_has, log_has_re, patch_exchange, @@ -37,21 +35,18 @@ def hyperopt(default_conf, mocker): @pytest.fixture(scope='function') def hyperopt_results(): - return pd.DataFrame( - { - 'pair': ['ETH/BTC', 'ETH/BTC', 'ETH/BTC'], - 'profit_percent': [-0.1, 0.2, 0.3], - 'profit_abs': [-0.2, 0.4, 0.6], - 'trade_duration': [10, 30, 10], - 'sell_reason': [SellType.STOP_LOSS, SellType.ROI, SellType.ROI], - 'close_time': - [ - datetime(2019, 1, 1, 9, 26, 3, 478039), - datetime(2019, 2, 1, 9, 26, 3, 478039), - datetime(2019, 3, 1, 
9, 26, 3, 478039) - ] - } - ) + return pd.DataFrame({ + 'pair': ['ETH/BTC', 'ETH/BTC', 'ETH/BTC'], + 'profit_percent': [-0.1, 0.2, 0.3], + 'profit_abs': [-0.2, 0.4, 0.6], + 'trade_duration': [10, 30, 10], + 'sell_reason': [SellType.STOP_LOSS, SellType.ROI, SellType.ROI], + 'close_time': [ + datetime(2019, 1, 1, 9, 26, 3, 478039), + datetime(2019, 2, 1, 9, 26, 3, 478039), + datetime(2019, 3, 1, 9, 26, 3, 478039) + ] + }) # Functions for recurrent object patching @@ -80,8 +75,10 @@ def test_setup_hyperopt_configuration_without_arguments(mocker, default_conf, ca args = [ 'hyperopt', - '--config', 'config.json', - '--hyperopt', 'DefaultHyperOpt', + '--config', + 'config.json', + '--hyperopt', + 'DefaultHyperOpt', ] config = setup_optimize_configuration(get_args(args), RunMode.HYPEROPT) @@ -105,23 +102,12 @@ def test_setup_hyperopt_configuration_without_arguments(mocker, default_conf, ca def test_setup_hyperopt_configuration_with_arguments(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) - mocker.patch( - 'freqtrade.configuration.configuration.create_datadir', - lambda c, x: x - ) + mocker.patch('freqtrade.configuration.configuration.create_datadir', lambda c, x: x) args = [ - 'hyperopt', - '--config', 'config.json', - '--hyperopt', 'DefaultHyperOpt', - '--datadir', '/foo/bar', - '--ticker-interval', '1m', - '--timerange', ':100', - '--enable-position-stacking', - '--disable-max-market-positions', - '--epochs', '1000', - '--spaces', 'default', - '--print-all' + 'hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--datadir', + '/foo/bar', '--ticker-interval', '1m', '--timerange', ':100', '--enable-position-stacking', + '--disable-max-market-positions', '--epochs', '1000', '--spaces', 'default', '--print-all' ] config = setup_optimize_configuration(get_args(args), RunMode.HYPEROPT) @@ -165,21 +151,22 @@ def test_hyperoptresolver(mocker, default_conf, caplog) -> None: delattr(hyperopt, 'populate_indicators') delattr(hyperopt, 'populate_buy_trend') delattr(hyperopt, 'populate_sell_trend') - mocker.patch( - 'freqtrade.resolvers.hyperopt_resolver.HyperOptResolver.load_object', - MagicMock(return_value=hyperopt(default_conf)) - ) + mocker.patch('freqtrade.resolvers.hyperopt_resolver.HyperOptResolver.load_object', + MagicMock(return_value=hyperopt(default_conf))) default_conf.update({'hyperopt': 'DefaultHyperOpt'}) x = HyperOptResolver.load_hyperopt(default_conf) assert not hasattr(x, 'populate_indicators') assert not hasattr(x, 'populate_buy_trend') assert not hasattr(x, 'populate_sell_trend') - assert log_has("Hyperopt class does not provide populate_indicators() method. " - "Using populate_indicators from the strategy.", caplog) - assert log_has("Hyperopt class does not provide populate_sell_trend() method. " - "Using populate_sell_trend from the strategy.", caplog) - assert log_has("Hyperopt class does not provide populate_buy_trend() method. " - "Using populate_buy_trend from the strategy.", caplog) + assert log_has( + "Hyperopt class does not provide populate_indicators() method. " + "Using populate_indicators from the strategy.", caplog) + assert log_has( + "Hyperopt class does not provide populate_sell_trend() method. " + "Using populate_sell_trend from the strategy.", caplog) + assert log_has( + "Hyperopt class does not provide populate_buy_trend() method. 
" + "Using populate_buy_trend from the strategy.", caplog) assert hasattr(x, "ticker_interval") @@ -194,17 +181,15 @@ def test_hyperoptresolver_noname(default_conf): default_conf['hyperopt'] = '' with pytest.raises(OperationalException, match="No Hyperopt set. Please use `--hyperopt` to specify " - "the Hyperopt class to use."): + "the Hyperopt class to use."): HyperOptResolver.load_hyperopt(default_conf) def test_hyperoptlossresolver(mocker, default_conf, caplog) -> None: hl = DefaultHyperOptLoss - mocker.patch( - 'freqtrade.resolvers.hyperopt_resolver.HyperOptLossResolver.load_object', - MagicMock(return_value=hl) - ) + mocker.patch('freqtrade.resolvers.hyperopt_resolver.HyperOptLossResolver.load_object', + MagicMock(return_value=hl)) x = HyperOptLossResolver.load_hyperoptloss(default_conf) assert hasattr(x, "hyperopt_loss_function") @@ -223,12 +208,7 @@ def test_start_not_installed(mocker, default_conf, caplog, import_fails) -> None mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) patch_exchange(mocker) - args = [ - 'hyperopt', - '--config', 'config.json', - '--hyperopt', 'DefaultHyperOpt', - '--epochs', '5' - ] + args = ['hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--epochs', '5'] pargs = get_args(args) with pytest.raises(OperationalException, match=r"Please ensure that the hyperopt dependencies"): @@ -241,12 +221,7 @@ def test_start(mocker, default_conf, caplog) -> None: mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) patch_exchange(mocker) - args = [ - 'hyperopt', - '--config', 'config.json', - '--hyperopt', 'DefaultHyperOpt', - '--epochs', '5' - ] + args = ['hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--epochs', '5'] pargs = get_args(args) start_hyperopt(pargs) @@ -257,19 +232,12 @@ def test_start(mocker, default_conf, caplog) -> None: def test_start_no_data(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) mocker.patch('freqtrade.data.history.load_pair_history', MagicMock(return_value=pd.DataFrame)) - mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) - ) + mocker.patch('freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) patch_exchange(mocker) - args = [ - 'hyperopt', - '--config', 'config.json', - '--hyperopt', 'DefaultHyperOpt', - '--epochs', '5' - ] + args = ['hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--epochs', '5'] pargs = get_args(args) with pytest.raises(OperationalException, match='No data found. 
Terminating.'): start_hyperopt(pargs) @@ -281,12 +249,7 @@ def test_start_filelock(mocker, default_conf, caplog) -> None: mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) patch_exchange(mocker) - args = [ - 'hyperopt', - '--config', 'config.json', - '--hyperopt', 'DefaultHyperOpt', - '--epochs', '5' - ] + args = ['hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--epochs', '5'] pargs = get_args(args) start_hyperopt(pargs) assert log_has("Another running instance of freqtrade Hyperopt detected.", caplog) @@ -294,12 +257,12 @@ def test_start_filelock(mocker, default_conf, caplog) -> None: def test_loss_calculation_prefer_correct_trade_count(default_conf, hyperopt_results) -> None: hl = HyperOptLossResolver.load_hyperoptloss(default_conf) - correct = hl.hyperopt_loss_function(hyperopt_results, 600, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(hyperopt_results, 600 + 100, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(hyperopt_results, 600 - 100, - datetime(2019, 1, 1), datetime(2019, 5, 1)) + correct = hl.hyperopt_loss_function(hyperopt_results, 600, datetime(2019, 1, 1), + datetime(2019, 5, 1)) + over = hl.hyperopt_loss_function(hyperopt_results, 600 + 100, datetime(2019, 1, 1), + datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(hyperopt_results, 600 - 100, datetime(2019, 1, 1), + datetime(2019, 5, 1)) assert over > correct assert under > correct @@ -309,10 +272,9 @@ def test_loss_calculation_prefer_shorter_trades(default_conf, hyperopt_results) resultsb.loc[1, 'trade_duration'] = 20 hl = HyperOptLossResolver.load_hyperoptloss(default_conf) - longer = hl.hyperopt_loss_function(hyperopt_results, 100, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - shorter = hl.hyperopt_loss_function(resultsb, 100, - datetime(2019, 1, 1), datetime(2019, 5, 1)) + longer = hl.hyperopt_loss_function(hyperopt_results, 100, datetime(2019, 1, 1), + datetime(2019, 5, 1)) + shorter = hl.hyperopt_loss_function(resultsb, 100, datetime(2019, 1, 1), datetime(2019, 5, 1)) assert shorter < longer @@ -323,12 +285,11 @@ def test_loss_calculation_has_limited_profit(default_conf, hyperopt_results) -> results_under['profit_percent'] = hyperopt_results['profit_percent'] / 2 hl = HyperOptLossResolver.load_hyperoptloss(default_conf) - correct = hl.hyperopt_loss_function(hyperopt_results, 600, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(results_over, 600, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(results_under, 600, - datetime(2019, 1, 1), datetime(2019, 5, 1)) + correct = hl.hyperopt_loss_function(hyperopt_results, 600, datetime(2019, 1, 1), + datetime(2019, 5, 1)) + over = hl.hyperopt_loss_function(results_over, 600, datetime(2019, 1, 1), datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(results_under, 600, datetime(2019, 1, 1), + datetime(2019, 5, 1)) assert over < correct assert under > correct @@ -343,10 +304,10 @@ def test_sharpe_loss_prefers_higher_profits(default_conf, hyperopt_results) -> N hl = HyperOptLossResolver.load_hyperoptloss(default_conf) correct = hl.hyperopt_loss_function(hyperopt_results, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), - datetime(2019, 1, 1), datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), - datetime(2019, 1, 1), datetime(2019, 5, 1)) + over = 
hl.hyperopt_loss_function(results_over, len(hyperopt_results), datetime(2019, 1, 1), + datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), datetime(2019, 1, 1), + datetime(2019, 5, 1)) assert over < correct assert under > correct @@ -361,10 +322,10 @@ def test_sharpe_loss_daily_prefers_higher_profits(default_conf, hyperopt_results hl = HyperOptLossResolver.load_hyperoptloss(default_conf) correct = hl.hyperopt_loss_function(hyperopt_results, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), - datetime(2019, 1, 1), datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), - datetime(2019, 1, 1), datetime(2019, 5, 1)) + over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), datetime(2019, 1, 1), + datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), datetime(2019, 1, 1), + datetime(2019, 5, 1)) assert over < correct assert under > correct @@ -379,10 +340,10 @@ def test_onlyprofit_loss_prefers_higher_profits(default_conf, hyperopt_results) hl = HyperOptLossResolver.load_hyperoptloss(default_conf) correct = hl.hyperopt_loss_function(hyperopt_results, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), - datetime(2019, 1, 1), datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), - datetime(2019, 1, 1), datetime(2019, 5, 1)) + over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), datetime(2019, 1, 1), + datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), datetime(2019, 1, 1), + datetime(2019, 5, 1)) assert over < correct assert under > correct @@ -390,28 +351,24 @@ def test_onlyprofit_loss_prefers_higher_profits(default_conf, hyperopt_results) def test_log_results_if_loss_improves(hyperopt, capsys) -> None: hyperopt.current_best_loss = 2 hyperopt.total_epochs = 2 - hyperopt.print_results( - { - 'is_best': True, - 'loss': 1, - 'current_epoch': 2, # This starts from 1 (in a human-friendly manner) - 'results_explanation': 'foo.', - 'is_initial_point': False - } - ) + hyperopt.print_results({ + 'is_best': True, + 'loss': 1, + 'current_epoch': 2, # This starts from 1 (in a human-friendly manner) + 'results_explanation': 'foo.', + 'is_initial_point': False + }) out, err = capsys.readouterr() assert ' 2/2: foo. 
Objective: 1.00000' in out def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None: hyperopt.current_best_loss = 2 - hyperopt.print_results( - { - 'is_best': False, - 'loss': 3, - 'current_epoch': 1, - } - ) + hyperopt.print_results({ + 'is_best': False, + 'loss': 3, + 'current_epoch': 1, + }) assert caplog.record_tuples == [] @@ -424,7 +381,7 @@ def test_save_trials_saves_trials(mocker, hyperopt, testdatadir, caplog) -> None hyperopt.save_trials(final=True) assert log_has("Saving 1 epoch.", caplog) assert log_has(f"1 epoch saved to '{trials_file}'.", caplog) - mock_dump.assert_called_once() + mock_dump.assert_called() hyperopt.trials = trials + trials hyperopt.save_trials(final=True) @@ -459,25 +416,32 @@ def test_start_calls_optimizer(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) - ) + mocker.patch('freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', - 'params': {'buy': {}, 'sell': {}, 'roi': {}, 'stoploss': 0.0}}]) - ) + 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', + MagicMock(return_value=[{ + 'loss': 1, + 'results_explanation': 'foo result', + 'params': { + 'buy': {}, + 'sell': {}, + 'roi': {}, + 'stoploss': 0.0 + } + }])) patch_exchange(mocker) # Co-test loading ticker-interval from strategy del default_conf['ticker_interval'] - default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'default', - 'hyperopt_jobs': 1, }) + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'default', + 'hyperopt_jobs': 1, + }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -490,8 +454,11 @@ def test_start_calls_optimizer(mocker, default_conf, caplog, capsys) -> None: out, err = capsys.readouterr() assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert dumper.called - # Should be called twice, once for tickerdata, once to save evaluations - assert dumper.call_count == 2 + # Should be called 3 times, from: + # 1 tickerdata + # 1 save_trials + # 1 save_opts + assert dumper.call_count == 3 assert hasattr(hyperopt.backtesting.strategy, "advise_sell") assert hasattr(hyperopt.backtesting.strategy, "advise_buy") assert hasattr(hyperopt, "max_open_trades") @@ -501,11 +468,7 @@ def test_start_calls_optimizer(mocker, default_conf, caplog, capsys) -> None: def test_format_results(hyperopt): # Test with BTC as stake_currency - trades = [ - ('ETH/BTC', 2, 2, 123), - ('LTC/BTC', 1, 1, 123), - ('XPR/BTC', -1, -2, -246) - ] + trades = [('ETH/BTC', 2, 2, 123), ('LTC/BTC', 1, 1, 123), ('XPR/BTC', -1, -2, -246)] labels = ['currency', 'profit_percent', 'profit_abs', 'trade_duration'] df = pd.DataFrame.from_records(trades, columns=labels) results_metrics = hyperopt._calculate_results_metrics(df) @@ -529,11 +492,7 @@ def test_format_results(hyperopt): assert result.find('2.0000Σ %') # Test with EUR as 
stake_currency - trades = [ - ('ETH/EUR', 2, 2, 123), - ('LTC/EUR', 1, 1, 123), - ('XPR/EUR', -1, -2, -246) - ] + trades = [('ETH/EUR', 2, 2, 123), ('LTC/EUR', 1, 1, 123), ('XPR/EUR', -1, -2, -246)] df = pd.DataFrame.from_records(trades, columns=labels) results_metrics = hyperopt._calculate_results_metrics(df) results['total_profit'] = results_metrics['total_profit'] @@ -542,32 +501,97 @@ def test_format_results(hyperopt): @pytest.mark.parametrize("spaces, expected_results", [ - (['buy'], - {'buy': True, 'sell': False, 'roi': False, 'stoploss': False, 'trailing': False}), - (['sell'], - {'buy': False, 'sell': True, 'roi': False, 'stoploss': False, 'trailing': False}), - (['roi'], - {'buy': False, 'sell': False, 'roi': True, 'stoploss': False, 'trailing': False}), - (['stoploss'], - {'buy': False, 'sell': False, 'roi': False, 'stoploss': True, 'trailing': False}), - (['trailing'], - {'buy': False, 'sell': False, 'roi': False, 'stoploss': False, 'trailing': True}), - (['buy', 'sell', 'roi', 'stoploss'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False}), - (['buy', 'sell', 'roi', 'stoploss', 'trailing'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True}), - (['buy', 'roi'], - {'buy': True, 'sell': False, 'roi': True, 'stoploss': False, 'trailing': False}), - (['all'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True}), - (['default'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False}), - (['default', 'trailing'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True}), - (['all', 'buy'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True}), - (['default', 'buy'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False}), + (['buy'], { + 'buy': True, + 'sell': False, + 'roi': False, + 'stoploss': False, + 'trailing': False + }), + (['sell'], { + 'buy': False, + 'sell': True, + 'roi': False, + 'stoploss': False, + 'trailing': False + }), + (['roi'], { + 'buy': False, + 'sell': False, + 'roi': True, + 'stoploss': False, + 'trailing': False + }), + (['stoploss'], { + 'buy': False, + 'sell': False, + 'roi': False, + 'stoploss': True, + 'trailing': False + }), + (['trailing'], { + 'buy': False, + 'sell': False, + 'roi': False, + 'stoploss': False, + 'trailing': True + }), + (['buy', 'sell', 'roi', 'stoploss'], { + 'buy': True, + 'sell': True, + 'roi': True, + 'stoploss': True, + 'trailing': False + }), + (['buy', 'sell', 'roi', 'stoploss', 'trailing'], { + 'buy': True, + 'sell': True, + 'roi': True, + 'stoploss': True, + 'trailing': True + }), + (['buy', 'roi'], { + 'buy': True, + 'sell': False, + 'roi': True, + 'stoploss': False, + 'trailing': False + }), + (['all'], { + 'buy': True, + 'sell': True, + 'roi': True, + 'stoploss': True, + 'trailing': True + }), + (['default'], { + 'buy': True, + 'sell': True, + 'roi': True, + 'stoploss': True, + 'trailing': False + }), + (['default', 'trailing'], { + 'buy': True, + 'sell': True, + 'roi': True, + 'stoploss': True, + 'trailing': True + }), + (['all', 'buy'], { + 'buy': True, + 'sell': True, + 'roi': True, + 'stoploss': True, + 'trailing': True + }), + (['default', 'buy'], { + 'buy': True, + 'sell': True, + 'roi': True, + 'stoploss': True, + 'trailing': False + }), ]) def test_has_space(hyperopt, spaces, expected_results): for s in ['buy', 'sell', 'roi', 'stoploss', 'trailing']: @@ -593,47 +617,40 @@ def test_buy_strategy_generator(hyperopt, testdatadir) -> None: 
dataframe = hyperopt.custom_hyperopt.populate_indicators(dataframes['UNITTEST/BTC'], {'pair': 'UNITTEST/BTC'}) - populate_buy_trend = hyperopt.custom_hyperopt.buy_strategy_generator( - { - 'adx-value': 20, - 'fastd-value': 20, - 'mfi-value': 20, - 'rsi-value': 20, - 'adx-enabled': True, - 'fastd-enabled': True, - 'mfi-enabled': True, - 'rsi-enabled': True, - 'trigger': 'bb_lower' - } - ) + populate_buy_trend = hyperopt.custom_hyperopt.buy_strategy_generator({ + 'adx-value': 20, + 'fastd-value': 20, + 'mfi-value': 20, + 'rsi-value': 20, + 'adx-enabled': True, + 'fastd-enabled': True, + 'mfi-enabled': True, + 'rsi-enabled': True, + 'trigger': 'bb_lower' + }) result = populate_buy_trend(dataframe, {'pair': 'UNITTEST/BTC'}) # Check if some indicators are generated. We will not test all of them assert 'buy' in result assert 1 in result['buy'] -def test_generate_optimizer(mocker, default_conf) -> None: - default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'timerange': None, - 'spaces': 'all', - 'hyperopt_min_trades': 1, - }) +def test_backtest_params(mocker, default_conf) -> None: + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'timerange': None, + 'spaces': 'all', + 'hyperopt_min_trades': 1, + }) - trades = [ - ('TRX/BTC', 0.023117, 0.000233, 100) - ] + trades = [('TRX/BTC', 0.023117, 0.000233, 100)] labels = ['currency', 'profit_percent', 'profit_abs', 'trade_duration'] backtest_result = pd.DataFrame.from_records(trades, columns=labels) - mocker.patch( - 'freqtrade.optimize.hyperopt.Backtesting.backtest', - MagicMock(return_value=backtest_result) - ) - mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(Arrow(2017, 12, 10), Arrow(2017, 12, 13))) - ) + mocker.patch('freqtrade.optimize.hyperopt.Backtesting.backtest', + MagicMock(return_value=backtest_result)) + mocker.patch('freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(Arrow(2017, 12, 10), Arrow(2017, 12, 13)))) patch_exchange(mocker) mocker.patch('freqtrade.optimize.hyperopt.load', MagicMock()) @@ -669,79 +686,100 @@ def test_generate_optimizer(mocker, default_conf) -> None: 'trailing_only_offset_is_reached': False, } response_expected = { - 'loss': 1.9840569076926293, - 'results_explanation': (' 1 trades. Avg profit 2.31%. Total profit 0.00023300 BTC ' - '( 2.31\N{GREEK CAPITAL LETTER SIGMA}%). Avg duration 100.0 min.' - ).encode(locale.getpreferredencoding(), 'replace').decode('utf-8'), - 'params_details': {'buy': {'adx-enabled': False, - 'adx-value': 0, - 'fastd-enabled': True, - 'fastd-value': 35, - 'mfi-enabled': False, - 'mfi-value': 0, - 'rsi-enabled': False, - 'rsi-value': 0, - 'trigger': 'macd_cross_signal'}, - 'roi': {0: 0.12000000000000001, - 20.0: 0.02, - 50.0: 0.01, - 110.0: 0}, - 'sell': {'sell-adx-enabled': False, - 'sell-adx-value': 0, - 'sell-fastd-enabled': True, - 'sell-fastd-value': 75, - 'sell-mfi-enabled': False, - 'sell-mfi-value': 0, - 'sell-rsi-enabled': False, - 'sell-rsi-value': 0, - 'sell-trigger': 'macd_cross_signal'}, - 'stoploss': {'stoploss': -0.4}, - 'trailing': {'trailing_only_offset_is_reached': False, - 'trailing_stop': True, - 'trailing_stop_positive': 0.02, - 'trailing_stop_positive_offset': 0.07}}, - 'params_dict': optimizer_param, - 'results_metrics': {'avg_profit': 2.3117, - 'duration': 100.0, - 'profit': 2.3117, - 'total_profit': 0.000233, - 'trade_count': 1}, - 'total_profit': 0.00023300 + 'loss': + 1.9840569076926293, + 'results_explanation': + (' 1 trades. 
Avg profit 2.31%. Total profit 0.00023300 BTC ' + '( 2.31\N{GREEK CAPITAL LETTER SIGMA}%). Avg duration 100.0 min.').encode( + locale.getpreferredencoding(), 'replace').decode('utf-8'), + 'params_details': { + 'buy': { + 'adx-enabled': False, + 'adx-value': 0, + 'fastd-enabled': True, + 'fastd-value': 35, + 'mfi-enabled': False, + 'mfi-value': 0, + 'rsi-enabled': False, + 'rsi-value': 0, + 'trigger': 'macd_cross_signal' + }, + 'roi': { + 0: 0.12000000000000001, + 20.0: 0.02, + 50.0: 0.01, + 110.0: 0 + }, + 'sell': { + 'sell-adx-enabled': False, + 'sell-adx-value': 0, + 'sell-fastd-enabled': True, + 'sell-fastd-value': 75, + 'sell-mfi-enabled': False, + 'sell-mfi-value': 0, + 'sell-rsi-enabled': False, + 'sell-rsi-value': 0, + 'sell-trigger': 'macd_cross_signal' + }, + 'stoploss': { + 'stoploss': -0.4 + }, + 'trailing': { + 'trailing_only_offset_is_reached': False, + 'trailing_stop': True, + 'trailing_stop_positive': 0.02, + 'trailing_stop_positive_offset': 0.07 + } + }, + 'params_dict': + optimizer_param, + 'results_metrics': { + 'avg_profit': 2.3117, + 'duration': 100.0, + 'profit': 2.3117, + 'total_profit': 0.000233, + 'trade_count': 1 + }, + 'total_profit': + 0.00023300 } hyperopt = Hyperopt(default_conf) hyperopt.dimensions = hyperopt.hyperopt_space() - generate_optimizer_value = hyperopt.generate_optimizer(list(optimizer_param.values())) - assert generate_optimizer_value == response_expected + backtest_params_value = hyperopt.backtest_params(list(optimizer_param.values())) + assert backtest_params_value == response_expected def test_clean_hyperopt(mocker, default_conf, caplog): patch_exchange(mocker) - default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'default', - 'hyperopt_jobs': 1, - }) + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'default', + 'hyperopt_jobs': 1, + }) mocker.patch("freqtrade.optimize.hyperopt.Path.is_file", MagicMock(return_value=True)) unlinkmock = mocker.patch("freqtrade.optimize.hyperopt.Path.unlink", MagicMock()) h = Hyperopt(default_conf) - assert unlinkmock.call_count == 2 + # once for tickerdata, once for trials, once for optimizers (list) + assert unlinkmock.call_count == 3 assert log_has(f"Removing `{h.tickerdata_pickle}`.", caplog) def test_continue_hyperopt(mocker, default_conf, caplog): patch_exchange(mocker) - default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'default', - 'hyperopt_jobs': 1, - 'hyperopt_continue': True - }) + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'default', + 'hyperopt_jobs': 1, + 'hyperopt_continue': True + }) mocker.patch("freqtrade.optimize.hyperopt.Path.is_file", MagicMock(return_value=True)) unlinkmock = mocker.patch("freqtrade.optimize.hyperopt.Path.unlink", MagicMock()) Hyperopt(default_conf) @@ -754,29 +792,42 @@ def test_print_json_spaces_all(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) - ) + 
mocker.patch('freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', 'params': {}, - 'params_details': {'buy': {'mfi-value': None}, - 'sell': {'sell-mfi-value': None}, - 'roi': {}, 'stoploss': {'stoploss': None}, - 'trailing': {'trailing_stop': None}}}]) - ) + 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', + MagicMock(return_value=[{ + 'loss': 1, + 'results_explanation': 'foo result', + 'params': {}, + 'params_details': { + 'buy': { + 'mfi-value': None + }, + 'sell': { + 'sell-mfi-value': None + }, + 'roi': {}, + 'stoploss': { + 'stoploss': None + }, + 'trailing': { + 'trailing_stop': None + } + } + }])) patch_exchange(mocker) - default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'all', - 'hyperopt_jobs': 1, - 'print_json': True, - }) + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'all', + 'hyperopt_jobs': 1, + 'print_json': True, + }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -789,36 +840,50 @@ def test_print_json_spaces_all(mocker, default_conf, caplog, capsys) -> None: out, err = capsys.readouterr() assert '{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi":{},"stoploss":null,"trailing_stop":null}' in out # noqa: E501 assert dumper.called - # Should be called twice, once for tickerdata, once to save evaluations - assert dumper.call_count == 2 + # Should be called 3 times from: + # 1 tickerdata + # 1 save_trials + # 1 save_opts + assert dumper.call_count == 3 def test_print_json_spaces_default(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) - ) + mocker.patch('freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', 'params': {}, - 'params_details': {'buy': {'mfi-value': None}, - 'sell': {'sell-mfi-value': None}, - 'roi': {}, 'stoploss': {'stoploss': None}}}]) - ) + 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', + MagicMock(return_value=[{ + 'loss': 1, + 'results_explanation': 'foo result', + 'params': {}, + 'params_details': { + 'buy': { + 'mfi-value': None + }, + 'sell': { + 'sell-mfi-value': None + }, + 'roi': {}, + 'stoploss': { + 'stoploss': None + } + } + }])) patch_exchange(mocker) - default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'default', - 'hyperopt_jobs': 1, - 'print_json': True, - }) + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'default', + 'hyperopt_jobs': 1, + 'print_json': True, + }) hyperopt = Hyperopt(default_conf) 
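    # Note: each result dict yielded by run_backtest_parallel is assumed to
    # carry at least 'loss', 'results_explanation' and 'params' (plus
    # 'params_details' when all spaces are hyperopted); the MagicMock above
    # fakes one such epoch so the JSON printer can be exercised without
    # running a real backtest.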
hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -831,34 +896,44 @@ def test_print_json_spaces_default(mocker, default_conf, caplog, capsys) -> None out, err = capsys.readouterr() assert '{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi":{},"stoploss":null}' in out # noqa: E501 assert dumper.called - # Should be called twice, once for tickerdata, once to save evaluations - assert dumper.call_count == 2 + # Should be called three times, from: + # 1 tickerdata + # 1 save_trials + # 1 save_opts + assert dumper.call_count == 3 def test_print_json_spaces_roi_stoploss(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) - ) + mocker.patch('freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', 'params': {}, - 'params_details': {'roi': {}, 'stoploss': {'stoploss': None}}}]) - ) + 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', + MagicMock(return_value=[{ + 'loss': 1, + 'results_explanation': 'foo result', + 'params': {}, + 'params_details': { + 'roi': {}, + 'stoploss': { + 'stoploss': None + } + } + }])) patch_exchange(mocker) - default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'roi stoploss', - 'hyperopt_jobs': 1, - 'print_json': True, - }) + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'roi stoploss', + 'hyperopt_jobs': 1, + 'print_json': True, + }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -871,32 +946,39 @@ def test_print_json_spaces_roi_stoploss(mocker, default_conf, caplog, capsys) -> out, err = capsys.readouterr() assert '{"minimal_roi":{},"stoploss":null}' in out assert dumper.called - # Should be called twice, once for tickerdata, once to save evaluations - assert dumper.call_count == 2 + # Should be called three times from: + # 1 for tickerdata + # 1 for save_trials + # 1 for save_opts + assert dumper.call_count == 3 def test_simplified_interface_roi_stoploss(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) - ) + mocker.patch('freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', + 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', MagicMock(return_value=[{ - 'loss': 1, 'results_explanation': 'foo result', 'params': {'stoploss': 0.0}}]) - ) + 'loss': 1, + 'results_explanation': 'foo result', + 'params': { + 'stoploss': 0.0 + } + }])) patch_exchange(mocker) - 
default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'roi stoploss', - 'hyperopt_jobs': 1, }) + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'roi stoploss', + 'hyperopt_jobs': 1, + }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -914,8 +996,11 @@ def test_simplified_interface_roi_stoploss(mocker, default_conf, caplog, capsys) out, err = capsys.readouterr() assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert dumper.called - # Should be called twice, once for tickerdata, once to save evaluations - assert dumper.call_count == 2 + # Should be called three times, from: + # 1 for tickerdata + # 1 for save_trials + # 1 for save_opts + assert dumper.call_count == 3 assert hasattr(hyperopt.backtesting.strategy, "advise_sell") assert hasattr(hyperopt.backtesting.strategy, "advise_buy") assert hasattr(hyperopt, "max_open_trades") @@ -927,19 +1012,19 @@ def test_simplified_interface_all_failed(mocker, default_conf, caplog, capsys) - mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) - ) + mocker.patch('freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) patch_exchange(mocker) - default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'all', - 'hyperopt_jobs': 1, }) + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'all', + 'hyperopt_jobs': 1, + }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -958,30 +1043,33 @@ def test_simplified_interface_buy(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) - ) + mocker.patch('freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', 'params': {}}]) - ) + 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', + MagicMock(return_value=[{ + 'loss': 1, + 'results_explanation': 'foo result', + 'params': {} + }])) patch_exchange(mocker) - default_conf.update({'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'buy', - 'hyperopt_jobs': 1, }) + default_conf.update({ + 'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'buy', + 'hyperopt_jobs': 1, + }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() hyperopt.custom_hyperopt.generate_roi_table 
= MagicMock(return_value={})

     # TODO: sell_strategy_generator() is actually not called because
-    # run_optimizer_parallel() is mocked
+    # run_backtest_parallel() is mocked
     del hyperopt.custom_hyperopt.__class__.sell_strategy_generator
     del hyperopt.custom_hyperopt.__class__.sell_indicator_space

@@ -992,8 +1080,11 @@ def test_simplified_interface_buy(mocker, default_conf, caplog, capsys) -> None:
     out, err = capsys.readouterr()
     assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
     assert dumper.called
-    # Should be called twice, once for tickerdata, once to save evaluations
-    assert dumper.call_count == 2
+    # Should be called three times, from:
+    # 1 tickerdata
+    # 1 save_trials
+    # 1 save_opts
+    assert dumper.call_count == 3
     assert hasattr(hyperopt.backtesting.strategy, "advise_sell")
     assert hasattr(hyperopt.backtesting.strategy, "advise_buy")
     assert hasattr(hyperopt, "max_open_trades")
@@ -1005,30 +1096,33 @@ def test_simplified_interface_sell(mocker, default_conf, caplog, capsys) -> None
     dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock())
     mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
                  MagicMock(return_value=(MagicMock(), None)))
-    mocker.patch(
-        'freqtrade.optimize.hyperopt.get_timerange',
-        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))
-    )
+    mocker.patch('freqtrade.optimize.hyperopt.get_timerange',
+                 MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))))
     parallel = mocker.patch(
-        'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel',
-        MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', 'params': {}}])
-    )
+        'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel',
+        MagicMock(return_value=[{
+            'loss': 1,
+            'results_explanation': 'foo result',
+            'params': {}
+        }]))
     patch_exchange(mocker)

-    default_conf.update({'config': 'config.json.example',
-                         'hyperopt': 'DefaultHyperOpt',
-                         'epochs': 1,
-                         'timerange': None,
-                         'spaces': 'sell',
-                         'hyperopt_jobs': 1, })
+    default_conf.update({
+        'config': 'config.json.example',
+        'hyperopt': 'DefaultHyperOpt',
+        'epochs': 1,
+        'timerange': None,
+        'spaces': 'sell',
+        'hyperopt_jobs': 1,
+    })

     hyperopt = Hyperopt(default_conf)
     hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock()
     hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})

     # TODO: buy_strategy_generator() is actually not called because
-    # run_optimizer_parallel() is mocked
+    # run_backtest_parallel() is mocked
     del hyperopt.custom_hyperopt.__class__.buy_strategy_generator
     del hyperopt.custom_hyperopt.__class__.indicator_space

@@ -1039,8 +1133,11 @@ def test_simplified_interface_sell(mocker, default_conf, caplog, capsys) -> None
     out, err = capsys.readouterr()
     assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
     assert dumper.called
-    # Should be called twice, once for tickerdata, once to save evaluations
-    assert dumper.call_count == 2
+    # Should be called three times, from:
+    # 1 tickerdata
+    # 1 save_trials
+    # 1 save_opts
+    assert dumper.call_count == 3
     assert hasattr(hyperopt.backtesting.strategy, "advise_sell")
     assert hasattr(hyperopt.backtesting.strategy, "advise_buy")
     assert hasattr(hyperopt, "max_open_trades")
@@ -1058,19 +1155,19 @@ def test_simplified_interface_failed(mocker, default_conf, caplog, capsys, metho
     mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock())
     mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data',
                  MagicMock(return_value=(MagicMock(), None)))
-    
mocker.patch(
-        'freqtrade.optimize.hyperopt.get_timerange',
-        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))
-    )
+    mocker.patch('freqtrade.optimize.hyperopt.get_timerange',
+                 MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))))
     patch_exchange(mocker)

-    default_conf.update({'config': 'config.json.example',
-                         'hyperopt': 'DefaultHyperOpt',
-                         'epochs': 1,
-                         'timerange': None,
-                         'spaces': space,
-                         'hyperopt_jobs': 1, })
+    default_conf.update({
+        'config': 'config.json.example',
+        'hyperopt': 'DefaultHyperOpt',
+        'epochs': 1,
+        'timerange': None,
+        'spaces': space,
+        'hyperopt_jobs': 1,
+    })

     hyperopt = Hyperopt(default_conf)
     hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock()

From daba9d157b40e91dedbabbaf1b98dd7d0bfa5094 Mon Sep 17 00:00:00 2001
From: orehunt
Date: Mon, 2 Mar 2020 09:18:55 +0100
Subject: [PATCH 04/21] jobs config fixes

---
 freqtrade/optimize/hyperopt.py | 38 +++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py
index 7385cb7ce..5919a37e0 100644
--- a/freqtrade/optimize/hyperopt.py
+++ b/freqtrade/optimize/hyperopt.py
@@ -51,17 +51,11 @@ with warnings.catch_warnings():

 logger = logging.getLogger(__name__)

-INITIAL_POINTS = 30
-
-# Keep no more than 2*SKOPT_MODELS_MAX_NUM models
-# in the skopt models list
-SKOPT_MODELS_MAX_NUM = 10
-
 # supported strategies when asking for multiple points to the optimizer
 NEXT_POINT_METHODS = ["cl_min", "cl_mean", "cl_max"]
 NEXT_POINT_METHODS_LENGTH = 3

-MAX_LOSS = 100000  # just a big enough number to be a bad result in loss optimization
+MAX_LOSS = 10000  # just a big enough number to be a bad result in loss optimization


 class Hyperopt:
@@ -90,6 +84,8 @@ class Hyperopt:
         self.tickerdata_pickle = (self.config['user_data_dir'] / 'hyperopt_results' /
                                   'hyperopt_tickerdata.pkl')
         self.n_jobs = self.config.get('hyperopt_jobs', -1)
+        if self.n_jobs < 0:
+            self.n_jobs = cpu_count() // 2 or 1
         self.effort = self.config['effort'] if 'effort' in self.config else 0
         self.total_epochs = self.config['epochs'] if 'epochs' in self.config else 0
         self.max_epoch = 0
@@ -143,6 +139,7 @@ class Hyperopt:
         self.n_points = 1
         self.opt_base_estimator = 'DUMMY'
         self.opt_acq_optimizer = 'sampling'
+        self.n_models = max(16, self.n_jobs)

         # Populate functions here (hasattr is slow so should not be run during "regular" operations)
         if hasattr(self.custom_hyperopt, 'populate_indicators'):
@@ -560,10 +557,8 @@ class Hyperopt:
         to increase the diversity of the searches of each optimizer
         """
         return NEXT_POINT_METHODS[random.randrange(0, NEXT_POINT_METHODS_LENGTH)]
-    def get_optimizer(self,
-                      dimensions: List[Dimension],
-                      n_jobs: int,
-                      n_initial_points=INITIAL_POINTS) -> Optimizer:
+    def get_optimizer(self, dimensions: List[Dimension], n_jobs: int,
+                      n_initial_points: int) -> Optimizer:
         " Construct an optimizer object "
         # https://github.com/scikit-learn/scikit-learn/issues/14265
         # lbfgs uses joblib threading backend so n_jobs has to be reduced
@@ -580,7 +575,7 @@ class Hyperopt:
                 'xi': 0.00001,
                 'kappa': 0.00001
             },
-            model_queue_size=SKOPT_MODELS_MAX_NUM,
+            model_queue_size=self.n_models,
             random_state=self.random_state,
         )
@@ -686,12 +681,13 @@ class Hyperopt:
             print('.', end='')
             sys.stdout.flush()

-    def log_results(self, f_val, frame_start, total_epochs: int) -> None:
+    def log_results(self, f_val, frame_start, total_epochs: int) -> int:
         """
         Log results if it is better than any previous evaluation
         """
         print()
         current = 
frame_start + 1
+        i = 0
         for i, v in enumerate(f_val):
             is_best = self.is_best_loss(v, self.current_best_loss)
             current = frame_start + i + 1
@@ -709,6 +705,7 @@
             # give up if no best since max epochs
             if current + 1 > self.epochs_limit():
                 self.max_epoch_reached = True
+        return i

     @staticmethod
     def load_previous_results(trials_file: Path) -> List:
@@ -871,9 +868,11 @@
         jobs_scheduler = self.run_backtest_parallel
         with parallel_backend('loky', inner_max_num_threads=2):
             with Parallel(n_jobs=self.n_jobs, verbose=0, backend='loky') as parallel:
-                while True:
-                    # update epochs count
-                    epochs_so_far = len(self.trials)
+                # update epochs count
+                prev_batch = -1
+                epochs_so_far = len(self.trials)
+                while prev_batch < epochs_so_far:
+                    prev_batch = epochs_so_far
                     # pad the batch length to the number of jobs to avoid desaturation
                     batch_len = (self.avg_best_occurrence + self.n_jobs -
                                  self.avg_best_occurrence % self.n_jobs)
@@ -888,7 +887,12 @@
                             f"/{self.epochs_limit()}: ", end='')
                     f_val = jobs_scheduler(parallel, batch_len, epochs_so_far,
                                            self.n_jobs)
-                    self.log_results(f_val, epochs_so_far, self.epochs_limit())
+                    saved = self.log_results(f_val, epochs_so_far, self.epochs_limit())
+                    # stop if no epochs have been evaluated
+                    if not saved or batch_len < 1:
+                        break
+                    # log_results returns the number of epochs it saved
+                    epochs_so_far += saved
                     if self.max_epoch_reached:
                         logger.info("Max epoch reached, terminating.")
                         break

From d6c66a54fd8f5b9712e3472ccd827c37043a8359 Mon Sep 17 00:00:00 2001
From: orehunt
Date: Tue, 3 Mar 2020 07:57:13 +0100
Subject: [PATCH 05/21] deformat

---
 freqtrade/commands/arguments.py | 116 ++--
 freqtrade/commands/cli_options.py | 291 +++------
 freqtrade/configuration/configuration.py | 164 ++---
 freqtrade/constants.py | 403 +++---------
 freqtrade/optimize/hyperopt.py | 103 ++-
 setup.cfg | 4 -
 tests/optimize/test_hyperopt.py | 784 ++++++++++-------------
 7 files changed, 726 insertions(+), 1139 deletions(-)

diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py
index da5015f73..323a556f8 100644
--- a/freqtrade/commands/arguments.py
+++ b/freqtrade/commands/arguments.py
@@ -15,7 +15,8 @@ ARGS_STRATEGY = ["strategy", "strategy_path"]

 ARGS_TRADE = ["db_url", "sd_notify", "dry_run"]

-ARGS_COMMON_OPTIMIZE = ["ticker_interval", "timerange", "max_open_trades", "stake_amount", "fee"]
+ARGS_COMMON_OPTIMIZE = ["ticker_interval", "timerange",
+                        "max_open_trades", "stake_amount", "fee"]

 ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [
     "position_stacking", "use_max_market_positions", "strategy_list", "export", "exportfilename"
 ]
@@ -38,10 +39,8 @@ ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all"]

 ARGS_LIST_TIMEFRAMES = ["exchange", "print_one_column"]

-ARGS_LIST_PAIRS = [
-    "exchange", "print_list", "list_pairs_print_json", "print_one_column", "print_csv",
-    "base_currencies", "quote_currencies", "list_pairs_all"
-]
+ARGS_LIST_PAIRS = ["exchange", "print_list", "list_pairs_print_json", "print_one_column",
+                   "print_csv", "base_currencies", "quote_currencies", "list_pairs_all"]

 ARGS_TEST_PAIRLIST = ["config", "quote_currencies", "print_one_column", "list_pairs_print_json"]

@@ -56,38 +55,30 @@ ARGS_BUILD_HYPEROPT = ["user_data_dir", "hyperopt", "template"]

 ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase"]
 ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes"]

-ARGS_DOWNLOAD_DATA = [
-    "pairs", "pairs_file", "days", "download_trades", "exchange", "timeframes", "erase",
-    "dataformat_ohlcv", "dataformat_trades"
-]
+ARGS_DOWNLOAD_DATA = ["pairs",
"pairs_file", "days", "download_trades", "exchange", + "timeframes", "erase", "dataformat_ohlcv", "dataformat_trades"] -ARGS_PLOT_DATAFRAME = [ - "pairs", "indicators1", "indicators2", "plot_limit", "db_url", "trade_source", "export", - "exportfilename", "timerange", "ticker_interval" -] +ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", "plot_limit", + "db_url", "trade_source", "export", "exportfilename", + "timerange", "ticker_interval"] -ARGS_PLOT_PROFIT = [ - "pairs", "timerange", "export", "exportfilename", "db_url", "trade_source", "ticker_interval" -] +ARGS_PLOT_PROFIT = ["pairs", "timerange", "export", "exportfilename", "db_url", + "trade_source", "ticker_interval"] -ARGS_HYPEROPT_LIST = [ - "hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_list_min_trades", - "hyperopt_list_max_trades", "hyperopt_list_min_avg_time", "hyperopt_list_max_avg_time", - "hyperopt_list_min_avg_profit", "hyperopt_list_max_avg_profit", - "hyperopt_list_min_total_profit", "hyperopt_list_max_total_profit", "print_colorized", - "print_json", "hyperopt_list_no_details" -] +ARGS_HYPEROPT_LIST = ["hyperopt_list_best", "hyperopt_list_profitable", + "hyperopt_list_min_trades", "hyperopt_list_max_trades", + "hyperopt_list_min_avg_time", "hyperopt_list_max_avg_time", + "hyperopt_list_min_avg_profit", "hyperopt_list_max_avg_profit", + "hyperopt_list_min_total_profit", "hyperopt_list_max_total_profit", + "print_colorized", "print_json", "hyperopt_list_no_details"] -ARGS_HYPEROPT_SHOW = [ - "hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_show_index", "print_json", - "hyperopt_show_no_header" -] +ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_show_index", + "print_json", "hyperopt_show_no_header"] -NO_CONF_REQURIED = [ - "convert-data", "convert-trade-data", "download-data", "list-timeframes", "list-markets", - "list-pairs", "list-strategies", "list-hyperopts", "hyperopt-list", "hyperopt-show", - "plot-dataframe", "plot-profit" -] +NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes", + "list-markets", "list-pairs", "list-strategies", + "list-hyperopts", "hyperopt-list", "hyperopt-show", + "plot-dataframe", "plot-profit"] NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-hyperopt", "new-strategy"] @@ -96,6 +87,7 @@ class Arguments: """ Arguments Class. 
Manage the arguments received by the cli """ + def __init__(self, args: Optional[List[str]]) -> None: self.args = args self._parsed_arg: Optional[argparse.Namespace] = None @@ -164,70 +156,70 @@ class Arguments: self.parser = argparse.ArgumentParser(description='Free, open source crypto trading bot') self._build_args(optionlist=['version'], parser=self.parser) - from freqtrade.commands import ( - start_create_userdir, start_convert_data, start_download_data, start_hyperopt_list, - start_hyperopt_show, start_list_exchanges, start_list_hyperopts, start_list_markets, - start_list_strategies, start_list_timeframes, start_new_config, start_new_hyperopt, - start_new_strategy, start_plot_dataframe, start_plot_profit, start_backtesting, - start_hyperopt, start_edge, start_test_pairlist, start_trading) + from freqtrade.commands import (start_create_userdir, start_convert_data, + start_download_data, + start_hyperopt_list, start_hyperopt_show, + start_list_exchanges, start_list_hyperopts, + start_list_markets, start_list_strategies, + start_list_timeframes, start_new_config, + start_new_hyperopt, start_new_strategy, + start_plot_dataframe, start_plot_profit, + start_backtesting, start_hyperopt, start_edge, + start_test_pairlist, start_trading) - subparsers = self.parser.add_subparsers( - dest='command', - # Use custom message when no subhandler is added - # shown from `main.py` - # required=True - ) + subparsers = self.parser.add_subparsers(dest='command', + # Use custom message when no subhandler is added + # shown from `main.py` + # required=True + ) # Add trade subcommand - trade_cmd = subparsers.add_parser('trade', - help='Trade module.', + trade_cmd = subparsers.add_parser('trade', help='Trade module.', parents=[_common_parser, _strategy_parser]) trade_cmd.set_defaults(func=start_trading) self._build_args(optionlist=ARGS_TRADE, parser=trade_cmd) # Add backtesting subcommand - backtesting_cmd = subparsers.add_parser('backtesting', - help='Backtesting module.', + backtesting_cmd = subparsers.add_parser('backtesting', help='Backtesting module.', parents=[_common_parser, _strategy_parser]) backtesting_cmd.set_defaults(func=start_backtesting) self._build_args(optionlist=ARGS_BACKTEST, parser=backtesting_cmd) # Add edge subcommand - edge_cmd = subparsers.add_parser('edge', - help='Edge module.', + edge_cmd = subparsers.add_parser('edge', help='Edge module.', parents=[_common_parser, _strategy_parser]) edge_cmd.set_defaults(func=start_edge) self._build_args(optionlist=ARGS_EDGE, parser=edge_cmd) # Add hyperopt subcommand - hyperopt_cmd = subparsers.add_parser( - 'hyperopt', - help='Hyperopt module.', - parents=[_common_parser, _strategy_parser], - ) + hyperopt_cmd = subparsers.add_parser('hyperopt', help='Hyperopt module.', + parents=[_common_parser, _strategy_parser], + ) hyperopt_cmd.set_defaults(func=start_hyperopt) self._build_args(optionlist=ARGS_HYPEROPT, parser=hyperopt_cmd) # add create-userdir subcommand - create_userdir_cmd = subparsers.add_parser( - 'create-userdir', - help="Create user-data directory.", - ) + create_userdir_cmd = subparsers.add_parser('create-userdir', + help="Create user-data directory.", + ) create_userdir_cmd.set_defaults(func=start_create_userdir) self._build_args(optionlist=ARGS_CREATE_USERDIR, parser=create_userdir_cmd) # add new-config subcommand - build_config_cmd = subparsers.add_parser('new-config', help="Create new config") + build_config_cmd = subparsers.add_parser('new-config', + help="Create new config") build_config_cmd.set_defaults(func=start_new_config) 
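        # Each subcommand registers its handler through set_defaults(func=...);
        # the entry point is then assumed to dispatch along the lines of:
        #   args = Arguments(sys.argv[1:]).get_parsed_arg()
        #   args['func'](args)
        # (illustrative sketch; the actual dispatch lives in freqtrade.main)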
self._build_args(optionlist=ARGS_BUILD_CONFIG, parser=build_config_cmd) # add new-strategy subcommand - build_strategy_cmd = subparsers.add_parser('new-strategy', help="Create new strategy") + build_strategy_cmd = subparsers.add_parser('new-strategy', + help="Create new strategy") build_strategy_cmd.set_defaults(func=start_new_strategy) self._build_args(optionlist=ARGS_BUILD_STRATEGY, parser=build_strategy_cmd) # add new-hyperopt subcommand - build_hyperopt_cmd = subparsers.add_parser('new-hyperopt', help="Create new hyperopt") + build_hyperopt_cmd = subparsers.add_parser('new-hyperopt', + help="Create new hyperopt") build_hyperopt_cmd.set_defaults(func=start_new_hyperopt) self._build_args(optionlist=ARGS_BUILD_HYPEROPT, parser=build_hyperopt_cmd) diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index acb629cf9..0aac462dd 100644 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -13,7 +13,8 @@ def check_int_positive(value: str) -> int: raise ValueError except ValueError: raise ArgumentTypeError( - f"{value} is invalid for this parameter, should be a positive integer value") + f"{value} is invalid for this parameter, should be a positive integer value" + ) return uint @@ -24,7 +25,8 @@ def check_int_nonzero(value: str) -> int: raise ValueError except ValueError: raise ArgumentTypeError( - f"{value} is invalid for this parameter, should be a non-zero integer value") + f"{value} is invalid for this parameter, should be a non-zero integer value" + ) return uint @@ -38,32 +40,25 @@ class Arg: # List of available command line options AVAILABLE_CLI_OPTIONS = { # Common options - "verbosity": - Arg( - '-v', - '--verbose', + "verbosity": Arg( + '-v', '--verbose', help='Verbose mode (-vv for more, -vvv to get all messages).', action='count', default=0, ), - "logfile": - Arg( + "logfile": Arg( '--logfile', help="Log to the file specified. Special values are: 'syslog', 'journald'. " - "See the documentation for more details.", + "See the documentation for more details.", metavar='FILE', ), - "version": - Arg( - '-V', - '--version', + "version": Arg( + '-V', '--version', action='version', version=f'%(prog)s {__version__}', ), - "config": - Arg( - '-c', - '--config', + "config": Arg( + '-c', '--config', help=f'Specify configuration file (default: `userdir/{constants.DEFAULT_CONFIG}` ' f'or `config.json` whichever exists). ' f'Multiple --config options may be used. 
' @@ -71,105 +66,84 @@ AVAILABLE_CLI_OPTIONS = { action='append', metavar='PATH', ), - "datadir": - Arg( - '-d', - '--datadir', + "datadir": Arg( + '-d', '--datadir', help='Path to directory with historical backtesting data.', metavar='PATH', ), - "user_data_dir": - Arg( - '--userdir', - '--user-data-dir', + "user_data_dir": Arg( + '--userdir', '--user-data-dir', help='Path to userdata directory.', metavar='PATH', ), - "reset": - Arg( + "reset": Arg( '--reset', help='Reset sample files to their original state.', action='store_true', ), # Main options - "strategy": - Arg( - '-s', - '--strategy', + "strategy": Arg( + '-s', '--strategy', help='Specify strategy class name which will be used by the bot.', metavar='NAME', ), - "strategy_path": - Arg( + "strategy_path": Arg( '--strategy-path', help='Specify additional strategy lookup path.', metavar='PATH', ), - "db_url": - Arg( + "db_url": Arg( '--db-url', help=f'Override trades database URL, this is useful in custom deployments ' f'(default: `{constants.DEFAULT_DB_PROD_URL}` for Live Run mode, ' f'`{constants.DEFAULT_DB_DRYRUN_URL}` for Dry Run).', metavar='PATH', ), - "sd_notify": - Arg( + "sd_notify": Arg( '--sd-notify', help='Notify systemd service manager.', action='store_true', ), - "dry_run": - Arg( + "dry_run": Arg( '--dry-run', help='Enforce dry-run for trading (removes Exchange secrets and simulates trades).', action='store_true', ), # Optimize common - "ticker_interval": - Arg( - '-i', - '--ticker-interval', + "ticker_interval": Arg( + '-i', '--ticker-interval', help='Specify ticker interval (`1m`, `5m`, `30m`, `1h`, `1d`).', ), - "timerange": - Arg( + "timerange": Arg( '--timerange', help='Specify what timerange of data to use.', ), - "max_open_trades": - Arg( + "max_open_trades": Arg( '--max-open-trades', help='Override the value of the `max_open_trades` configuration setting.', type=int, metavar='INT', ), - "stake_amount": - Arg( + "stake_amount": Arg( '--stake-amount', help='Override the value of the `stake_amount` configuration setting.', type=float, ), # Backtesting - "position_stacking": - Arg( - '--eps', - '--enable-position-stacking', + "position_stacking": Arg( + '--eps', '--enable-position-stacking', help='Allow buying the same pair multiple times (position stacking).', action='store_true', default=False, ), - "use_max_market_positions": - Arg( - '--dmmp', - '--disable-max-market-positions', + "use_max_market_positions": Arg( + '--dmmp', '--disable-max-market-positions', help='Disable applying `max_open_trades` during backtest ' '(same as setting `max_open_trades` to a very high number).', action='store_false', default=True, ), - "strategy_list": - Arg( + "strategy_list": Arg( '--strategy-list', help='Provide a space-separated list of strategies to backtest. ' 'Please note that ticker-interval needs to be set either in config ' @@ -178,52 +152,44 @@ AVAILABLE_CLI_OPTIONS = { '(so `backtest-data.json` becomes `backtest-data-DefaultStrategy.json`', nargs='+', ), - "export": - Arg( + "export": Arg( '--export', help='Export backtest results, argument are: trades. ' 'Example: `--export=trades`', ), - "exportfilename": - Arg( + "exportfilename": Arg( '--export-filename', help='Save backtest results to the file with this filename. ' 'Requires `--export` to be set as well. ' 'Example: `--export-filename=user_data/backtest_results/backtest_today.json`', metavar='PATH', ), - "fee": - Arg( + "fee": Arg( '--fee', help='Specify fee ratio. 
Will be applied twice (on trade entry and exit).', type=float, metavar='FLOAT', ), # Edge - "stoploss_range": - Arg( + "stoploss_range": Arg( '--stoplosses', help='Defines a range of stoploss values against which edge will assess the strategy. ' 'The format is "min,max,step" (without any space). ' 'Example: `--stoplosses=-0.01,-0.1,-0.001`', ), # Hyperopt - "hyperopt": - Arg( + "hyperopt": Arg( '--hyperopt', help='Specify hyperopt class name which will be used by the bot.', metavar='NAME', ), - "hyperopt_path": - Arg( + "hyperopt_path": Arg( '--hyperopt-path', help='Specify additional lookup path for Hyperopt and Hyperopt Loss functions.', metavar='PATH', ), - "epochs": - Arg( - '-e', - '--epochs', + "epochs": Arg( + '-e', '--epochs', help='Specify number of epochs (default: %(default)d).', type=check_int_positive, metavar='INT', @@ -260,32 +226,27 @@ AVAILABLE_CLI_OPTIONS = { nargs='+', default='default', ), - "print_all": - Arg( + "print_all": Arg( '--print-all', help='Print all results, not only the best ones.', action='store_true', default=False, ), - "print_colorized": - Arg( + "print_colorized": Arg( '--no-color', help='Disable colorization of hyperopt results. May be useful if you are ' 'redirecting output to a file.', action='store_false', default=True, ), - "print_json": - Arg( + "print_json": Arg( '--print-json', help='Print best result detailization in JSON format.', action='store_true', default=False, ), - "hyperopt_jobs": - Arg( - '-j', - '--job-workers', + "hyperopt_jobs": Arg( + '-j', '--job-workers', help='The number of concurrently running jobs for hyperoptimization ' '(hyperopt worker processes). ' 'If -1 (default), all CPUs are used, for -2, all CPUs but one are used, etc. ' @@ -294,15 +255,13 @@ AVAILABLE_CLI_OPTIONS = { metavar='JOBS', default=-1, ), - "hyperopt_random_state": - Arg( + "hyperopt_random_state": Arg( '--random-state', help='Set random state to some positive integer for reproducible hyperopt results.', type=check_int_positive, metavar='INT', ), - "hyperopt_min_trades": - Arg( + "hyperopt_min_trades": Arg( '--min-trades', help="Set minimal desired number of trades for evaluations in the hyperopt " "optimization path (default: 1).", @@ -310,16 +269,14 @@ AVAILABLE_CLI_OPTIONS = { metavar='INT', default=1, ), - "hyperopt_continue": - Arg( + "hyperopt_continue": Arg( "--continue", help="Continue hyperopt from previous runs. " "By default, temporary files will be removed and hyperopt will start from scratch.", default=False, action='store_true', ), - "hyperopt_loss": - Arg( + "hyperopt_loss": Arg( '--hyperopt-loss', help='Specify the class name of the hyperopt loss function class (IHyperOptLoss). ' 'Different functions can generate completely different results, ' @@ -331,143 +288,121 @@ AVAILABLE_CLI_OPTIONS = { default=constants.DEFAULT_HYPEROPT_LOSS, ), # List exchanges - "print_one_column": - Arg( - '-1', - '--one-column', + "print_one_column": Arg( + '-1', '--one-column', help='Print output in one column.', action='store_true', ), - "list_exchanges_all": - Arg( - '-a', - '--all', + "list_exchanges_all": Arg( + '-a', '--all', help='Print all exchanges known to the ccxt library.', action='store_true', ), # List pairs / markets - "list_pairs_all": - Arg( - '-a', - '--all', + "list_pairs_all": Arg( + '-a', '--all', help='Print all pairs or market symbols. By default only active ' - 'ones are shown.', + 'ones are shown.', action='store_true', ), - "print_list": - Arg( + "print_list": Arg( '--print-list', help='Print list of pairs or market symbols. 
By default data is ' - 'printed in the tabular format.', + 'printed in the tabular format.', action='store_true', ), - "list_pairs_print_json": - Arg( + "list_pairs_print_json": Arg( '--print-json', help='Print list of pairs or market symbols in JSON format.', action='store_true', default=False, ), - "print_csv": - Arg( + "print_csv": Arg( '--print-csv', help='Print exchange pair or market data in the csv format.', action='store_true', ), - "quote_currencies": - Arg( + "quote_currencies": Arg( '--quote', help='Specify quote currency(-ies). Space-separated list.', nargs='+', metavar='QUOTE_CURRENCY', ), - "base_currencies": - Arg( + "base_currencies": Arg( '--base', help='Specify base currency(-ies). Space-separated list.', nargs='+', metavar='BASE_CURRENCY', ), # Script options - "pairs": - Arg( - '-p', - '--pairs', + "pairs": Arg( + '-p', '--pairs', help='Show profits for only these pairs. Pairs are space-separated.', nargs='+', ), # Download data - "pairs_file": - Arg( + "pairs_file": Arg( '--pairs-file', help='File containing a list of pairs to download.', metavar='FILE', ), - "days": - Arg( + "days": Arg( '--days', help='Download data for given number of days.', type=check_int_positive, metavar='INT', ), - "download_trades": - Arg( + "download_trades": Arg( '--dl-trades', help='Download trades instead of OHLCV data. The bot will resample trades to the ' - 'desired timeframe as specified as --timeframes/-t.', + 'desired timeframe as specified as --timeframes/-t.', action='store_true', ), - "format_from": - Arg( + "format_from": Arg( '--format-from', help='Source format for data conversion.', choices=constants.AVAILABLE_DATAHANDLERS, required=True, ), - "format_to": - Arg( + "format_to": Arg( '--format-to', help='Destination format for data conversion.', choices=constants.AVAILABLE_DATAHANDLERS, required=True, ), - "dataformat_ohlcv": - Arg('--data-format-ohlcv', + "dataformat_ohlcv": Arg( + '--data-format-ohlcv', help='Storage format for downloaded ohlcv data. (default: `%(default)s`).', choices=constants.AVAILABLE_DATAHANDLERS, - default='json'), - "dataformat_trades": - Arg('--data-format-trades', + default='json' + ), + "dataformat_trades": Arg( + '--data-format-trades', help='Storage format for downloaded trades data. (default: `%(default)s`).', choices=constants.AVAILABLE_DATAHANDLERS, - default='jsongz'), - "exchange": - Arg( + default='jsongz' + ), + "exchange": Arg( '--exchange', help=f'Exchange name (default: `{constants.DEFAULT_EXCHANGE}`). ' f'Only valid if no config is provided.', ), - "timeframes": - Arg( - '-t', - '--timeframes', + "timeframes": Arg( + '-t', '--timeframes', help=f'Specify which tickers to download. Space-separated list. ' f'Default: `1m 5m`.', - choices=[ - '1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h', '6h', '8h', '12h', '1d', '3d', '1w' - ], + choices=['1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h', + '6h', '8h', '12h', '1d', '3d', '1w'], default=['1m', '5m'], nargs='+', ), - "erase": - Arg( + "erase": Arg( '--erase', help='Clean all existing data for the selected exchange/pairs/timeframes.', action='store_true', ), # Templating options - "template": - Arg( + "template": Arg( '--template', help='Use a template which is either `minimal` or ' '`full` (containing multiple sample indicators). Default: `%(default)s`.', @@ -475,22 +410,19 @@ AVAILABLE_CLI_OPTIONS = { default='full', ), # Plot dataframe - "indicators1": - Arg( + "indicators1": Arg( '--indicators1', help='Set indicators from your strategy you want in the first row of the graph. 
' "Space-separated list. Example: `ema3 ema5`. Default: `['sma', 'ema3', 'ema5']`.", nargs='+', ), - "indicators2": - Arg( + "indicators2": Arg( '--indicators2', help='Set indicators from your strategy you want in the third row of the graph. ' "Space-separated list. Example: `fastd fastk`. Default: `['macd', 'macdsignal']`.", nargs='+', ), - "plot_limit": - Arg( + "plot_limit": Arg( '--plot-limit', help='Specify tick limit for plotting. Notice: too high values cause huge files. ' 'Default: %(default)s.', @@ -498,8 +430,7 @@ AVAILABLE_CLI_OPTIONS = { metavar='INT', default=750, ), - "trade_source": - Arg( + "trade_source": Arg( '--trade-source', help='Specify the source for trades (Can be DB or file (backtest file)) ' 'Default: %(default)s', @@ -507,90 +438,76 @@ AVAILABLE_CLI_OPTIONS = { default="file", ), # hyperopt-list, hyperopt-show - "hyperopt_list_profitable": - Arg( + "hyperopt_list_profitable": Arg( '--profitable', help='Select only profitable epochs.', action='store_true', ), - "hyperopt_list_best": - Arg( + "hyperopt_list_best": Arg( '--best', help='Select only best epochs.', action='store_true', ), - "hyperopt_list_min_trades": - Arg( + "hyperopt_list_min_trades": Arg( '--min-trades', help='Select epochs with more than INT trades.', type=check_int_positive, metavar='INT', ), - "hyperopt_list_max_trades": - Arg( + "hyperopt_list_max_trades": Arg( '--max-trades', help='Select epochs with less than INT trades.', type=check_int_positive, metavar='INT', ), - "hyperopt_list_min_avg_time": - Arg( + "hyperopt_list_min_avg_time": Arg( '--min-avg-time', help='Select epochs on above average time.', type=float, metavar='FLOAT', ), - "hyperopt_list_max_avg_time": - Arg( + "hyperopt_list_max_avg_time": Arg( '--max-avg-time', help='Select epochs on under average time.', type=float, metavar='FLOAT', ), - "hyperopt_list_min_avg_profit": - Arg( + "hyperopt_list_min_avg_profit": Arg( '--min-avg-profit', help='Select epochs on above average profit.', type=float, metavar='FLOAT', ), - "hyperopt_list_max_avg_profit": - Arg( + "hyperopt_list_max_avg_profit": Arg( '--max-avg-profit', help='Select epochs on below average profit.', type=float, metavar='FLOAT', ), - "hyperopt_list_min_total_profit": - Arg( + "hyperopt_list_min_total_profit": Arg( '--min-total-profit', help='Select epochs on above total profit.', type=float, metavar='FLOAT', ), - "hyperopt_list_max_total_profit": - Arg( + "hyperopt_list_max_total_profit": Arg( '--max-total-profit', help='Select epochs on below total profit.', type=float, metavar='FLOAT', ), - "hyperopt_list_no_details": - Arg( + "hyperopt_list_no_details": Arg( '--no-details', help='Do not print best epoch details.', action='store_true', ), - "hyperopt_show_index": - Arg( - '-n', - '--index', + "hyperopt_show_index": Arg( + '-n', '--index', help='Specify the index of the epoch to print details for.', type=check_int_nonzero, metavar='INT', ), - "hyperopt_show_no_header": - Arg( + "hyperopt_show_no_header": Arg( '--no-header', help='Do not print epoch details header.', action='store_true', diff --git a/freqtrade/configuration/configuration.py b/freqtrade/configuration/configuration.py index 4f2db4065..7c9c01237 100644 --- a/freqtrade/configuration/configuration.py +++ b/freqtrade/configuration/configuration.py @@ -10,7 +10,8 @@ from typing import Any, Callable, Dict, List, Optional from freqtrade import constants from freqtrade.configuration.check_exchange import check_exchange from freqtrade.configuration.deprecated_settings import process_temporary_deprecated_settings 
-from freqtrade.configuration.directory_operations import (create_datadir, create_userdata_dir) +from freqtrade.configuration.directory_operations import (create_datadir, + create_userdata_dir) from freqtrade.configuration.load_config import load_config_file from freqtrade.exceptions import OperationalException from freqtrade.loggers import setup_logging @@ -25,6 +26,7 @@ class Configuration: Class to read and init the bot configuration Reuse this class for the bot, backtesting, hyperopt and every script that required configuration """ + def __init__(self, args: Dict[str, Any], runmode: RunMode = None) -> None: self.args = args self.config: Optional[Dict[str, Any]] = None @@ -150,12 +152,11 @@ class Configuration: if self.args.get("strategy") or not config.get('strategy'): config.update({'strategy': self.args.get("strategy")}) - self._args_to_config(config, - argname='strategy_path', + self._args_to_config(config, argname='strategy_path', logstring='Using additional Strategy lookup path: {}') - if ('db_url' in self.args and self.args["db_url"] - and self.args["db_url"] != constants.DEFAULT_DB_PROD_URL): + if ('db_url' in self.args and self.args["db_url"] and + self.args["db_url"] != constants.DEFAULT_DB_PROD_URL): config.update({'db_url': self.args["db_url"]}) logger.info('Parameter --db-url detected ...') @@ -193,23 +194,20 @@ class Configuration: logger.info('Using data directory: %s ...', config.get('datadir')) if self.args.get('exportfilename'): - self._args_to_config(config, - argname='exportfilename', + self._args_to_config(config, argname='exportfilename', logstring='Storing backtest results to {} ...') else: - config['exportfilename'] = (config['user_data_dir'] / - 'backtest_results/backtest-result.json') + config['exportfilename'] = (config['user_data_dir'] + / 'backtest_results/backtest-result.json') def _process_optimize_options(self, config: Dict[str, Any]) -> None: # This will override the strategy configuration - self._args_to_config(config, - argname='ticker_interval', + self._args_to_config(config, argname='ticker_interval', logstring='Parameter -i/--ticker-interval detected ... 
' 'Using ticker_interval: {} ...') - self._args_to_config(config, - argname='position_stacking', + self._args_to_config(config, argname='position_stacking', logstring='Parameter --enable-position-stacking detected ...') # Setting max_open_trades to infinite if -1 @@ -222,39 +220,31 @@ class Configuration: logger.info('max_open_trades set to unlimited ...') elif 'max_open_trades' in self.args and self.args["max_open_trades"]: config.update({'max_open_trades': self.args["max_open_trades"]}) - logger.info( - 'Parameter --max-open-trades detected, ' - 'overriding max_open_trades to: %s ...', config.get('max_open_trades')) + logger.info('Parameter --max-open-trades detected, ' + 'overriding max_open_trades to: %s ...', config.get('max_open_trades')) elif config['runmode'] in NON_UTIL_MODES: logger.info('Using max_open_trades: %s ...', config.get('max_open_trades')) - self._args_to_config(config, - argname='stake_amount', + self._args_to_config(config, argname='stake_amount', logstring='Parameter --stake-amount detected, ' 'overriding stake_amount to: {} ...') - self._args_to_config(config, - argname='fee', + self._args_to_config(config, argname='fee', logstring='Parameter --fee detected, ' 'setting fee to: {} ...') - self._args_to_config(config, - argname='timerange', + self._args_to_config(config, argname='timerange', logstring='Parameter --timerange detected: {} ...') self._process_datadir_options(config) - self._args_to_config(config, - argname='strategy_list', - logstring='Using strategy list of {} strategies', - logfun=len) + self._args_to_config(config, argname='strategy_list', + logstring='Using strategy list of {} strategies', logfun=len) - self._args_to_config(config, - argname='ticker_interval', + self._args_to_config(config, argname='ticker_interval', logstring='Overriding ticker interval with Command line argument') - self._args_to_config(config, - argname='export', + self._args_to_config(config, argname='export', logstring='Parameter --export detected: {} ...') # Edge section: @@ -266,14 +256,13 @@ class Configuration: logger.info('Parameter --stoplosses detected: %s ...', self.args["stoploss_range"]) # Hyperopt section - self._args_to_config(config, argname='hyperopt', logstring='Using Hyperopt class name: {}') + self._args_to_config(config, argname='hyperopt', + logstring='Using Hyperopt class name: {}') - self._args_to_config(config, - argname='hyperopt_path', + self._args_to_config(config, argname='hyperopt_path', logstring='Using additional Hyperopt lookup path: {}') - self._args_to_config(config, - argname='epochs', + self._args_to_config(config, argname='epochs', logstring='Parameter --epochs detected ... 
'
                              'Will run Hyperopt for {} epochs ...')
 
         self._args_to_config(config,
@@ -290,8 +279,7 @@ class Configuration:
                              argname='spaces',
                              logstring='Parameter -s/--spaces detected: {}')
 
-        self._args_to_config(config,
-                             argname='print_all',
+        self._args_to_config(config, argname='print_all',
                              logstring='Parameter --print-all detected ...')
 
         if 'print_colorized' in self.args and not self.args["print_colorized"]:
@@ -300,115 +288,100 @@ class Configuration:
         else:
             config.update({'print_colorized': True})
 
-        self._args_to_config(config,
-                             argname='print_json',
+        self._args_to_config(config, argname='print_json',
                              logstring='Parameter --print-json detected ...')
 
-        self._args_to_config(config,
-                             argname='hyperopt_jobs',
+        self._args_to_config(config, argname='hyperopt_jobs',
                              logstring='Parameter -j/--job-workers detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_random_state',
+        self._args_to_config(config, argname='hyperopt_random_state',
                              logstring='Parameter --random-state detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_min_trades',
+        self._args_to_config(config, argname='hyperopt_min_trades',
                              logstring='Parameter --min-trades detected: {}')
 
-        self._args_to_config(config, argname='hyperopt_continue', logstring='Hyperopt continue: {}')
+        self._args_to_config(config, argname='hyperopt_continue',
+                             logstring='Hyperopt continue: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_loss',
+        self._args_to_config(config, argname='hyperopt_loss',
                              logstring='Using Hyperopt loss class name: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_show_index',
+        self._args_to_config(config, argname='hyperopt_show_index',
                              logstring='Parameter -n/--index detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_best',
+        self._args_to_config(config, argname='hyperopt_list_best',
                              logstring='Parameter --best detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_profitable',
+        self._args_to_config(config, argname='hyperopt_list_profitable',
                              logstring='Parameter --profitable detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_min_trades',
+        self._args_to_config(config, argname='hyperopt_list_min_trades',
                              logstring='Parameter --min-trades detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_max_trades',
+        self._args_to_config(config, argname='hyperopt_list_max_trades',
                              logstring='Parameter --max-trades detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_min_avg_time',
+        self._args_to_config(config, argname='hyperopt_list_min_avg_time',
                              logstring='Parameter --min-avg-time detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_max_avg_time',
+        self._args_to_config(config, argname='hyperopt_list_max_avg_time',
                              logstring='Parameter --max-avg-time detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_min_avg_profit',
+        self._args_to_config(config, argname='hyperopt_list_min_avg_profit',
                              logstring='Parameter --min-avg-profit detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_max_avg_profit',
+        self._args_to_config(config, argname='hyperopt_list_max_avg_profit',
                              logstring='Parameter --max-avg-profit detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_min_total_profit',
+        self._args_to_config(config, argname='hyperopt_list_min_total_profit',
                              logstring='Parameter --min-total-profit detected: {}')
 
-        self._args_to_config(config,
-                             argname='hyperopt_list_max_total_profit',
+        self._args_to_config(config, 
argname='hyperopt_list_max_total_profit', logstring='Parameter --max-total-profit detected: {}') - self._args_to_config(config, - argname='hyperopt_list_no_details', + self._args_to_config(config, argname='hyperopt_list_no_details', logstring='Parameter --no-details detected: {}') - self._args_to_config(config, - argname='hyperopt_show_no_header', + self._args_to_config(config, argname='hyperopt_show_no_header', logstring='Parameter --no-header detected: {}') def _process_plot_options(self, config: Dict[str, Any]) -> None: - self._args_to_config(config, argname='pairs', logstring='Using pairs {}') + self._args_to_config(config, argname='pairs', + logstring='Using pairs {}') - self._args_to_config(config, argname='indicators1', logstring='Using indicators1: {}') + self._args_to_config(config, argname='indicators1', + logstring='Using indicators1: {}') - self._args_to_config(config, argname='indicators2', logstring='Using indicators2: {}') + self._args_to_config(config, argname='indicators2', + logstring='Using indicators2: {}') - self._args_to_config(config, argname='plot_limit', logstring='Limiting plot to: {}') - self._args_to_config(config, argname='trade_source', logstring='Using trades from: {}') + self._args_to_config(config, argname='plot_limit', + logstring='Limiting plot to: {}') + self._args_to_config(config, argname='trade_source', + logstring='Using trades from: {}') - self._args_to_config(config, - argname='erase', + self._args_to_config(config, argname='erase', logstring='Erase detected. Deleting existing data.') - self._args_to_config(config, argname='timeframes', logstring='timeframes --timeframes: {}') + self._args_to_config(config, argname='timeframes', + logstring='timeframes --timeframes: {}') - self._args_to_config(config, argname='days', logstring='Detected --days: {}') + self._args_to_config(config, argname='days', + logstring='Detected --days: {}') - self._args_to_config(config, - argname='download_trades', + self._args_to_config(config, argname='download_trades', logstring='Detected --dl-trades: {}') - self._args_to_config(config, - argname='dataformat_ohlcv', + self._args_to_config(config, argname='dataformat_ohlcv', logstring='Using "{}" to store OHLCV data.') - self._args_to_config(config, - argname='dataformat_trades', + self._args_to_config(config, argname='dataformat_trades', logstring='Using "{}" to store trades data.') def _process_runmode(self, config: Dict[str, Any]) -> None: - self._args_to_config(config, - argname='dry_run', + self._args_to_config(config, argname='dry_run', logstring='Parameter --dry-run detected, ' 'overriding dry_run to: {} ...') @@ -419,11 +392,8 @@ class Configuration: config.update({'runmode': self.runmode}) - def _args_to_config(self, - config: Dict[str, Any], - argname: str, - logstring: str, - logfun: Optional[Callable] = None, + def _args_to_config(self, config: Dict[str, Any], argname: str, + logstring: str, logfun: Optional[Callable] = None, deprecated_msg: Optional[str] = None) -> None: """ :param config: Configuration dictionary @@ -435,7 +405,7 @@ class Configuration: configuration instead of the content) """ if (argname in self.args and self.args[argname] is not None - and self.args[argname] is not False): + and self.args[argname] is not False): config.update({argname: self.args[argname]}) if logfun: diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 7def3a054..2c24fd01e 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -1,4 +1,5 @@ # pragma pylint: disable=too-few-public-methods + """ bot 
constants """ @@ -18,12 +19,11 @@ REQUIRED_ORDERTIF = ['buy', 'sell'] REQUIRED_ORDERTYPES = ['buy', 'sell', 'stoploss', 'stoploss_on_exchange'] ORDERTYPE_POSSIBILITIES = ['limit', 'market'] ORDERTIF_POSSIBILITIES = ['gtc', 'fok', 'ioc'] -AVAILABLE_PAIRLISTS = [ - 'StaticPairList', 'VolumePairList', 'PrecisionFilter', 'PriceFilter', 'SpreadFilter' -] +AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', + 'PrecisionFilter', 'PriceFilter', 'SpreadFilter'] AVAILABLE_DATAHANDLERS = ['json', 'jsongz'] DRY_RUN_WALLET = 1000 -MATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons +MATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons DEFAULT_DATAFRAME_COLUMNS = ['date', 'open', 'high', 'low', 'close', 'volume'] USERPATH_HYPEROPTS = 'hyperopts' @@ -40,9 +40,11 @@ USER_DATA_FILES = { } SUPPORTED_FIAT = [ - "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", - "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", - "SGD", "THB", "TRY", "TWD", "ZAR", "USD", "BTC", "XBT", "ETH", "XRP", "LTC", "BCH", "USDT" + "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", + "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", + "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", + "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR", "USD", + "BTC", "XBT", "ETH", "XRP", "LTC", "BCH", "USDT" ] MINIMAL_CONFIG = { @@ -63,16 +65,9 @@ MINIMAL_CONFIG = { CONF_SCHEMA = { 'type': 'object', 'properties': { - 'max_open_trades': { - 'type': ['integer', 'number'], - 'minimum': -1 - }, - 'ticker_interval': { - 'type': 'string' - }, - 'stake_currency': { - 'type': 'string' - }, + 'max_open_trades': {'type': ['integer', 'number'], 'minimum': -1}, + 'ticker_interval': {'type': 'string'}, + 'stake_currency': {'type': 'string'}, 'stake_amount': { 'type': ['number', 'string'], 'minimum': 0.0001, @@ -84,76 +79,32 @@ CONF_SCHEMA = { 'maximum': 1, 'default': 0.99 }, - 'amend_last_stake_amount': { - 'type': 'boolean', - 'default': False - }, + 'amend_last_stake_amount': {'type': 'boolean', 'default': False}, 'last_stake_amount_min_ratio': { - 'type': 'number', - 'minimum': 0.0, - 'maximum': 1.0, - 'default': 0.5 - }, - 'fiat_display_currency': { - 'type': 'string', - 'enum': SUPPORTED_FIAT - }, - 'dry_run': { - 'type': 'boolean' - }, - 'dry_run_wallet': { - 'type': 'number', - 'default': DRY_RUN_WALLET - }, - 'process_only_new_candles': { - 'type': 'boolean' + 'type': 'number', 'minimum': 0.0, 'maximum': 1.0, 'default': 0.5 }, + 'fiat_display_currency': {'type': 'string', 'enum': SUPPORTED_FIAT}, + 'dry_run': {'type': 'boolean'}, + 'dry_run_wallet': {'type': 'number', 'default': DRY_RUN_WALLET}, + 'process_only_new_candles': {'type': 'boolean'}, 'minimal_roi': { 'type': 'object', 'patternProperties': { - '^[0-9.]+$': { - 'type': 'number' - } + '^[0-9.]+$': {'type': 'number'} }, 'minProperties': 1 }, - 'amount_reserve_percent': { - 'type': 'number', - 'minimum': 0.0, - 'maximum': 0.5 - }, - 'stoploss': { - 'type': 'number', - 'maximum': 0, - 'exclusiveMaximum': True - }, - 'trailing_stop': { - 'type': 'boolean' - }, - 'trailing_stop_positive': { - 'type': 'number', - 'minimum': 0, - 'maximum': 1 - }, - 'trailing_stop_positive_offset': { - 'type': 'number', - 'minimum': 0, - 'maximum': 1 - }, - 'trailing_only_offset_is_reached': { - 'type': 'boolean' - }, + 'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5}, + 'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True}, + 'trailing_stop': {'type': 
'boolean'}, + 'trailing_stop_positive': {'type': 'number', 'minimum': 0, 'maximum': 1}, + 'trailing_stop_positive_offset': {'type': 'number', 'minimum': 0, 'maximum': 1}, + 'trailing_only_offset_is_reached': {'type': 'boolean'}, 'unfilledtimeout': { 'type': 'object', 'properties': { - 'buy': { - 'type': 'number', - 'minimum': 1 - }, - 'sell': { - 'type': 'number', - 'minimum': 1 - } + 'buy': {'type': 'number', 'minimum': 1}, + 'sell': {'type': 'number', 'minimum': 1} } }, 'bid_strategy': { @@ -164,24 +115,13 @@ CONF_SCHEMA = { 'minimum': 0, 'maximum': 1, 'exclusiveMaximum': False, - 'use_order_book': { - 'type': 'boolean' - }, - 'order_book_top': { - 'type': 'integer', - 'maximum': 20, - 'minimum': 1 - }, + 'use_order_book': {'type': 'boolean'}, + 'order_book_top': {'type': 'integer', 'maximum': 20, 'minimum': 1}, 'check_depth_of_market': { 'type': 'object', 'properties': { - 'enabled': { - 'type': 'boolean' - }, - 'bids_to_ask_delta': { - 'type': 'number', - 'minimum': 0 - }, + 'enabled': {'type': 'boolean'}, + 'bids_to_ask_delta': {'type': 'number', 'minimum': 0}, } }, }, @@ -191,92 +131,43 @@ CONF_SCHEMA = { 'ask_strategy': { 'type': 'object', 'properties': { - 'use_order_book': { - 'type': 'boolean' - }, - 'order_book_min': { - 'type': 'integer', - 'minimum': 1 - }, - 'order_book_max': { - 'type': 'integer', - 'minimum': 1, - 'maximum': 50 - }, - 'use_sell_signal': { - 'type': 'boolean' - }, - 'sell_profit_only': { - 'type': 'boolean' - }, - 'ignore_roi_if_buy_signal': { - 'type': 'boolean' - } + 'use_order_book': {'type': 'boolean'}, + 'order_book_min': {'type': 'integer', 'minimum': 1}, + 'order_book_max': {'type': 'integer', 'minimum': 1, 'maximum': 50}, + 'use_sell_signal': {'type': 'boolean'}, + 'sell_profit_only': {'type': 'boolean'}, + 'ignore_roi_if_buy_signal': {'type': 'boolean'} } }, 'order_types': { 'type': 'object', 'properties': { - 'buy': { - 'type': 'string', - 'enum': ORDERTYPE_POSSIBILITIES - }, - 'sell': { - 'type': 'string', - 'enum': ORDERTYPE_POSSIBILITIES - }, - 'emergencysell': { - 'type': 'string', - 'enum': ORDERTYPE_POSSIBILITIES - }, - 'stoploss': { - 'type': 'string', - 'enum': ORDERTYPE_POSSIBILITIES - }, - 'stoploss_on_exchange': { - 'type': 'boolean' - }, - 'stoploss_on_exchange_interval': { - 'type': 'number' - } + 'buy': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, + 'sell': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, + 'emergencysell': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, + 'stoploss': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, + 'stoploss_on_exchange': {'type': 'boolean'}, + 'stoploss_on_exchange_interval': {'type': 'number'} }, 'required': ['buy', 'sell', 'stoploss', 'stoploss_on_exchange'] }, 'order_time_in_force': { 'type': 'object', 'properties': { - 'buy': { - 'type': 'string', - 'enum': ORDERTIF_POSSIBILITIES - }, - 'sell': { - 'type': 'string', - 'enum': ORDERTIF_POSSIBILITIES - } + 'buy': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES}, + 'sell': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES} }, 'required': ['buy', 'sell'] }, - 'exchange': { - '$ref': '#/definitions/exchange' - }, - 'edge': { - '$ref': '#/definitions/edge' - }, + 'exchange': {'$ref': '#/definitions/exchange'}, + 'edge': {'$ref': '#/definitions/edge'}, 'experimental': { 'type': 'object', 'properties': { - 'use_sell_signal': { - 'type': 'boolean' - }, - 'sell_profit_only': { - 'type': 'boolean' - }, - 'ignore_roi_if_buy_signal': { - 'type': 'boolean' - }, - 'block_bad_exchanges': { - 'type': 'boolean' - } + 'use_sell_signal': 
{'type': 'boolean'}, + 'sell_profit_only': {'type': 'boolean'}, + 'ignore_roi_if_buy_signal': {'type': 'boolean'}, + 'block_bad_exchanges': {'type': 'boolean'} } }, 'pairlists': { @@ -284,13 +175,8 @@ CONF_SCHEMA = { 'items': { 'type': 'object', 'properties': { - 'method': { - 'type': 'string', - 'enum': AVAILABLE_PAIRLISTS - }, - 'config': { - 'type': 'object' - } + 'method': {'type': 'string', 'enum': AVAILABLE_PAIRLISTS}, + 'config': {'type': 'object'} }, 'required': ['method'], } @@ -298,126 +184,71 @@ CONF_SCHEMA = { 'telegram': { 'type': 'object', 'properties': { - 'enabled': { - 'type': 'boolean' - }, - 'token': { - 'type': 'string' - }, - 'chat_id': { - 'type': 'string' - }, + 'enabled': {'type': 'boolean'}, + 'token': {'type': 'string'}, + 'chat_id': {'type': 'string'}, }, 'required': ['enabled', 'token', 'chat_id'] }, 'webhook': { 'type': 'object', 'properties': { - 'enabled': { - 'type': 'boolean' - }, - 'webhookbuy': { - 'type': 'object' - }, - 'webhookbuycancel': { - 'type': 'object' - }, - 'webhooksell': { - 'type': 'object' - }, - 'webhooksellcancel': { - 'type': 'object' - }, - 'webhookstatus': { - 'type': 'object' - }, + 'enabled': {'type': 'boolean'}, + 'webhookbuy': {'type': 'object'}, + 'webhookbuycancel': {'type': 'object'}, + 'webhooksell': {'type': 'object'}, + 'webhooksellcancel': {'type': 'object'}, + 'webhookstatus': {'type': 'object'}, }, }, 'api_server': { 'type': 'object', 'properties': { - 'enabled': { - 'type': 'boolean' - }, - 'listen_ip_address': { - 'format': 'ipv4' - }, + 'enabled': {'type': 'boolean'}, + 'listen_ip_address': {'format': 'ipv4'}, 'listen_port': { 'type': 'integer', 'minimum': 1024, 'maximum': 65535 }, - 'username': { - 'type': 'string' - }, - 'password': { - 'type': 'string' - }, + 'username': {'type': 'string'}, + 'password': {'type': 'string'}, }, 'required': ['enabled', 'listen_ip_address', 'listen_port', 'username', 'password'] }, - 'db_url': { - 'type': 'string' - }, - 'initial_state': { - 'type': 'string', - 'enum': ['running', 'stopped'] - }, - 'forcebuy_enable': { - 'type': 'boolean' - }, + 'db_url': {'type': 'string'}, + 'initial_state': {'type': 'string', 'enum': ['running', 'stopped']}, + 'forcebuy_enable': {'type': 'boolean'}, 'internals': { 'type': 'object', 'default': {}, 'properties': { - 'process_throttle_secs': { - 'type': 'integer' - }, - 'interval': { - 'type': 'integer' - }, - 'sd_notify': { - 'type': 'boolean' - }, + 'process_throttle_secs': {'type': 'integer'}, + 'interval': {'type': 'integer'}, + 'sd_notify': {'type': 'boolean'}, } }, 'dataformat_ohlcv': { 'type': 'string', - 'enum': AVAILABLE_DATAHANDLERS, - 'default': 'json' + 'enum': AVAILABLE_DATAHANDLERS, + 'default': 'json' }, 'dataformat_trades': { 'type': 'string', - 'enum': AVAILABLE_DATAHANDLERS, - 'default': 'jsongz' + 'enum': AVAILABLE_DATAHANDLERS, + 'default': 'jsongz' } }, 'definitions': { 'exchange': { 'type': 'object', 'properties': { - 'name': { - 'type': 'string' - }, - 'sandbox': { - 'type': 'boolean', - 'default': False - }, - 'key': { - 'type': 'string', - 'default': '' - }, - 'secret': { - 'type': 'string', - 'default': '' - }, - 'password': { - 'type': 'string', - 'default': '' - }, - 'uid': { - 'type': 'string' - }, + 'name': {'type': 'string'}, + 'sandbox': {'type': 'boolean', 'default': False}, + 'key': {'type': 'string', 'default': ''}, + 'secret': {'type': 'string', 'default': ''}, + 'password': {'type': 'string', 'default': ''}, + 'uid': {'type': 'string'}, 'pair_whitelist': { 'type': 'array', 'items': { @@ -432,65 +263,29 @@ CONF_SCHEMA 
= { }, 'uniqueItems': True }, - 'outdated_offset': { - 'type': 'integer', - 'minimum': 1 - }, - 'markets_refresh_interval': { - 'type': 'integer' - }, - 'ccxt_config': { - 'type': 'object' - }, - 'ccxt_async_config': { - 'type': 'object' - } + 'outdated_offset': {'type': 'integer', 'minimum': 1}, + 'markets_refresh_interval': {'type': 'integer'}, + 'ccxt_config': {'type': 'object'}, + 'ccxt_async_config': {'type': 'object'} }, 'required': ['name'] }, 'edge': { 'type': 'object', 'properties': { - 'enabled': { - 'type': 'boolean' - }, - 'process_throttle_secs': { - 'type': 'integer', - 'minimum': 600 - }, - 'calculate_since_number_of_days': { - 'type': 'integer' - }, - 'allowed_risk': { - 'type': 'number' - }, - 'capital_available_percentage': { - 'type': 'number' - }, - 'stoploss_range_min': { - 'type': 'number' - }, - 'stoploss_range_max': { - 'type': 'number' - }, - 'stoploss_range_step': { - 'type': 'number' - }, - 'minimum_winrate': { - 'type': 'number' - }, - 'minimum_expectancy': { - 'type': 'number' - }, - 'min_trade_number': { - 'type': 'number' - }, - 'max_trade_duration_minute': { - 'type': 'integer' - }, - 'remove_pumps': { - 'type': 'boolean' - } + 'enabled': {'type': 'boolean'}, + 'process_throttle_secs': {'type': 'integer', 'minimum': 600}, + 'calculate_since_number_of_days': {'type': 'integer'}, + 'allowed_risk': {'type': 'number'}, + 'capital_available_percentage': {'type': 'number'}, + 'stoploss_range_min': {'type': 'number'}, + 'stoploss_range_max': {'type': 'number'}, + 'stoploss_range_step': {'type': 'number'}, + 'minimum_winrate': {'type': 'number'}, + 'minimum_expectancy': {'type': 'number'}, + 'min_trade_number': {'type': 'number'}, + 'max_trade_duration_minute': {'type': 'integer'}, + 'remove_pumps': {'type': 'boolean'} }, 'required': ['process_throttle_secs', 'allowed_risk'] } diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 5919a37e0..8c3ea50ce 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -248,27 +248,24 @@ class Hyperopt: result: Dict = {} if self.has_space('buy'): - result['buy'] = {p.name: params.get(p.name) for p in self.hyperopt_space('buy')} + result['buy'] = {p.name: params.get(p.name) + for p in self.hyperopt_space('buy')} if self.has_space('sell'): - result['sell'] = {p.name: params.get(p.name) for p in self.hyperopt_space('sell')} + result['sell'] = {p.name: params.get(p.name) + for p in self.hyperopt_space('sell')} if self.has_space('roi'): result['roi'] = self.custom_hyperopt.generate_roi_table(params) if self.has_space('stoploss'): - result['stoploss'] = { - p.name: params.get(p.name) - for p in self.hyperopt_space('stoploss') - } + result['stoploss'] = {p.name: params.get(p.name) + for p in self.hyperopt_space('stoploss')} if self.has_space('trailing'): result['trailing'] = self.custom_hyperopt.generate_trailing_params(params) return result @staticmethod - def print_epoch_details(results, - total_epochs: int, - print_json: bool, - no_header: bool = False, - header_str: str = None) -> None: + def print_epoch_details(results, total_epochs: int, print_json: bool, + no_header: bool = False, header_str: str = None) -> None: """ Display details of the hyperopt result """ @@ -307,7 +304,8 @@ class Hyperopt: # OrderedDict is used to keep the numeric order of the items # in the dict. 
result_dict['minimal_roi'] = OrderedDict( - (str(k), v) for k, v in space_params.items()) + (str(k), v) for k, v in space_params.items() + ) else: # 'stoploss', 'trailing' result_dict.update(space_params) @@ -359,7 +357,8 @@ class Hyperopt: def _format_explanation_string(results, total_epochs) -> str: return (("*" if 'is_initial_point' in results and results['is_initial_point'] else " ") + f"{results['current_epoch']:5d}/{total_epochs}: " + - f"{results['results_explanation']} " + f"Objective: {results['loss']:.5f}") + f"{results['results_explanation']} " + + f"Objective: {results['loss']:.5f}") @staticmethod def print_result_table(config: dict, results: list, total_epochs: int, highlight_best: bool, @@ -372,15 +371,12 @@ class Hyperopt: trials = json_normalize(results, max_level=1) trials['Best'] = '' - trials = trials[[ - 'Best', 'current_epoch', 'results_metrics.trade_count', 'results_metrics.avg_profit', - 'results_metrics.total_profit', 'results_metrics.profit', 'results_metrics.duration', - 'loss', 'is_initial_point', 'is_best' - ]] - trials.columns = [ - 'Best', 'Epoch', 'Trades', 'Avg profit', 'Total profit', 'Profit', 'Avg duration', - 'Objective', 'is_initial_point', 'is_best' - ] + trials = trials[['Best', 'current_epoch', 'results_metrics.trade_count', + 'results_metrics.avg_profit', 'results_metrics.total_profit', + 'results_metrics.profit', 'results_metrics.duration', + 'loss', 'is_initial_point', 'is_best']] + trials.columns = ['Best', 'Epoch', 'Trades', 'Avg profit', 'Total profit', + 'Profit', 'Avg duration', 'Objective', 'is_initial_point', 'is_best'] trials['is_profit'] = False trials.loc[trials['is_initial_point'], 'Best'] = '*' trials.loc[trials['is_best'], 'Best'] = 'Best' @@ -388,33 +384,31 @@ class Hyperopt: trials.loc[trials['Total profit'] > 0, 'is_profit'] = True trials['Trades'] = trials['Trades'].astype(str) - trials['Epoch'] = trials['Epoch'].apply(lambda x: "{}/{}".format(x, total_epochs)) - trials['Avg profit'] = trials['Avg profit'].apply(lambda x: '{:,.2f}%'.format(x) - if not isna(x) else x) - trials['Profit'] = trials['Profit'].apply(lambda x: '{:,.2f}%'.format(x) - if not isna(x) else x) + trials['Epoch'] = trials['Epoch'].apply( + lambda x: "{}/{}".format(x, total_epochs)) + trials['Avg profit'] = trials['Avg profit'].apply( + lambda x: '{:,.2f}%'.format(x) if not isna(x) else x) + trials['Profit'] = trials['Profit'].apply( + lambda x: '{:,.2f}%'.format(x) if not isna(x) else x) trials['Total profit'] = trials['Total profit'].apply( lambda x: '{: 11.8f} '.format(x) + config['stake_currency'] if not isna(x) else x) - trials['Avg duration'] = trials['Avg duration'].apply(lambda x: '{:,.1f}m'.format(x) - if not isna(x) else x) + trials['Avg duration'] = trials['Avg duration'].apply( + lambda x: '{:,.1f}m'.format(x) if not isna(x) else x) if print_colorized: for i in range(len(trials)): if trials.loc[i]['is_profit']: - for z in range(len(trials.loc[i]) - 3): - trials.iat[i, z] = "{}{}{}".format(Fore.GREEN, str(trials.loc[i][z]), - Fore.RESET) + for z in range(len(trials.loc[i])-3): + trials.iat[i, z] = "{}{}{}".format(Fore.GREEN, + str(trials.loc[i][z]), Fore.RESET) if trials.loc[i]['is_best'] and highlight_best: - for z in range(len(trials.loc[i]) - 3): - trials.iat[i, z] = "{}{}{}".format(Style.BRIGHT, str(trials.loc[i][z]), - Style.RESET_ALL) + for z in range(len(trials.loc[i])-3): + trials.iat[i, z] = "{}{}{}".format(Style.BRIGHT, + str(trials.loc[i][z]), Style.RESET_ALL) trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit']) - 
print( - tabulate(trials.to_dict(orient='list'), - headers='keys', - tablefmt='psql', - stralign="right")) + print(tabulate(trials.to_dict(orient='list'), headers='keys', tablefmt='psql', + stralign="right")) def has_space(self, space: str) -> bool: """ @@ -518,10 +512,8 @@ class Hyperopt: # path. We do not want to optimize 'hodl' strategies. loss: float = MAX_LOSS if trade_count >= self.config['hyperopt_min_trades']: - loss = self.calculate_loss(results=backtesting_results, - trade_count=trade_count, - min_date=min_date.datetime, - max_date=max_date.datetime) + loss = self.calculate_loss(results=backtesting_results, trade_count=trade_count, + min_date=min_date.datetime, max_date=max_date.datetime) return { 'loss': loss, 'params_dict': params_dict, @@ -549,8 +541,8 @@ class Hyperopt: f"Avg profit {results_metrics['avg_profit']: 6.2f}%. " f"Total profit {results_metrics['total_profit']: 11.8f} {stake_cur} " f"({results_metrics['profit']: 7.2f}\N{GREEK CAPITAL LETTER SIGMA}%). " - f"Avg duration {results_metrics['duration']:5.1f} min.").encode( - locale.getpreferredencoding(), 'replace').decode('utf-8') + f"Avg duration {results_metrics['duration']:5.1f} min." + ).encode(locale.getpreferredencoding(), 'replace').decode('utf-8') def get_next_point_strategy(self): """ Choose a strategy randomly among the supported ones, used in multi opt mode @@ -571,10 +563,6 @@ class Hyperopt: acq_optimizer=self.opt_acq_optimizer, n_initial_points=n_initial_points, acq_optimizer_kwargs={'n_jobs': n_jobs}, - acq_func_kwargs={ - 'xi': 0.00001, - 'kappa': 0.00001 - }, model_queue_size=self.n_models, random_state=self.random_state, ) @@ -755,8 +743,9 @@ class Hyperopt: n_parameters += len(d.bounds) # guess the size of the search space as the count of the # unordered combination of the dimensions entries - search_space_size = (factorial(n_parameters) / - (factorial(n_parameters - n_dimensions) * factorial(n_dimensions))) + search_space_size = int( + (factorial(n_parameters) / + (factorial(n_parameters - n_dimensions) * factorial(n_dimensions)))) # logger.info(f'Search space size: {search_space_size}') if search_space_size < n_jobs: # don't waste if the space is small @@ -789,7 +778,7 @@ class Hyperopt: if self.max_epoch > self.search_space_size: self.max_epoch = self.search_space_size print() - logger.info(f'Max epochs set to: {self.epochs_limit()}') + logger.info(f'Max epoch set to: {self.epochs_limit()}') def setup_optimizers(self): """ Setup the optimizers objects, try to load from disk, or create new ones """ @@ -834,8 +823,10 @@ class Hyperopt: self.n_samples += len(preprocessed[pair]) min_date, max_date = get_timerange(data) - logger.info('Hyperopting with data from %s up to %s (%s days)..', min_date.isoformat(), - max_date.isoformat(), (max_date - min_date).days) + logger.info( + 'Hyperopting with data from %s up to %s (%s days)..', + min_date.isoformat(), max_date.isoformat(), (max_date - min_date).days + ) dump(preprocessed, self.tickerdata_pickle) # We don't need exchange instance anymore while running hyperopt @@ -898,7 +889,7 @@ class Hyperopt: break except KeyboardInterrupt: - print("User interrupted..") + print('User interrupted..') self.save_trials(final=True) diff --git a/setup.cfg b/setup.cfg index 9853c99d9..34f25482b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -13,7 +13,3 @@ ignore_missing_imports = True [mypy-tests.*] ignore_errors = True - -[yapf] -based_on_style = pep8 -column_limit = 100 \ No newline at end of file diff --git a/tests/optimize/test_hyperopt.py b/tests/optimize/test_hyperopt.py 
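The search-space estimate in the hyperopt.py hunk above is the binomial coefficient C(n_parameters, n_dimensions): the number of unordered combinations of parameter entries across the dimensions. A standalone sketch of the same formula; the floor division (and the implicit assumption that n_dimensions <= n_parameters) are choices of this sketch, whereas the patch itself wraps float division in int():

    from math import factorial

    def estimate_search_space(n_parameters: int, n_dimensions: int) -> int:
        # C(n_parameters, n_dimensions); floor division keeps the result an
        # exact int where float division could lose precision on large spaces.
        return factorial(n_parameters) // (
            factorial(n_parameters - n_dimensions) * factorial(n_dimensions))

    # e.g. 10 bounded parameter entries spread over 4 dimensions:
    assert estimate_search_space(10, 4) == 210
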
index 1d43878a6..2fe86dc5a 100644 --- a/tests/optimize/test_hyperopt.py +++ b/tests/optimize/test_hyperopt.py @@ -10,13 +10,15 @@ import pytest from arrow import Arrow from filelock import Timeout -from freqtrade.commands.optimize_commands import (setup_optimize_configuration, start_hyperopt) +from freqtrade.commands.optimize_commands import (setup_optimize_configuration, + start_hyperopt) from freqtrade.data.history import load_data from freqtrade.exceptions import OperationalException from freqtrade.optimize.default_hyperopt import DefaultHyperOpt from freqtrade.optimize.default_hyperopt_loss import DefaultHyperOptLoss from freqtrade.optimize.hyperopt import Hyperopt -from freqtrade.resolvers.hyperopt_resolver import (HyperOptLossResolver, HyperOptResolver) +from freqtrade.resolvers.hyperopt_resolver import (HyperOptLossResolver, + HyperOptResolver) from freqtrade.state import RunMode from freqtrade.strategy.interface import SellType from tests.conftest import (get_args, log_has, log_has_re, patch_exchange, @@ -35,18 +37,21 @@ def hyperopt(default_conf, mocker): @pytest.fixture(scope='function') def hyperopt_results(): - return pd.DataFrame({ - 'pair': ['ETH/BTC', 'ETH/BTC', 'ETH/BTC'], - 'profit_percent': [-0.1, 0.2, 0.3], - 'profit_abs': [-0.2, 0.4, 0.6], - 'trade_duration': [10, 30, 10], - 'sell_reason': [SellType.STOP_LOSS, SellType.ROI, SellType.ROI], - 'close_time': [ - datetime(2019, 1, 1, 9, 26, 3, 478039), - datetime(2019, 2, 1, 9, 26, 3, 478039), - datetime(2019, 3, 1, 9, 26, 3, 478039) - ] - }) + return pd.DataFrame( + { + 'pair': ['ETH/BTC', 'ETH/BTC', 'ETH/BTC'], + 'profit_percent': [-0.1, 0.2, 0.3], + 'profit_abs': [-0.2, 0.4, 0.6], + 'trade_duration': [10, 30, 10], + 'sell_reason': [SellType.STOP_LOSS, SellType.ROI, SellType.ROI], + 'close_time': + [ + datetime(2019, 1, 1, 9, 26, 3, 478039), + datetime(2019, 2, 1, 9, 26, 3, 478039), + datetime(2019, 3, 1, 9, 26, 3, 478039) + ] + } + ) # Functions for recurrent object patching @@ -75,10 +80,8 @@ def test_setup_hyperopt_configuration_without_arguments(mocker, default_conf, ca args = [ 'hyperopt', - '--config', - 'config.json', - '--hyperopt', - 'DefaultHyperOpt', + '--config', 'config.json', + '--hyperopt', 'DefaultHyperOpt', ] config = setup_optimize_configuration(get_args(args), RunMode.HYPEROPT) @@ -102,12 +105,23 @@ def test_setup_hyperopt_configuration_without_arguments(mocker, default_conf, ca def test_setup_hyperopt_configuration_with_arguments(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) - mocker.patch('freqtrade.configuration.configuration.create_datadir', lambda c, x: x) + mocker.patch( + 'freqtrade.configuration.configuration.create_datadir', + lambda c, x: x + ) args = [ - 'hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--datadir', - '/foo/bar', '--ticker-interval', '1m', '--timerange', ':100', '--enable-position-stacking', - '--disable-max-market-positions', '--epochs', '1000', '--spaces', 'default', '--print-all' + 'hyperopt', + '--config', 'config.json', + '--hyperopt', 'DefaultHyperOpt', + '--datadir', '/foo/bar', + '--ticker-interval', '1m', + '--timerange', ':100', + '--enable-position-stacking', + '--disable-max-market-positions', + '--epochs', '1000', + '--spaces', 'default', + '--print-all' ] config = setup_optimize_configuration(get_args(args), RunMode.HYPEROPT) @@ -151,22 +165,21 @@ def test_hyperoptresolver(mocker, default_conf, caplog) -> None: delattr(hyperopt, 'populate_indicators') delattr(hyperopt, 
'populate_buy_trend') delattr(hyperopt, 'populate_sell_trend') - mocker.patch('freqtrade.resolvers.hyperopt_resolver.HyperOptResolver.load_object', - MagicMock(return_value=hyperopt(default_conf))) + mocker.patch( + 'freqtrade.resolvers.hyperopt_resolver.HyperOptResolver.load_object', + MagicMock(return_value=hyperopt(default_conf)) + ) default_conf.update({'hyperopt': 'DefaultHyperOpt'}) x = HyperOptResolver.load_hyperopt(default_conf) assert not hasattr(x, 'populate_indicators') assert not hasattr(x, 'populate_buy_trend') assert not hasattr(x, 'populate_sell_trend') - assert log_has( - "Hyperopt class does not provide populate_indicators() method. " - "Using populate_indicators from the strategy.", caplog) - assert log_has( - "Hyperopt class does not provide populate_sell_trend() method. " - "Using populate_sell_trend from the strategy.", caplog) - assert log_has( - "Hyperopt class does not provide populate_buy_trend() method. " - "Using populate_buy_trend from the strategy.", caplog) + assert log_has("Hyperopt class does not provide populate_indicators() method. " + "Using populate_indicators from the strategy.", caplog) + assert log_has("Hyperopt class does not provide populate_sell_trend() method. " + "Using populate_sell_trend from the strategy.", caplog) + assert log_has("Hyperopt class does not provide populate_buy_trend() method. " + "Using populate_buy_trend from the strategy.", caplog) assert hasattr(x, "ticker_interval") @@ -181,15 +194,17 @@ def test_hyperoptresolver_noname(default_conf): default_conf['hyperopt'] = '' with pytest.raises(OperationalException, match="No Hyperopt set. Please use `--hyperopt` to specify " - "the Hyperopt class to use."): + "the Hyperopt class to use."): HyperOptResolver.load_hyperopt(default_conf) def test_hyperoptlossresolver(mocker, default_conf, caplog) -> None: hl = DefaultHyperOptLoss - mocker.patch('freqtrade.resolvers.hyperopt_resolver.HyperOptLossResolver.load_object', - MagicMock(return_value=hl)) + mocker.patch( + 'freqtrade.resolvers.hyperopt_resolver.HyperOptLossResolver.load_object', + MagicMock(return_value=hl) + ) x = HyperOptLossResolver.load_hyperoptloss(default_conf) assert hasattr(x, "hyperopt_loss_function") @@ -208,7 +223,12 @@ def test_start_not_installed(mocker, default_conf, caplog, import_fails) -> None mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) patch_exchange(mocker) - args = ['hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--epochs', '5'] + args = [ + 'hyperopt', + '--config', 'config.json', + '--hyperopt', 'DefaultHyperOpt', + '--epochs', '5' + ] pargs = get_args(args) with pytest.raises(OperationalException, match=r"Please ensure that the hyperopt dependencies"): @@ -221,7 +241,12 @@ def test_start(mocker, default_conf, caplog) -> None: mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) patch_exchange(mocker) - args = ['hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--epochs', '5'] + args = [ + 'hyperopt', + '--config', 'config.json', + '--hyperopt', 'DefaultHyperOpt', + '--epochs', '5' + ] pargs = get_args(args) start_hyperopt(pargs) @@ -232,12 +257,19 @@ def test_start(mocker, default_conf, caplog) -> None: def test_start_no_data(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) mocker.patch('freqtrade.data.history.load_pair_history', MagicMock(return_value=pd.DataFrame)) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 
12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) patch_exchange(mocker) - args = ['hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--epochs', '5'] + args = [ + 'hyperopt', + '--config', 'config.json', + '--hyperopt', 'DefaultHyperOpt', + '--epochs', '5' + ] pargs = get_args(args) with pytest.raises(OperationalException, match='No data found. Terminating.'): start_hyperopt(pargs) @@ -249,7 +281,12 @@ def test_start_filelock(mocker, default_conf, caplog) -> None: mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) patch_exchange(mocker) - args = ['hyperopt', '--config', 'config.json', '--hyperopt', 'DefaultHyperOpt', '--epochs', '5'] + args = [ + 'hyperopt', + '--config', 'config.json', + '--hyperopt', 'DefaultHyperOpt', + '--epochs', '5' + ] pargs = get_args(args) start_hyperopt(pargs) assert log_has("Another running instance of freqtrade Hyperopt detected.", caplog) @@ -257,12 +294,12 @@ def test_start_filelock(mocker, default_conf, caplog) -> None: def test_loss_calculation_prefer_correct_trade_count(default_conf, hyperopt_results) -> None: hl = HyperOptLossResolver.load_hyperoptloss(default_conf) - correct = hl.hyperopt_loss_function(hyperopt_results, 600, datetime(2019, 1, 1), - datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(hyperopt_results, 600 + 100, datetime(2019, 1, 1), - datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(hyperopt_results, 600 - 100, datetime(2019, 1, 1), - datetime(2019, 5, 1)) + correct = hl.hyperopt_loss_function(hyperopt_results, 600, + datetime(2019, 1, 1), datetime(2019, 5, 1)) + over = hl.hyperopt_loss_function(hyperopt_results, 600 + 100, + datetime(2019, 1, 1), datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(hyperopt_results, 600 - 100, + datetime(2019, 1, 1), datetime(2019, 5, 1)) assert over > correct assert under > correct @@ -272,9 +309,10 @@ def test_loss_calculation_prefer_shorter_trades(default_conf, hyperopt_results) resultsb.loc[1, 'trade_duration'] = 20 hl = HyperOptLossResolver.load_hyperoptloss(default_conf) - longer = hl.hyperopt_loss_function(hyperopt_results, 100, datetime(2019, 1, 1), - datetime(2019, 5, 1)) - shorter = hl.hyperopt_loss_function(resultsb, 100, datetime(2019, 1, 1), datetime(2019, 5, 1)) + longer = hl.hyperopt_loss_function(hyperopt_results, 100, + datetime(2019, 1, 1), datetime(2019, 5, 1)) + shorter = hl.hyperopt_loss_function(resultsb, 100, + datetime(2019, 1, 1), datetime(2019, 5, 1)) assert shorter < longer @@ -285,11 +323,12 @@ def test_loss_calculation_has_limited_profit(default_conf, hyperopt_results) -> results_under['profit_percent'] = hyperopt_results['profit_percent'] / 2 hl = HyperOptLossResolver.load_hyperoptloss(default_conf) - correct = hl.hyperopt_loss_function(hyperopt_results, 600, datetime(2019, 1, 1), - datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(results_over, 600, datetime(2019, 1, 1), datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(results_under, 600, datetime(2019, 1, 1), - datetime(2019, 5, 1)) + correct = hl.hyperopt_loss_function(hyperopt_results, 600, + datetime(2019, 1, 1), datetime(2019, 5, 1)) + over = hl.hyperopt_loss_function(results_over, 600, + datetime(2019, 1, 1), datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(results_under, 600, + datetime(2019, 1, 1), datetime(2019, 5, 1)) assert over < correct assert under > correct @@ -304,10 +343,10 @@ def 
test_sharpe_loss_prefers_higher_profits(default_conf, hyperopt_results) -> N hl = HyperOptLossResolver.load_hyperoptloss(default_conf) correct = hl.hyperopt_loss_function(hyperopt_results, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), datetime(2019, 1, 1), - datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), datetime(2019, 1, 1), - datetime(2019, 5, 1)) + over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), + datetime(2019, 1, 1), datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), + datetime(2019, 1, 1), datetime(2019, 5, 1)) assert over < correct assert under > correct @@ -322,10 +361,10 @@ def test_sharpe_loss_daily_prefers_higher_profits(default_conf, hyperopt_results hl = HyperOptLossResolver.load_hyperoptloss(default_conf) correct = hl.hyperopt_loss_function(hyperopt_results, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), datetime(2019, 1, 1), - datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), datetime(2019, 1, 1), - datetime(2019, 5, 1)) + over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), + datetime(2019, 1, 1), datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), + datetime(2019, 1, 1), datetime(2019, 5, 1)) assert over < correct assert under > correct @@ -376,10 +415,10 @@ def test_onlyprofit_loss_prefers_higher_profits(default_conf, hyperopt_results) hl = HyperOptLossResolver.load_hyperoptloss(default_conf) correct = hl.hyperopt_loss_function(hyperopt_results, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), datetime(2019, 1, 1), - datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), datetime(2019, 1, 1), - datetime(2019, 5, 1)) + over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), + datetime(2019, 1, 1), datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), + datetime(2019, 1, 1), datetime(2019, 5, 1)) assert over < correct assert under > correct @@ -387,24 +426,28 @@ def test_onlyprofit_loss_prefers_higher_profits(default_conf, hyperopt_results) def test_log_results_if_loss_improves(hyperopt, capsys) -> None: hyperopt.current_best_loss = 2 hyperopt.total_epochs = 2 - hyperopt.print_results({ - 'is_best': True, - 'loss': 1, - 'current_epoch': 2, # This starts from 1 (in a human-friendly manner) - 'results_explanation': 'foo.', - 'is_initial_point': False - }) + hyperopt.print_results( + { + 'is_best': True, + 'loss': 1, + 'current_epoch': 2, # This starts from 1 (in a human-friendly manner) + 'results_explanation': 'foo.', + 'is_initial_point': False + } + ) out, err = capsys.readouterr() assert ' 2/2: foo. 
Objective: 1.00000' in out def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None: hyperopt.current_best_loss = 2 - hyperopt.print_results({ - 'is_best': False, - 'loss': 3, - 'current_epoch': 1, - }) + hyperopt.print_results( + { + 'is_best': False, + 'loss': 3, + 'current_epoch': 1, + } + ) assert caplog.record_tuples == [] @@ -452,32 +495,25 @@ def test_start_calls_optimizer(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) parallel = mocker.patch( 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', - MagicMock(return_value=[{ - 'loss': 1, - 'results_explanation': 'foo result', - 'params': { - 'buy': {}, - 'sell': {}, - 'roi': {}, - 'stoploss': 0.0 - } - }])) + MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', + 'params': {'buy': {}, 'sell': {}, 'roi': {}, 'stoploss': 0.0}}]) + ) patch_exchange(mocker) # Co-test loading ticker-interval from strategy del default_conf['ticker_interval'] - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'default', - 'hyperopt_jobs': 1, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'default', + 'hyperopt_jobs': 1, }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -504,7 +540,11 @@ def test_start_calls_optimizer(mocker, default_conf, caplog, capsys) -> None: def test_format_results(hyperopt): # Test with BTC as stake_currency - trades = [('ETH/BTC', 2, 2, 123), ('LTC/BTC', 1, 1, 123), ('XPR/BTC', -1, -2, -246)] + trades = [ + ('ETH/BTC', 2, 2, 123), + ('LTC/BTC', 1, 1, 123), + ('XPR/BTC', -1, -2, -246) + ] labels = ['currency', 'profit_percent', 'profit_abs', 'trade_duration'] df = pd.DataFrame.from_records(trades, columns=labels) results_metrics = hyperopt._calculate_results_metrics(df) @@ -528,7 +568,11 @@ def test_format_results(hyperopt): assert result.find('2.0000Σ %') # Test with EUR as stake_currency - trades = [('ETH/EUR', 2, 2, 123), ('LTC/EUR', 1, 1, 123), ('XPR/EUR', -1, -2, -246)] + trades = [ + ('ETH/EUR', 2, 2, 123), + ('LTC/EUR', 1, 1, 123), + ('XPR/EUR', -1, -2, -246) + ] df = pd.DataFrame.from_records(trades, columns=labels) results_metrics = hyperopt._calculate_results_metrics(df) results['total_profit'] = results_metrics['total_profit'] @@ -537,97 +581,32 @@ def test_format_results(hyperopt): @pytest.mark.parametrize("spaces, expected_results", [ - (['buy'], { - 'buy': True, - 'sell': False, - 'roi': False, - 'stoploss': False, - 'trailing': False - }), - (['sell'], { - 'buy': False, - 'sell': True, - 'roi': False, - 'stoploss': False, - 'trailing': False - }), - (['roi'], { - 'buy': False, - 'sell': False, - 'roi': True, - 'stoploss': False, - 'trailing': False - }), - (['stoploss'], { - 'buy': False, - 'sell': False, - 'roi': False, - 'stoploss': True, - 'trailing': False - }), - (['trailing'], { - 'buy': False, - 'sell': False, - 'roi': False, - 'stoploss': False, - 'trailing': True - }), - (['buy', 
'sell', 'roi', 'stoploss'], { - 'buy': True, - 'sell': True, - 'roi': True, - 'stoploss': True, - 'trailing': False - }), - (['buy', 'sell', 'roi', 'stoploss', 'trailing'], { - 'buy': True, - 'sell': True, - 'roi': True, - 'stoploss': True, - 'trailing': True - }), - (['buy', 'roi'], { - 'buy': True, - 'sell': False, - 'roi': True, - 'stoploss': False, - 'trailing': False - }), - (['all'], { - 'buy': True, - 'sell': True, - 'roi': True, - 'stoploss': True, - 'trailing': True - }), - (['default'], { - 'buy': True, - 'sell': True, - 'roi': True, - 'stoploss': True, - 'trailing': False - }), - (['default', 'trailing'], { - 'buy': True, - 'sell': True, - 'roi': True, - 'stoploss': True, - 'trailing': True - }), - (['all', 'buy'], { - 'buy': True, - 'sell': True, - 'roi': True, - 'stoploss': True, - 'trailing': True - }), - (['default', 'buy'], { - 'buy': True, - 'sell': True, - 'roi': True, - 'stoploss': True, - 'trailing': False - }), + (['buy'], + {'buy': True, 'sell': False, 'roi': False, 'stoploss': False, 'trailing': False}), + (['sell'], + {'buy': False, 'sell': True, 'roi': False, 'stoploss': False, 'trailing': False}), + (['roi'], + {'buy': False, 'sell': False, 'roi': True, 'stoploss': False, 'trailing': False}), + (['stoploss'], + {'buy': False, 'sell': False, 'roi': False, 'stoploss': True, 'trailing': False}), + (['trailing'], + {'buy': False, 'sell': False, 'roi': False, 'stoploss': False, 'trailing': True}), + (['buy', 'sell', 'roi', 'stoploss'], + {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False}), + (['buy', 'sell', 'roi', 'stoploss', 'trailing'], + {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True}), + (['buy', 'roi'], + {'buy': True, 'sell': False, 'roi': True, 'stoploss': False, 'trailing': False}), + (['all'], + {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True}), + (['default'], + {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False}), + (['default', 'trailing'], + {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True}), + (['all', 'buy'], + {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True}), + (['default', 'buy'], + {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False}), ]) def test_has_space(hyperopt, spaces, expected_results): for s in ['buy', 'sell', 'roi', 'stoploss', 'trailing']: @@ -653,17 +632,19 @@ def test_buy_strategy_generator(hyperopt, testdatadir) -> None: dataframe = hyperopt.custom_hyperopt.populate_indicators(dataframes['UNITTEST/BTC'], {'pair': 'UNITTEST/BTC'}) - populate_buy_trend = hyperopt.custom_hyperopt.buy_strategy_generator({ - 'adx-value': 20, - 'fastd-value': 20, - 'mfi-value': 20, - 'rsi-value': 20, - 'adx-enabled': True, - 'fastd-enabled': True, - 'mfi-enabled': True, - 'rsi-enabled': True, - 'trigger': 'bb_lower' - }) + populate_buy_trend = hyperopt.custom_hyperopt.buy_strategy_generator( + { + 'adx-value': 20, + 'fastd-value': 20, + 'mfi-value': 20, + 'rsi-value': 20, + 'adx-enabled': True, + 'fastd-enabled': True, + 'mfi-enabled': True, + 'rsi-enabled': True, + 'trigger': 'bb_lower' + } + ) result = populate_buy_trend(dataframe, {'pair': 'UNITTEST/BTC'}) # Check if some indicators are generated. 
We will not test all of them assert 'buy' in result @@ -679,14 +660,20 @@ def test_backtest_params(mocker, default_conf) -> None: 'hyperopt_min_trades': 1, }) - trades = [('TRX/BTC', 0.023117, 0.000233, 100)] + trades = [ + ('TRX/BTC', 0.023117, 0.000233, 100) + ] labels = ['currency', 'profit_percent', 'profit_abs', 'trade_duration'] backtest_result = pd.DataFrame.from_records(trades, columns=labels) - mocker.patch('freqtrade.optimize.hyperopt.Backtesting.backtest', - MagicMock(return_value=backtest_result)) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(Arrow(2017, 12, 10), Arrow(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.Backtesting.backtest', + MagicMock(return_value=backtest_result) + ) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(Arrow(2017, 12, 10), Arrow(2017, 12, 13))) + ) patch_exchange(mocker) mocker.patch('freqtrade.optimize.hyperopt.load', MagicMock()) @@ -722,62 +709,44 @@ def test_backtest_params(mocker, default_conf) -> None: 'trailing_only_offset_is_reached': False, } response_expected = { - 'loss': - 1.9840569076926293, - 'results_explanation': - (' 1 trades. Avg profit 2.31%. Total profit 0.00023300 BTC ' - '( 2.31\N{GREEK CAPITAL LETTER SIGMA}%). Avg duration 100.0 min.').encode( - locale.getpreferredencoding(), 'replace').decode('utf-8'), - 'params_details': { - 'buy': { - 'adx-enabled': False, - 'adx-value': 0, - 'fastd-enabled': True, - 'fastd-value': 35, - 'mfi-enabled': False, - 'mfi-value': 0, - 'rsi-enabled': False, - 'rsi-value': 0, - 'trigger': 'macd_cross_signal' - }, - 'roi': { - 0: 0.12000000000000001, - 20.0: 0.02, - 50.0: 0.01, - 110.0: 0 - }, - 'sell': { - 'sell-adx-enabled': False, - 'sell-adx-value': 0, - 'sell-fastd-enabled': True, - 'sell-fastd-value': 75, - 'sell-mfi-enabled': False, - 'sell-mfi-value': 0, - 'sell-rsi-enabled': False, - 'sell-rsi-value': 0, - 'sell-trigger': 'macd_cross_signal' - }, - 'stoploss': { - 'stoploss': -0.4 - }, - 'trailing': { - 'trailing_only_offset_is_reached': False, - 'trailing_stop': True, - 'trailing_stop_positive': 0.02, - 'trailing_stop_positive_offset': 0.07 - } - }, - 'params_dict': - optimizer_param, - 'results_metrics': { - 'avg_profit': 2.3117, - 'duration': 100.0, - 'profit': 2.3117, - 'total_profit': 0.000233, - 'trade_count': 1 - }, - 'total_profit': - 0.00023300 + 'loss': 1.9840569076926293, + 'results_explanation': (' 1 trades. Avg profit 2.31%. Total profit 0.00023300 BTC ' + '( 2.31\N{GREEK CAPITAL LETTER SIGMA}%). Avg duration 100.0 min.' 
+ ).encode(locale.getpreferredencoding(), 'replace').decode('utf-8'), + 'params_details': {'buy': {'adx-enabled': False, + 'adx-value': 0, + 'fastd-enabled': True, + 'fastd-value': 35, + 'mfi-enabled': False, + 'mfi-value': 0, + 'rsi-enabled': False, + 'rsi-value': 0, + 'trigger': 'macd_cross_signal'}, + 'roi': {0: 0.12000000000000001, + 20.0: 0.02, + 50.0: 0.01, + 110.0: 0}, + 'sell': {'sell-adx-enabled': False, + 'sell-adx-value': 0, + 'sell-fastd-enabled': True, + 'sell-fastd-value': 75, + 'sell-mfi-enabled': False, + 'sell-mfi-value': 0, + 'sell-rsi-enabled': False, + 'sell-rsi-value': 0, + 'sell-trigger': 'macd_cross_signal'}, + 'stoploss': {'stoploss': -0.4}, + 'trailing': {'trailing_only_offset_is_reached': False, + 'trailing_stop': True, + 'trailing_stop_positive': 0.02, + 'trailing_stop_positive_offset': 0.07}}, + 'params_dict': optimizer_param, + 'results_metrics': {'avg_profit': 2.3117, + 'duration': 100.0, + 'profit': 2.3117, + 'total_profit': 0.000233, + 'trade_count': 1}, + 'total_profit': 0.00023300 } hyperopt = Hyperopt(default_conf) @@ -788,14 +757,13 @@ def test_backtest_params(mocker, default_conf) -> None: def test_clean_hyperopt(mocker, default_conf, caplog): patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'default', - 'hyperopt_jobs': 1, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'default', + 'hyperopt_jobs': 1, + }) mocker.patch("freqtrade.optimize.hyperopt.Path.is_file", MagicMock(return_value=True)) unlinkmock = mocker.patch("freqtrade.optimize.hyperopt.Path.unlink", MagicMock()) h = Hyperopt(default_conf) @@ -807,15 +775,14 @@ def test_clean_hyperopt(mocker, default_conf, caplog): def test_continue_hyperopt(mocker, default_conf, caplog): patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'default', - 'hyperopt_jobs': 1, - 'hyperopt_continue': True - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'default', + 'hyperopt_jobs': 1, + 'hyperopt_continue': True + }) mocker.patch("freqtrade.optimize.hyperopt.Path.is_file", MagicMock(return_value=True)) unlinkmock = mocker.patch("freqtrade.optimize.hyperopt.Path.unlink", MagicMock()) Hyperopt(default_conf) @@ -828,42 +795,29 @@ def test_print_json_spaces_all(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) parallel = mocker.patch( 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', - MagicMock(return_value=[{ - 'loss': 1, - 'results_explanation': 'foo result', - 'params': {}, - 'params_details': { - 'buy': { - 'mfi-value': None - }, - 'sell': { - 'sell-mfi-value': None - }, - 'roi': {}, - 'stoploss': { - 'stoploss': None - }, - 'trailing': { - 'trailing_stop': None - } - } - }])) + MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo 
result', 'params': {}, + 'params_details': {'buy': {'mfi-value': None}, + 'sell': {'sell-mfi-value': None}, + 'roi': {}, 'stoploss': {'stoploss': None}, + 'trailing': {'trailing_stop': None}}}]) + ) patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'all', - 'hyperopt_jobs': 1, - 'print_json': True, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'all', + 'hyperopt_jobs': 1, + 'print_json': True, + }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -887,39 +841,28 @@ def test_print_json_spaces_default(mocker, default_conf, caplog, capsys) -> None dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) parallel = mocker.patch( 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', - MagicMock(return_value=[{ - 'loss': 1, - 'results_explanation': 'foo result', - 'params': {}, - 'params_details': { - 'buy': { - 'mfi-value': None - }, - 'sell': { - 'sell-mfi-value': None - }, - 'roi': {}, - 'stoploss': { - 'stoploss': None - } - } - }])) + MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', 'params': {}, + 'params_details': {'buy': {'mfi-value': None}, + 'sell': {'sell-mfi-value': None}, + 'roi': {}, 'stoploss': {'stoploss': None}}}]) + ) patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'default', - 'hyperopt_jobs': 1, - 'print_json': True, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'default', + 'hyperopt_jobs': 1, + 'print_json': True, + }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -943,33 +886,26 @@ def test_print_json_spaces_roi_stoploss(mocker, default_conf, caplog, capsys) -> dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) parallel = mocker.patch( 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', - MagicMock(return_value=[{ - 'loss': 1, - 'results_explanation': 'foo result', - 'params': {}, - 'params_details': { - 'roi': {}, - 'stoploss': { - 'stoploss': None - } - } - }])) + MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', 'params': {}, + 'params_details': {'roi': {}, 'stoploss': {'stoploss': None}}}]) + ) patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'roi stoploss', - 
'hyperopt_jobs': 1, - 'print_json': True, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'roi stoploss', + 'hyperopt_jobs': 1, + 'print_json': True, + }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -993,28 +929,24 @@ def test_simplified_interface_roi_stoploss(mocker, default_conf, caplog, capsys) dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) parallel = mocker.patch( 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', MagicMock(return_value=[{ - 'loss': 1, - 'results_explanation': 'foo result', - 'params': { - 'stoploss': 0.0 - } - }])) + 'loss': 1, 'results_explanation': 'foo result', 'params': {'stoploss': 0.0}}]) + ) patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'roi stoploss', - 'hyperopt_jobs': 1, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'roi stoploss', + 'hyperopt_jobs': 1, }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -1048,19 +980,19 @@ def test_simplified_interface_all_failed(mocker, default_conf, caplog, capsys) - mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'all', - 'hyperopt_jobs': 1, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'all', + 'hyperopt_jobs': 1, }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -1079,26 +1011,23 @@ def test_simplified_interface_buy(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) parallel = mocker.patch( 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', - MagicMock(return_value=[{ - 'loss': 1, - 'results_explanation': 'foo result', - 'params': {} - }])) + MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo 
result', 'params': {}}]) + ) patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'buy', - 'hyperopt_jobs': 1, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'buy', + 'hyperopt_jobs': 1, }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -1132,26 +1061,23 @@ def test_simplified_interface_sell(mocker, default_conf, caplog, capsys) -> None dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) parallel = mocker.patch( 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel', - MagicMock(return_value=[{ - 'loss': 1, - 'results_explanation': 'foo result', - 'params': {} - }])) + MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', 'params': {}}]) + ) patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': 'sell', - 'hyperopt_jobs': 1, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': 'sell', + 'hyperopt_jobs': 1, }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() @@ -1191,19 +1117,19 @@ def test_simplified_interface_failed(mocker, default_conf, caplog, capsys, metho mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', MagicMock(return_value=(MagicMock(), None))) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))) + mocker.patch( + 'freqtrade.optimize.hyperopt.get_timerange', + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + ) patch_exchange(mocker) - default_conf.update({ - 'config': 'config.json.example', - 'hyperopt': 'DefaultHyperOpt', - 'epochs': 1, - 'timerange': None, - 'spaces': space, - 'hyperopt_jobs': 1, - }) + default_conf.update({'config': 'config.json.example', + 'hyperopt': 'DefaultHyperOpt', + 'epochs': 1, + 'timerange': None, + 'spaces': space, + 'hyperopt_jobs': 1, }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() From 3f8ee76b24979e46e6e813cc4e0786410e7c0d4a Mon Sep 17 00:00:00 2001 From: orehunt Date: Wed, 4 Mar 2020 17:00:56 +0100 Subject: [PATCH 06/21] comments and small fixes --- freqtrade/optimize/hyperopt.py | 67 +++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 25 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 8c3ea50ce..bf45241ab 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -10,7 +10,7 @@ import random import sys import warnings from collections import OrderedDict, deque -from math import factorial, log, inf +from math import factorial, log from numpy import iinfo, int32 from operator 
import itemgetter
 from pathlib import Path
@@ -55,7 +55,7 @@ logger = logging.getLogger(__name__)
 NEXT_POINT_METHODS = ["cl_min", "cl_mean", "cl_max"]
 NEXT_POINT_METHODS_LENGTH = 3
 
-MAX_LOSS = 10000  # just a big enough number to be bad result in loss optimization
+MAX_LOSS = iinfo(int32).max  # just a big enough number to be a bad result in loss optimization
 
 
 class Hyperopt:
@@ -83,6 +83,7 @@ class Hyperopt:
                           'hyperopt_optimizers.pickle')
         self.tickerdata_pickle = (self.config['user_data_dir'] / 'hyperopt_results' /
                                   'hyperopt_tickerdata.pkl')
+
         self.n_jobs = self.config.get('hyperopt_jobs', -1)
         if self.n_jobs < 0:
             self.n_jobs = cpu_count() // 2 or 1
@@ -98,7 +99,7 @@ class Hyperopt:
         # total number of candles being backtested
         self.n_samples = 0
 
-        self.current_best_loss = inf
+        self.current_best_loss = MAX_LOSS
         self.current_best_epoch = 0
         self.epochs_since_last_best: List = []
         self.avg_best_occurrence = 0
@@ -135,10 +136,12 @@ class Hyperopt:
 
         # in single opt assume runs are expensive so default to 1 point per ask
         self.n_points = self.config.get('points_per_opt', default_n_points)
+        # if 0 n_points are given, don't use any base estimator (akin to random search)
        if self.n_points < 1:
            self.n_points = 1
            self.opt_base_estimator = 'DUMMY'
            self.opt_acq_optimizer = 'sampling'
+        # models are only needed for posterior eval
        self.n_models = max(16, self.n_jobs)
 
        # Populate functions here (hasattr is slow so should not be run during "regular" operations)
@@ -496,10 +499,10 @@ class Hyperopt:
             position_stacking=self.position_stacking,
         )
         return self._get_results_dict(backtesting_results, min_date, max_date, params_dict,
-                                      params_details, raw_params)
+                                      params_details)
 
     def _get_results_dict(self, backtesting_results, min_date, max_date, params_dict,
-                          params_details, raw_params):
+                          params_details):
         results_metrics = self._calculate_results_metrics(backtesting_results)
         results_explanation = self._format_results_explanation_string(results_metrics)
 
@@ -569,6 +572,7 @@ class Hyperopt:
 
     def run_backtest_parallel(self, parallel: Parallel, tries: int, first_try: int,
                               jobs: int) -> List:
+        """ launch parallel in single opt mode, return the evaluated epochs """
         result = parallel(
             delayed(wrap_non_picklable_objects(self.parallel_objective))(asked, backend.results, i)
             for asked, i in zip(self.opt_ask_and_tell(jobs, tries),
@@ -577,14 +581,21 @@ class Hyperopt:
 
     def run_multi_backtest_parallel(self, parallel: Parallel, tries: int, first_try: int,
                                     jobs: int) -> List:
+        """ launch parallel in multi opt mode, return the evaluated epochs """
        results = parallel(
            delayed(wrap_non_picklable_objects(self.parallel_opt_objective))(
                i, backend.optimizers, jobs, backend.results_board)
            for i in range(first_try, first_try + tries))
+        # each worker will return a list containing n_points, so compact into a single list
        return functools.reduce(lambda x, y: [*x, *y], results)
 
    def opt_ask_and_tell(self, jobs: int, tries: int):
-        """ loop to manager optimizer state in single optimizer mode """
+        """
+        loop to manage optimizer state in single optimizer mode; every time a job is
+        dispatched we check the optimizer for points to ask and results to tell, but
+        only fit a new model every n_points, because fitting after every result would
+        invalidate the previously asked points.
+ """ vals = [] to_ask: deque = deque() evald: List[List] = [] @@ -605,12 +616,16 @@ class Hyperopt: to_ask.extend(self.opt.ask(n_points=self.n_points)) fit = True a = to_ask.popleft() - if a in evald: + while a in evald and len(to_ask) > 0: logger.info('this point was evaluated before...') + a = to_ask.popleft() evald.append(a) yield a def parallel_opt_objective(self, n: int, optimizers: Queue, jobs: int, results_board: Queue): + """ + objective run in multi opt mode, optimizers share the results as soon as they are completed + """ self.log_results_immediate(n) # fetch an optimizer instance opt = optimizers.get() @@ -655,15 +670,12 @@ class Hyperopt: return f_val def parallel_objective(self, asked, results: Queue, n=0): + """ objective run in single opt mode, run the backtest, store the results into a queue """ self.log_results_immediate(n) v = self.backtest_params(asked) results.put(v) return v - def parallel_callback(self, f_val): - """ Executed after each epoch evaluation to collect the results """ - self.f_val.extend(f_val) - def log_results_immediate(self, n) -> None: """ Signals that a new job has been scheduled""" print('.', end='') @@ -750,6 +762,7 @@ class Hyperopt: if search_space_size < n_jobs: # don't waste if the space is small n_initial_points = n_jobs + min_epochs = n_jobs elif total_epochs > 0: n_initial_points = total_epochs // 3 if total_epochs > n_jobs * 3 else n_jobs min_epochs = n_initial_points @@ -783,33 +796,34 @@ class Hyperopt: def setup_optimizers(self): """ Setup the optimizers objects, try to load from disk, or create new ones """ # try to load previous optimizers - self.opts = self.load_previous_optimizers(self.opts_file) + opts = self.load_previous_optimizers(self.opts_file) if self.multi: - if len(self.opts) == self.n_jobs: + if len(opts) > 0: # put the restored optimizers in the queue and clear them from the object - for opt in self.opts: + for opt in opts: backend.optimizers.put(opt) - else: # or generate new optimizers + # generate as many optimizers as are still needed to fill the job count + remaining = self.n_jobs - backend.optimizers.qsize() + if remaining > 0: opt = self.get_optimizer(self.dimensions, self.n_jobs, self.n_initial_points) - # reduce random points by the number of optimizers - self.n_initial_points = self.n_initial_points // self.n_jobs - for _ in range(self.n_jobs): # generate optimizers + for _ in range(remaining): # generate optimizers # random state is preserved backend.optimizers.put( opt.copy(random_state=opt.rng.randint(0, - iinfo(int32).max))) + iinfo(int32).max))) del opt else: # if we have more than 1 optimizer but are using single opt, # pick one discard the rest... 
- if len(self.opts) > 0: - self.opt = self.opts[-1] - del self.opts + if len(opts) > 0: + self.opt = opts[-1] else: self.opt = self.get_optimizer(self.dimensions, self.n_jobs, self.n_initial_points) + del opts[:] def start(self) -> None: + """ Broom Broom """ self.random_state = self._set_random_state(self.config.get('hyperopt_random_state', None)) logger.info(f"Using optimizer random state: {self.random_state}") @@ -840,11 +854,14 @@ class Hyperopt: self.dimensions: List[Dimension] = self.hyperopt_space() self.n_initial_points, self.min_epochs, self.search_space_size = self.calc_epochs( self.dimensions, self.n_jobs, self.effort, self.total_epochs) + # reduce random points by the number of optimizers in multi mode + if self.multi: + self.n_initial_points = self.n_initial_points // self.n_jobs logger.info(f"Min epochs set to: {self.min_epochs}") + # if total epochs are not set, max_epoch takes its place if self.total_epochs < 1: self.max_epoch = int(self.min_epochs + len(self.trials)) - else: - self.max_epoch = self.n_initial_points + # initialize average best occurrence self.avg_best_occurrence = self.min_epochs // self.n_jobs logger.info(f'Initial points: {self.n_initial_points}') @@ -896,7 +913,7 @@ class Hyperopt: if self.trials: sorted_trials = sorted(self.trials, key=itemgetter('loss')) results = sorted_trials[0] - self.print_epoch_details(results, self.max_epoch, self.print_json) + self.print_epoch_details(results, self.epochs_limit(), self.print_json) else: # This is printed when Ctrl+C is pressed quickly, before first epochs have # a chance to be evaluated. From 15749d3427a78ce735f541bacfed50ca1b290b78 Mon Sep 17 00:00:00 2001 From: orehunt Date: Sat, 7 Mar 2020 11:17:39 +0100 Subject: [PATCH 07/21] don't tell the optimizer "out of bounds" scores --- freqtrade/optimize/hyperopt.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index bf45241ab..9593f55e6 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -117,9 +117,9 @@ class Hyperopt: self.opts: List[Optimizer] = [] self.opt: Optimizer = None + backend.manager = Manager() if 'multi_opt' in self.config and self.config['multi_opt']: self.multi = True - backend.manager = Manager() backend.optimizers = backend.manager.Queue() backend.results_board = backend.manager.Queue(maxsize=1) backend.results_board.put([]) @@ -127,9 +127,8 @@ class Hyperopt: self.opt_acq_optimizer = 'sampling' default_n_points = 2 else: - backend.manager = Manager() - backend.results = backend.manager.Queue() self.multi = False + backend.results = backend.manager.Queue() self.opt_base_estimator = 'GP' self.opt_acq_optimizer = 'lbfgs' default_n_points = 1 @@ -604,12 +603,16 @@ class Hyperopt: while not backend.results.empty(): vals.append(backend.results.get()) if vals: - self.opt.tell([list(v['params_dict'].values()) for v in vals], - [v['loss'] for v in vals], - fit=fit) - if fit: - fit = False - vals = [] + # values with improbable loss scores should not be told to the optimizer + # to reduce noise + vals = list(filter(lambda v: v['loss'] != MAX_LOSS, vals)) + if vals: # again if all are filtered + self.opt.tell([list(v['params_dict'].values()) for v in vals], + [v['loss'] for v in vals], + fit=fit) + if fit: + fit = False + vals = [] if not to_ask: self.opt.update_next() From 60e519eac4ce942907c1eb457d58d03f6aa61a05 Mon Sep 17 00:00:00 2001 From: orehunt Date: Mon, 9 Mar 2020 13:34:10 +0100 Subject: [PATCH 08/21] better 
handling of interaction between n_points and jobs --- freqtrade/optimize/hyperopt.py | 69 +++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 27 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 9593f55e6..5078bd458 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -138,8 +138,10 @@ class Hyperopt: # if 0 n_points are given, don't use any base estimator (akin to random search) if self.n_points < 1: self.n_points = 1 - self.opt_base_estimator = 'DUMMY' - self.opt_acq_optimizer = 'sampling' + self.opt_base_estimator = "DUMMY" + self.opt_acq_optimizer = "sampling" + # var used in epochs and batches calculation + self.opt_points = self.n_jobs * self.n_points # models are only needed for posterior eval self.n_models = max(16, self.n_jobs) @@ -597,8 +599,9 @@ class Hyperopt: """ vals = [] to_ask: deque = deque() - evald: List[List] = [] + evald: set(Tuple) = set() fit = False + opt = self.opt for r in range(tries): while not backend.results.empty(): vals.append(backend.results.get()) @@ -607,7 +610,7 @@ class Hyperopt: # to reduce noise vals = list(filter(lambda v: v['loss'] != MAX_LOSS, vals)) if vals: # again if all are filtered - self.opt.tell([list(v['params_dict'].values()) for v in vals], + opt.tell([list(v['params_dict'].values()) for v in vals], [v['loss'] for v in vals], fit=fit) if fit: @@ -615,14 +618,17 @@ class Hyperopt: vals = [] if not to_ask: - self.opt.update_next() - to_ask.extend(self.opt.ask(n_points=self.n_points)) + opt.update_next() + to_ask.extend(opt.ask(n_points=self.n_points)) fit = True - a = to_ask.popleft() - while a in evald and len(to_ask) > 0: - logger.info('this point was evaluated before...') - a = to_ask.popleft() - evald.append(a) + a = tuple(to_ask.popleft()) + while a in evald: + logger.info("this point was evaluated before...") + if len(to_ask) > 0: + a = tuple(to_ask.popleft()) + else: + break + evald.add(a) yield a def parallel_opt_objective(self, n: int, optimizers: Queue, jobs: int, results_board: Queue): @@ -666,7 +672,8 @@ class Hyperopt: opt.tell(Xi, yi, fit=False) # update the board with the new results results = results_board.get() - results.append([f_val, jobs - 1]) + no_max_loss_results = list(filter(lambda v: v["loss"] != MAX_LOSS, f_val)) + results.append([no_max_loss_results, jobs - 1]) results_board.put(results) # send back the updated optimizer optimizers.put(opt) @@ -743,11 +750,14 @@ class Hyperopt: return random_state or random.randint(1, 2**16 - 1) @staticmethod - def calc_epochs(dimensions: List[Dimension], n_jobs: int, effort: float, total_epochs: int): + def calc_epochs( + dimensions: List[Dimension], n_jobs: int, effort: float, total_epochs: int, n_points: int + ): """ Compute a reasonable number of initial points and a minimum number of epochs to evaluate """ n_dimensions = len(dimensions) n_parameters = 0 + opt_points = n_jobs * n_points # sum all the dimensions discretely, granting minimum values for d in dimensions: if type(d).__name__ == 'Integer': @@ -762,22 +772,22 @@ class Hyperopt: (factorial(n_parameters) / (factorial(n_parameters - n_dimensions) * factorial(n_dimensions)))) # logger.info(f'Search space size: {search_space_size}') - if search_space_size < n_jobs: + if search_space_size < opt_points: # don't waste if the space is small - n_initial_points = n_jobs - min_epochs = n_jobs + n_initial_points = opt_points // 3 + min_epochs = opt_points elif total_epochs > 0: - n_initial_points = total_epochs // 3 if total_epochs > 
n_jobs * 3 else n_jobs - min_epochs = n_initial_points + n_initial_points = total_epochs // 3 if total_epochs > opt_points * 3 else opt_points + min_epochs = total_epochs else: # extract coefficients from the search space and the jobs count log_sss = int(log(search_space_size, 10)) - log_jobs = int(log(n_jobs, 2)) if n_jobs > 4 else 2 - jobs_ip = log_jobs * log_sss + log_opt = int(log(opt_points, 2)) if opt_points > 4 else 2 + opt_ip = log_opt * log_sss # never waste - n_initial_points = log_sss if jobs_ip > search_space_size else jobs_ip + n_initial_points = log_sss if opt_ip > search_space_size else opt_ip # it shall run for this much, I say - min_epochs = int(max(n_initial_points, n_jobs) * (1 + effort) + n_initial_points) + min_epochs = int(max(n_initial_points, opt_points) * (1 + effort) + n_initial_points) return n_initial_points, min_epochs, search_space_size def update_max_epoch(self, val: Dict, current: int): @@ -809,7 +819,7 @@ class Hyperopt: # generate as many optimizers as are still needed to fill the job count remaining = self.n_jobs - backend.optimizers.qsize() if remaining > 0: - opt = self.get_optimizer(self.dimensions, self.n_jobs, self.n_initial_points) + opt = self.get_optimizer(self.dimensions, self.n_jobs, self.opt_n_initial_points) for _ in range(remaining): # generate optimizers # random state is preserved backend.optimizers.put( @@ -822,7 +832,9 @@ class Hyperopt: if len(opts) > 0: self.opt = opts[-1] else: - self.opt = self.get_optimizer(self.dimensions, self.n_jobs, self.n_initial_points) + self.opt = self.get_optimizer( + self.dimensions, self.n_jobs, self.opt_n_initial_points + ) del opts[:] def start(self) -> None: @@ -856,10 +868,13 @@ class Hyperopt: self.dimensions: List[Dimension] = self.hyperopt_space() self.n_initial_points, self.min_epochs, self.search_space_size = self.calc_epochs( - self.dimensions, self.n_jobs, self.effort, self.total_epochs) - # reduce random points by the number of optimizers in multi mode + self.dimensions, self.n_jobs, self.effort, self.total_epochs, self.n_points + ) + # reduce random points by n_points in multi mode because asks are per job if self.multi: - self.n_initial_points = self.n_initial_points // self.n_jobs + self.opt_n_initial_points = self.n_initial_points // self.n_points + else: + self.opt_n_initial_points = self.n_initial_points logger.info(f"Min epochs set to: {self.min_epochs}") # if total epochs are not set, max_epoch takes its place if self.total_epochs < 1: From 29e9faf16710955602fe96fba3a091558ff76f44 Mon Sep 17 00:00:00 2001 From: orehunt Date: Tue, 10 Mar 2020 08:39:47 +0100 Subject: [PATCH 09/21] - switch max losses with known bad losses --- freqtrade/optimize/hyperopt.py | 62 ++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 21 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 5078bd458..61436e8b1 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) NEXT_POINT_METHODS = ["cl_min", "cl_mean", "cl_max"] NEXT_POINT_METHODS_LENGTH = 3 -MAX_LOSS = iinfo(int32).max # just a big enough number to be bad result in loss optimization +VOID_LOSS = iinfo(int32).max # just a big enough number to be bad result in loss optimization class Hyperopt: @@ -99,7 +99,7 @@ class Hyperopt: # total number of candles being backtested self.n_samples = 0 - self.current_best_loss = MAX_LOSS + self.current_best_loss = VOID_LOSS self.current_best_epoch = 0 self.epochs_since_last_best: List 
= [] self.avg_best_occurrence = 0 @@ -514,7 +514,7 @@ class Hyperopt: # interesting -- consider it as 'bad' (assigned max. loss value) # in order to cast this hyperspace point away from optimization # path. We do not want to optimize 'hodl' strategies. - loss: float = MAX_LOSS + loss: float = VOID_LOSS if trade_count >= self.config['hyperopt_min_trades']: loss = self.calculate_loss(results=backtesting_results, trade_count=trade_count, min_date=min_date.datetime, max_date=max_date.datetime) @@ -548,6 +548,23 @@ class Hyperopt: f"Avg duration {results_metrics['duration']:5.1f} min." ).encode(locale.getpreferredencoding(), 'replace').decode('utf-8') + @staticmethod + def filter_void_losses(vals: List, opt: Optimizer) -> List: + """ remove out of bound losses from the results """ + if opt.void_loss == VOID_LOSS and len(opt.yi) < 1: + # only exclude results at the beginning when void loss is yet to be set + void_filtered = list(filter(lambda v: v["loss"] != VOID_LOSS, vals)) + else: + if opt.void_loss == VOID_LOSS: # set void loss once + opt.void_loss = max(opt.yi) + void_filtered = [] + # default bad losses to set void_loss + for k, v in enumerate(vals): + if v["loss"] == VOID_LOSS: + vals[k]["loss"] = opt.void_loss + void_filtered = vals + return void_filtered + def get_next_point_strategy(self): """ Choose a strategy randomly among the supported ones, used in multi opt mode to increase the diversion of the searches of each optimizer """ @@ -606,16 +623,15 @@ class Hyperopt: while not backend.results.empty(): vals.append(backend.results.get()) if vals: - # values with improbable loss scores should not be told to the optimizer - # to reduce noise - vals = list(filter(lambda v: v['loss'] != MAX_LOSS, vals)) + # filter losses + void_filtered = self.filter_void_losses(vals, opt) if vals: # again if all are filtered - opt.tell([list(v['params_dict'].values()) for v in vals], + opt.tell([list(v['params_dict'].values()) for v in void_filtered], [v['loss'] for v in vals], fit=fit) if fit: fit = False - vals = [] + del vals[:], void_filtered[:] if not to_ask: opt.update_next() @@ -666,18 +682,20 @@ class Hyperopt: asked = opt.ask(n_points=self.n_points, strategy=self.get_next_point_strategy()) # run the backtest for each point f_val = [self.backtest_params(e) for e in asked] + # filter losses + void_filtered = self.filter_void_losses(f_val, opt) # tell the optimizer the results - Xi = [list(v['params_dict'].values()) for v in f_val] - yi = [v['loss'] for v in f_val] - opt.tell(Xi, yi, fit=False) - # update the board with the new results - results = results_board.get() - no_max_loss_results = list(filter(lambda v: v["loss"] != MAX_LOSS, f_val)) - results.append([no_max_loss_results, jobs - 1]) - results_board.put(results) + if opt.void_loss != VOID_LOSS or len(void_filtered) > 0: + Xi = [list(v['params_dict'].values()) for v in void_filtered] + yi = [v['loss'] for v in void_filtered] + opt.tell(Xi, yi, fit=False) + # update the board with the new results + results = results_board.get() + results.append([void_filtered, jobs - 1]) + results_board.put(results) # send back the updated optimizer optimizers.put(opt) - return f_val + return void_filtered def parallel_objective(self, asked, results: Queue, n=0): """ objective run in single opt mode, run the backtest, store the results into a queue """ @@ -822,10 +840,11 @@ class Hyperopt: opt = self.get_optimizer(self.dimensions, self.n_jobs, self.opt_n_initial_points) for _ in range(remaining): # generate optimizers # random state is preserved - 
backend.optimizers.put( - opt.copy(random_state=opt.rng.randint(0, - iinfo(int32).max))) - del opt + opt_copy = opt.copy(random_state=opt.rng.randint(0, + iinfo(int32).max)) + opt_copy.void_loss = VOID_LOSS + backend.optimizers.put(opt_copy) + del opt, opt_copy else: # if we have more than 1 optimizer but are using single opt, # pick one discard the rest... @@ -835,6 +854,7 @@ class Hyperopt: self.opt = self.get_optimizer( self.dimensions, self.n_jobs, self.opt_n_initial_points ) + self.opt.void_loss = VOID_LOSS del opts[:] def start(self) -> None: From ece0ddba387729840ea1ef66d61efb30a916ca2e Mon Sep 17 00:00:00 2001 From: orehunt Date: Tue, 10 Mar 2020 09:10:10 +0100 Subject: [PATCH 10/21] fixes, moved points setup to its function --- freqtrade/optimize/hyperopt.py | 49 ++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 61436e8b1..6ad79c6a0 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -15,7 +15,7 @@ from numpy import iinfo, int32 from operator import itemgetter from pathlib import Path from pprint import pprint -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Tuple, Set import rapidjson from colorama import Fore, Style @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) NEXT_POINT_METHODS = ["cl_min", "cl_mean", "cl_max"] NEXT_POINT_METHODS_LENGTH = 3 -VOID_LOSS = iinfo(int32).max # just a big enough number to be bad result in loss optimization +VOID_LOSS = iinfo(int32).max # just a big enough number to be bad result in loss optimization class Hyperopt: @@ -555,7 +555,7 @@ class Hyperopt: # only exclude results at the beginning when void loss is yet to be set void_filtered = list(filter(lambda v: v["loss"] != VOID_LOSS, vals)) else: - if opt.void_loss == VOID_LOSS: # set void loss once + if opt.void_loss == VOID_LOSS: # set void loss once opt.void_loss = max(opt.yi) void_filtered = [] # default bad losses to set void_loss @@ -616,7 +616,7 @@ class Hyperopt: """ vals = [] to_ask: deque = deque() - evald: set(Tuple) = set() + evald: Set[Tuple] = set() fit = False opt = self.opt for r in range(tries): @@ -625,10 +625,10 @@ class Hyperopt: if vals: # filter losses void_filtered = self.filter_void_losses(vals, opt) - if vals: # again if all are filtered + if void_filtered: # again if all are filtered opt.tell([list(v['params_dict'].values()) for v in void_filtered], - [v['loss'] for v in vals], - fit=fit) + [v['loss'] for v in void_filtered], + fit=fit) if fit: fit = False del vals[:], void_filtered[:] @@ -841,7 +841,7 @@ class Hyperopt: for _ in range(remaining): # generate optimizers # random state is preserved opt_copy = opt.copy(random_state=opt.rng.randint(0, - iinfo(int32).max)) + iinfo(int32).max)) opt_copy.void_loss = VOID_LOSS backend.optimizers.put(opt_copy) del opt, opt_copy @@ -857,6 +857,23 @@ class Hyperopt: self.opt.void_loss = VOID_LOSS del opts[:] + def setup_points(self): + self.n_initial_points, self.min_epochs, self.search_space_size = self.calc_epochs( + self.dimensions, self.n_jobs, self.effort, self.total_epochs, self.n_points + ) + logger.info(f"Min epochs set to: {self.min_epochs}") + # reduce random points by n_points in multi mode because asks are per job + if self.multi: + self.opt_n_initial_points = self.n_initial_points // self.n_points + else: + self.opt_n_initial_points = self.n_initial_points + logger.info(f'Initial points: {self.n_initial_points}') + # if 
total epochs are not set, max_epoch takes its place + if self.total_epochs < 1: + self.max_epoch = int(self.min_epochs + len(self.trials)) + # initialize average best occurrence + self.avg_best_occurrence = self.min_epochs // self.n_jobs + def start(self) -> None: """ Broom Broom """ self.random_state = self._set_random_state(self.config.get('hyperopt_random_state', None)) @@ -887,22 +904,8 @@ class Hyperopt: logger.info(f'Number of parallel jobs set as: {self.n_jobs}') self.dimensions: List[Dimension] = self.hyperopt_space() - self.n_initial_points, self.min_epochs, self.search_space_size = self.calc_epochs( - self.dimensions, self.n_jobs, self.effort, self.total_epochs, self.n_points - ) - # reduce random points by n_points in multi mode because asks are per job - if self.multi: - self.opt_n_initial_points = self.n_initial_points // self.n_points - else: - self.opt_n_initial_points = self.n_initial_points - logger.info(f"Min epochs set to: {self.min_epochs}") - # if total epochs are not set, max_epoch takes its place - if self.total_epochs < 1: - self.max_epoch = int(self.min_epochs + len(self.trials)) - # initialize average best occurrence - self.avg_best_occurrence = self.min_epochs // self.n_jobs + self.setup_points() - logger.info(f'Initial points: {self.n_initial_points}') if self.print_colorized: colorama_init(autoreset=True) From 027dae1c9b951d67ee76d4960b0d043faa85fc21 Mon Sep 17 00:00:00 2001 From: orehunt Date: Thu, 12 Mar 2020 12:55:00 +0100 Subject: [PATCH 11/21] more fixes for epochs counting --- freqtrade/optimize/hyperopt.py | 48 +++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 6ad79c6a0..c8191d688 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -716,9 +716,9 @@ class Hyperopt: print() current = frame_start + 1 i = 0 - for i, v in enumerate(f_val): + for i, v in enumerate(f_val, 1): is_best = self.is_best_loss(v, self.current_best_loss) - current = frame_start + i + 1 + current = frame_start + i v['is_best'] = is_best v['current_epoch'] = current v['is_initial_point'] = current <= self.n_initial_points @@ -735,6 +735,20 @@ class Hyperopt: self.max_epoch_reached = True return i + def setup_best_epochs(self) -> bool: + """ used to resume the best epochs state from previous trials """ + len_trials = len(self.trials) + if len_trials > 0: + best_epochs = list(filter(lambda k: k["is_best"], self.trials)) + len_best = len(best_epochs) + if len_best > 0: + # sorting from lowest to highest, the first value is the current best + best = sorted(best_epochs, key=lambda k: k["loss"])[0] + self.current_best_epoch = best["current_epoch"] + self.avg_best_occurrence = len_trials // len_best + return True + return False + @staticmethod def load_previous_results(trials_file: Path) -> List: """ @@ -790,20 +804,21 @@ class Hyperopt: (factorial(n_parameters) / (factorial(n_parameters - n_dimensions) * factorial(n_dimensions)))) # logger.info(f'Search space size: {search_space_size}') + log_opt = int(log(opt_points, 2)) if opt_points > 4 else 2 if search_space_size < opt_points: # don't waste if the space is small n_initial_points = opt_points // 3 min_epochs = opt_points elif total_epochs > 0: - n_initial_points = total_epochs // 3 if total_epochs > opt_points * 3 else opt_points + # coefficients from total epochs + log_epp = int(log(total_epochs, 2)) * log_opt + n_initial_points = min(log_epp, total_epochs // 3) min_epochs = total_epochs 
else: - # extract coefficients from the search space and the jobs count - log_sss = int(log(search_space_size, 10)) - log_opt = int(log(opt_points, 2)) if opt_points > 4 else 2 - opt_ip = log_opt * log_sss + # extract coefficients from the search space + log_sss = int(log(search_space_size, 10)) * log_opt # never waste - n_initial_points = log_sss if opt_ip > search_space_size else opt_ip + n_initial_points = min(log_sss, search_space_size // 3) # it shall run for this much, I say min_epochs = int(max(n_initial_points, opt_points) * (1 + effort) + n_initial_points) return n_initial_points, min_epochs, search_space_size @@ -899,6 +914,7 @@ class Hyperopt: self.backtesting.exchange = None # type: ignore self.trials = self.load_previous_results(self.trials_file) + self.setup_best_epochs() logger.info(f"Found {cpu_count()} CPU cores. Let's make them scream!") logger.info(f'Number of parallel jobs set as: {self.n_jobs}') @@ -918,9 +934,11 @@ class Hyperopt: with parallel_backend('loky', inner_max_num_threads=2): with Parallel(n_jobs=self.n_jobs, verbose=0, backend='loky') as parallel: # update epochs count + n_points = self.n_points prev_batch = -1 epochs_so_far = len(self.trials) - while prev_batch < epochs_so_far: + epochs_limit = self.epochs_limit + while epochs_so_far > prev_batch or epochs_so_far < self.min_epochs: prev_batch = epochs_so_far # pad the batch length to the number of jobs to avoid desaturation batch_len = (self.avg_best_occurrence + self.n_jobs - @@ -929,16 +947,16 @@ class Hyperopt: # n_points (epochs) in 1 dispatch but this reduces the batch len too much # if self.multi: batch_len = batch_len // self.n_points # don't go over the limit - if epochs_so_far + batch_len > self.epochs_limit(): - batch_len = self.epochs_limit() - epochs_so_far + if epochs_so_far + batch_len * n_points > epochs_limit(): + batch_len = (epochs_limit() - epochs_so_far) // n_points print( - f"{epochs_so_far+1}-{epochs_so_far+batch_len}" - f"/{self.epochs_limit()}: ", + f"{epochs_so_far+1}-{epochs_so_far+batch_len*n_points}" + f"/{epochs_limit()}: ", end='') f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, self.n_jobs) - saved = self.log_results(f_val, epochs_so_far, self.epochs_limit()) + saved = self.log_results(f_val, epochs_so_far, epochs_limit()) # stop if no epochs have been evaluated - if not saved or batch_len < 1: + if (not saved and len(f_val) > 1) or batch_len < 1: break # log_results add epochs_so_far += saved From ef6efb71176741cf6387485e7a789f77a77be839 Mon Sep 17 00:00:00 2001 From: orehunt Date: Fri, 13 Mar 2020 18:57:05 +0100 Subject: [PATCH 12/21] better fit logic (and multi-opt was never fitting -_-) --- freqtrade/optimize/hyperopt.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index c8191d688..06dbfb5cb 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -605,7 +605,7 @@ class Hyperopt: i, backend.optimizers, jobs, backend.results_board) for i in range(first_try, first_try + tries)) # each worker will return a list containing n_points, so compact into a single list - return functools.reduce(lambda x, y: [*x, *y], results) + return functools.reduce(lambda x, y: [*x, *y], results, []) def opt_ask_and_tell(self, jobs: int, tries: int): """ @@ -617,7 +617,6 @@ class Hyperopt: vals = [] to_ask: deque = deque() evald: Set[Tuple] = set() - fit = False opt = self.opt for r in range(tries): while not backend.results.empty(): @@ -628,15 +627,11 @@ 
class Hyperopt: if void_filtered: # again if all are filtered opt.tell([list(v['params_dict'].values()) for v in void_filtered], [v['loss'] for v in void_filtered], - fit=fit) - if fit: - fit = False + fit=(len(to_ask) < 1)) # only fit when out of points del vals[:], void_filtered[:] if not to_ask: - opt.update_next() to_ask.extend(opt.ask(n_points=self.n_points)) - fit = True a = tuple(to_ask.popleft()) while a in evald: logger.info("this point was evaluated before...") @@ -675,8 +670,7 @@ class Hyperopt: # put back the updated results results_board.put(results) if len(past_Xi) > 0: - opt.tell(past_Xi, past_yi, fit=False) - opt.update_next() + opt.tell(past_Xi, past_yi, fit=True) # ask for points according to config asked = opt.ask(n_points=self.n_points, strategy=self.get_next_point_strategy()) @@ -688,7 +682,9 @@ class Hyperopt: if opt.void_loss != VOID_LOSS or len(void_filtered) > 0: Xi = [list(v['params_dict'].values()) for v in void_filtered] yi = [v['loss'] for v in void_filtered] - opt.tell(Xi, yi, fit=False) + # because we fit with points from other runs + # only fit if at the current dispatch there were no points + opt.tell(Xi, yi, fit=(len(past_Xi) < 1)) # update the board with the new results results = results_board.get() results.append([void_filtered, jobs - 1]) @@ -956,6 +952,9 @@ class Hyperopt: f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, self.n_jobs) saved = self.log_results(f_val, epochs_so_far, epochs_limit()) # stop if no epochs have been evaluated + if len(f_val) < 1: + logger.warning("All epochs evaluated were void, " + "check the loss function and the search space.") if (not saved and len(f_val) > 1) or batch_len < 1: break # log_results add From a5b44de0f6fb5774381a8e458d6745a15937b97e Mon Sep 17 00:00:00 2001 From: orehunt Date: Sat, 14 Mar 2020 20:53:56 +0100 Subject: [PATCH 13/21] hyperopt shared mode - shared mode uses one optimizer with shared results - multi mode runs as many optimizers as jobs and results are only shared on ask - a flag to override the strategy when asking more points (--lie-strat) - make sure to ask with n_points `None` to avoid computing more points than needed in shared mode - reduce n of models to 1 in multi mode - don't load more than the specified number of jobs when loading previous optimizers - stretch the batch length to reach the epochs limit - a warning for when no epochs are logged --- freqtrade/commands/arguments.py | 2 +- freqtrade/commands/cli_options.py | 24 ++- freqtrade/configuration/configuration.py | 9 +- freqtrade/constants.py | 4 +- freqtrade/optimize/hyperopt.py | 211 ++++++++++++++--------- freqtrade/optimize/hyperopt_backend.py | 2 +- 6 files changed, 155 insertions(+), 97 deletions(-) diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py index 323a556f8..6afb9ea96 100644 --- a/freqtrade/commands/arguments.py +++ b/freqtrade/commands/arguments.py @@ -26,7 +26,7 @@ ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [ "hyperopt", "hyperopt_path", "position_stacking", "epochs", "spaces", "use_max_market_positions", "print_all", "print_colorized", "print_json", "hyperopt_jobs", "hyperopt_random_state", "hyperopt_min_trades", "hyperopt_continue", "hyperopt_loss", "effort", - "multi_opt", "points_per_opt" + "mode", "n_points", "lie_strat" ] ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"] diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index 0aac462dd..b49f893e3 100644 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -204,20 
+204,26 @@ AVAILABLE_CLI_OPTIONS = {
         metavar='FLOAT',
         default=constants.HYPEROPT_EFFORT,
     ),
-    "multi_opt":
-    Arg('--multi',
-        help=('Switches hyperopt to use one optimizer per job, use it',
-              'when backtesting iterations are cheap (default: %(default)d).'),
-        action='store_true',
-        default=False),
-    "points_per_opt":
-    Arg('--points-per-opt',
-        help=('Controls how many points to ask at each job dispatch to each',
-              'optimizer in multi opt mode, increase if cpu usage of each core',
-              'appears low (default: %(default)d).'),
+    "mode":
+    Arg('--mode',
+        help=('Hyperopt running mode: "single", "shared" or "multi"; shared and '
+              'multi use one optimizer per job, use them when backtesting '
+              'iterations are cheap (default: %(default)s).'),
+        metavar='NAME',
+        default=constants.HYPEROPT_MODE),
+    "n_points":
+    Arg('--n-points',
+        help=('Controls how many points to ask the optimizer at each dispatch; '
+              'increase it if the cpu usage of each core '
+              'appears low (default: %(default)d).'),
         type=int,
         metavar='INT',
-        default=constants.HYPEROPT_POINTS_PER_OPT),
+        default=constants.HYPEROPT_N_POINTS),
+    "lie_strat":
+    Arg('--lie-strat',
+        help=('Sets the strategy the optimizer uses to lie '
+              'when asking for more than one point; '
+              'no effect if n_points is one (default: %(default)s).'),
+        default=constants.HYPEROPT_LIE_STRAT),
     "spaces":
     Arg(
         '--spaces',
diff --git a/freqtrade/configuration/configuration.py b/freqtrade/configuration/configuration.py
index 7c9c01237..40db823de 100644
--- a/freqtrade/configuration/configuration.py
+++ b/freqtrade/configuration/configuration.py
@@ -270,10 +270,13 @@ class Configuration:
                              logstring='Parameter --effort detected ... '
                              'Parameter --effort detected: {}')
        self._args_to_config(config,
-                             argname='multi_opt',
-                             logstring='Hyperopt will use multiple optimizers ...')
+                             argname='mode',
+                             logstring='Hyperopt will run in {} mode ...')
        self._args_to_config(config,
-                             argname='points_per_opt',
+                             argname='explore',
+                             logstring='Acquisition strategy set to random {}...')
+        self._args_to_config(config,
+                             argname='n_points',
                             logstring='Optimizers will be asked for {} points...')
        self._args_to_config(config,
                            argname='spaces',
diff --git a/freqtrade/constants.py b/freqtrade/constants.py
index 2c24fd01e..48a0700df 100644
--- a/freqtrade/constants.py
+++ b/freqtrade/constants.py
@@ -8,7 +8,9 @@ DEFAULT_EXCHANGE = 'bittrex'
 PROCESS_THROTTLE_SECS = 5  # sec
 HYPEROPT_EPOCH = 0  # epochs
 HYPEROPT_EFFORT = 0. 
# tune max epoch count -HYPEROPT_POINTS_PER_OPT = 2 # tune iterations between estimations +HYPEROPT_N_POINTS = 2 # tune iterations between estimations +HYPEROPT_MODE = 'single' +HYPEROPT_LIE_STRAT = 'default' RETRY_TIMEOUT = 30 # sec DEFAULT_HYPEROPT_LOSS = 'DefaultHyperOptLoss' DEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite' diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 06dbfb5cb..006cc230d 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -97,7 +97,7 @@ class Hyperopt: # a guessed number extracted by the space dimensions self.search_space_size = 0 # total number of candles being backtested - self.n_samples = 0 + self.n_candles = 0 self.current_best_loss = VOID_LOSS self.current_best_epoch = 0 @@ -113,37 +113,9 @@ class Hyperopt: # evaluations self.trials: List = [] - # optimizers - self.opts: List[Optimizer] = [] - self.opt: Optimizer = None - backend.manager = Manager() - if 'multi_opt' in self.config and self.config['multi_opt']: - self.multi = True - backend.optimizers = backend.manager.Queue() - backend.results_board = backend.manager.Queue(maxsize=1) - backend.results_board.put([]) - self.opt_base_estimator = 'GBRT' - self.opt_acq_optimizer = 'sampling' - default_n_points = 2 - else: - self.multi = False - backend.results = backend.manager.Queue() - self.opt_base_estimator = 'GP' - self.opt_acq_optimizer = 'lbfgs' - default_n_points = 1 - - # in single opt assume runs are expensive so default to 1 point per ask - self.n_points = self.config.get('points_per_opt', default_n_points) - # if 0 n_points are given, don't use any base estimator (akin to random search) - if self.n_points < 1: - self.n_points = 1 - self.opt_base_estimator = "DUMMY" - self.opt_acq_optimizer = "sampling" - # var used in epochs and batches calculation - self.opt_points = self.n_jobs * self.n_points - # models are only needed for posterior eval - self.n_models = max(16, self.n_jobs) + # configure multi mode + self.setup_multi() # Populate functions here (hasattr is slow so should not be run during "regular" operations) if hasattr(self.custom_hyperopt, 'populate_indicators'): @@ -174,6 +146,60 @@ class Hyperopt: self.print_colorized = self.config.get('print_colorized', False) self.print_json = self.config.get('print_json', False) + def setup_multi(self): + # optimizers + self.opts: List[Optimizer] = [] + self.opt: Optimizer = None + + backend.manager = Manager() + self.mode = self.config.get('mode', 'single') + self.shared = False + if self.mode in ('multi', 'shared'): + self.multi = True + if self.mode == 'shared': + self.shared = True + backend.optimizers = backend.manager.Queue() + backend.results_board = backend.manager.Queue(maxsize=1) + backend.results_board.put({}) + self.opt_base_estimator = 'GBRT' + self.opt_acq_optimizer = 'sampling' + # in multi opt one model is enough + self.n_models = 1 + default_n_points = 2 + else: + self.multi = False + backend.results = backend.manager.Queue() + self.opt_base_estimator = 'GP' + self.opt_acq_optimizer = 'lbfgs' + # models are only needed for posterior eval + self.n_models = min(16, self.n_jobs) + default_n_points = 1 + + # in single opt assume runs are expensive so default to 1 point per ask + self.n_points = self.config.get('n_points', default_n_points) + # if 0 n_points are given, don't use any base estimator (akin to random search) + if self.n_points < 1: + self.n_points = 1 + self.opt_base_estimator = "DUMMY" + self.opt_acq_optimizer = "sampling" + if self.n_points < 2: + # ask_points is 
what is used in the ask call + # because when n_points is None, it doesn't + # waste time generating new points + self.ask_points = None + else: + self.ask_points = self.n_points + # var used in epochs and batches calculation + self.opt_points = self.n_jobs * (self.n_points or 1) + # lie strategy + lie_strat = self.config.get('lie_strat', 'default') + if lie_strat == 'default': + self.lie_strat = lambda: 'cl_min' + elif lie_strat == 'random': + self.lie_strat = self.get_next_point_strategy + else: + self.lie_strat = lambda: lie_strat + @staticmethod def get_lock_filename(config: Dict[str, Any]) -> str: @@ -627,11 +653,11 @@ class Hyperopt: if void_filtered: # again if all are filtered opt.tell([list(v['params_dict'].values()) for v in void_filtered], [v['loss'] for v in void_filtered], - fit=(len(to_ask) < 1)) # only fit when out of points + fit=(len(to_ask) < 1)) # only fit when out of points del vals[:], void_filtered[:] if not to_ask: - to_ask.extend(opt.ask(n_points=self.n_points)) + to_ask.extend(opt.ask(n_points=self.n_points, strategy=self.lie_strat())) a = tuple(to_ask.popleft()) while a in evald: logger.info("this point was evaluated before...") @@ -642,55 +668,70 @@ class Hyperopt: evald.add(a) yield a + @staticmethod + def opt_get_past_points(asked: dict, results_board: Queue) -> dict: + """ fetch shared results between optimizers """ + results = results_board.get() + results_board.put(results) + for a in asked: + if a in results: + asked[a] = results[a] + return asked + def parallel_opt_objective(self, n: int, optimizers: Queue, jobs: int, results_board: Queue): """ objective run in multi opt mode, optimizers share the results as soon as they are completed """ self.log_results_immediate(n) - # fetch an optimizer instance + is_shared = self.shared + # get an optimizer instance opt = optimizers.get() - # tell new points if any - results = results_board.get() - past_Xi = [] - past_yi = [] - for idx, res in enumerate(results): - unsubscribe = False - vals = res[0] # res[1] is the counter - for v in vals: - if list(v['params_dict'].values()) not in opt.Xi: - past_Xi.append(list(v['params_dict'].values())) - past_yi.append(v['loss']) - # decrease counter - if not unsubscribe: - unsubscribe = True - if unsubscribe: - results[idx][1] -= 1 - if results[idx][1] < 1: - del results[idx] - # put back the updated results - results_board.put(results) - if len(past_Xi) > 0: - opt.tell(past_Xi, past_yi, fit=True) + + if is_shared: + # get a random number before putting it back to avoid + # replication with other workers + rand = opt.rng.randint(0, VOID_LOSS) + optimizers.put(opt) + # switch the seed to get a different point + opt.rng.seed(rand) + opt.update_next() # ask for points according to config - asked = opt.ask(n_points=self.n_points, strategy=self.get_next_point_strategy()) - # run the backtest for each point - f_val = [self.backtest_params(e) for e in asked] + asked = opt.ask(n_points=self.ask_points, strategy=self.lie_strat()) + # check if some points have been evaluated by other optimizers + p_asked = self.opt_get_past_points({tuple(a): None for a in asked}, results_board) + Xi_d = [] # done + Xi_t = [] # to do + for a in p_asked: + if p_asked[a] is not None: + Xi_d.append(a) + else: + Xi_t.append(a) + # run the backtest for each point to do (Xi_t) + f_val = [self.backtest_params(a) for a in Xi_t] # filter losses void_filtered = self.filter_void_losses(f_val, opt) - # tell the optimizer the results + # add points of the current dispatch if any if opt.void_loss != VOID_LOSS or 
len(void_filtered) > 0:
-            Xi = [list(v['params_dict'].values()) for v in void_filtered]
-            yi = [v['loss'] for v in void_filtered]
-            # because we fit with points from other runs
-            # only fit if at the current dispatch there were no points
-            opt.tell(Xi, yi, fit=(len(past_Xi) < 1))
-        # update the board with the new results
-        results = results_board.get()
-        results.append([void_filtered, jobs - 1])
-        results_board.put(results)
-        # send back the updated optimizer
-        optimizers.put(opt)
+            Xi = [*Xi_d, *[list(v['params_dict'].values()) for v in void_filtered]]
+            yi = [*[p_asked[a] for a in Xi_d], *[v['loss'] for v in void_filtered]]
+            void = False
+            if is_shared:
+                # refresh the optimizer that stores all the points
+                opt = optimizers.get()
+                opt.tell(Xi, yi, fit=False)
+            else:
+                void = True
+            if not void or not is_shared:
+                # send back the updated optimizer only in non shared mode
+                # because in shared mode if all results are void we don't
+                # fetch it at all
+                optimizers.put(opt)
+            # update the board used to skip already computed points
+            results = results_board.get()
+            for v in void_filtered:
+                results[tuple(v['params_dict'].values())] = v['loss']
+            results_board.put(results)
         return void_filtered

     def parallel_objective(self, asked, results: Queue, n=0):
@@ -839,14 +880,19 @@ class Hyperopt:
         """ Setup the optimizers objects, try to load from disk, or create new ones """
         # try to load previous optimizers
         opts = self.load_previous_optimizers(self.opts_file)
+        n_opts = len(opts)
+        max_opts = self.n_jobs

         if self.multi:
-            if len(opts) > 0:
-                # put the restored optimizers in the queue and clear them from the object
-                for opt in opts:
-                    backend.optimizers.put(opt)
+            # when sharing results there is only one optimizer that gets copied
+            if self.shared:
+                max_opts = 1
+            # put the restored optimizers in the queue
+            if n_opts > 0:
+                for n in range(n_opts):
+                    backend.optimizers.put(opts[n])
             # generate as many optimizers as are still needed to fill the job count
-            remaining = self.n_jobs - backend.optimizers.qsize()
+            remaining = max_opts - backend.optimizers.qsize()
             if remaining > 0:
                 opt = self.get_optimizer(self.dimensions, self.n_jobs, self.opt_n_initial_points)
                 for _ in range(remaining):
                     # generate optimizers
@@ -859,7 +905,7 @@ class Hyperopt:
         else:
             # if we have more than 1 optimizer but are using single opt,
             # pick one and discard the rest...
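            # (judging from the assignment below, the kept optimizer is the last
            # restored one, opts[-1]; any earlier ones are simply dropped)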
- if len(opts) > 0: + if n_opts > 0: self.opt = opts[-1] else: self.opt = self.get_optimizer( @@ -897,7 +943,7 @@ class Hyperopt: # Trim startup period from analyzed dataframe for pair, df in preprocessed.items(): preprocessed[pair] = trim_dataframe(df, timerange) - self.n_samples += len(preprocessed[pair]) + self.n_candles += len(preprocessed[pair]) min_date, max_date = get_timerange(data) logger.info( @@ -944,7 +990,8 @@ class Hyperopt: # if self.multi: batch_len = batch_len // self.n_points # don't go over the limit if epochs_so_far + batch_len * n_points > epochs_limit(): - batch_len = (epochs_limit() - epochs_so_far) // n_points + q, r = divmod(epochs_limit() - epochs_so_far, n_points) + batch_len = q + r print( f"{epochs_so_far+1}-{epochs_so_far+batch_len*n_points}" f"/{epochs_limit()}: ", @@ -952,9 +999,9 @@ class Hyperopt: f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, self.n_jobs) saved = self.log_results(f_val, epochs_so_far, epochs_limit()) # stop if no epochs have been evaluated - if len(f_val) < 1: - logger.warning("All epochs evaluated were void, " - "check the loss function and the search space.") + if len(f_val) < batch_len: + logger.warning("Some evaluated epochs were void, " + "check the loss function and the search space.") if (not saved and len(f_val) > 1) or batch_len < 1: break # log_results add diff --git a/freqtrade/optimize/hyperopt_backend.py b/freqtrade/optimize/hyperopt_backend.py index 4871c44ca..7357fb4ee 100644 --- a/freqtrade/optimize/hyperopt_backend.py +++ b/freqtrade/optimize/hyperopt_backend.py @@ -7,7 +7,7 @@ manager: SyncManager # stores the optimizers in multi opt mode optimizers: Queue # stores a list of the results to share between optimizers -# each result is a tuple of the params_dict and a decreasing counter +# in the form of dict[tuple(Xi)] = yi results_board: Queue # store the results in single opt mode results: Queue From 8d03887b02c55454cf7acb78d969f3b5b8f540e8 Mon Sep 17 00:00:00 2001 From: orehunt Date: Tue, 17 Mar 2020 14:20:41 +0100 Subject: [PATCH 14/21] better defaults --- freqtrade/optimize/hyperopt.py | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 006cc230d..4408ceec3 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -154,6 +154,8 @@ class Hyperopt: backend.manager = Manager() self.mode = self.config.get('mode', 'single') self.shared = False + # models are only needed for posterior eval + self.n_models = 1 if self.mode in ('multi', 'shared'): self.multi = True if self.mode == 'shared': @@ -161,19 +163,19 @@ class Hyperopt: backend.optimizers = backend.manager.Queue() backend.results_board = backend.manager.Queue(maxsize=1) backend.results_board.put({}) + default_n_points = 2 self.opt_base_estimator = 'GBRT' self.opt_acq_optimizer = 'sampling' - # in multi opt one model is enough - self.n_models = 1 - default_n_points = 2 else: self.multi = False backend.results = backend.manager.Queue() - self.opt_base_estimator = 'GP' - self.opt_acq_optimizer = 'lbfgs' - # models are only needed for posterior eval - self.n_models = min(16, self.n_jobs) + self.opt_base_estimator = 'ET' + self.opt_acq_optimizer = 'sampling' default_n_points = 1 + # The GaussianProcessRegressor is heavy, which makes it not a good default + # however longer backtests might make it a better tradeoff + # self.opt_base_estimator = 'GP' + # self.opt_acq_optimizer = 'lbfgs' # in single opt assume runs are 
expensive so default to 1 point per ask self.n_points = self.config.get('n_points', default_n_points) @@ -235,17 +237,21 @@ class Hyperopt: num_trials = len(self.trials) print() if num_trials > self.num_trials_saved: - logger.info(f"Saving {num_trials} {plural(num_trials, 'epoch')}.") + logger.debug(f"Saving {num_trials} {plural(num_trials, 'epoch')}.") dump(self.trials, self.trials_file) self.num_trials_saved = num_trials self.save_opts() if final: - logger.info(f"{num_trials} {plural(num_trials, 'epoch')} " + logger.debug(f"{num_trials} {plural(num_trials, 'epoch')} " f"saved to '{self.trials_file}'.") def save_opts(self) -> None: - """ Save optimizers state to disk. The minimum required state could also be constructed - from the attributes [ models, space, rng ] with Xi, yi loaded from trials """ + """ + Save optimizers state to disk. The minimum required state could also be constructed + from the attributes [ models, space, rng ] with Xi, yi loaded from trials. + All we really care about are [rng, Xi, yi] since models are never passed over queues + and space is dependent on dimensions matching with hyperopt config + """ # synchronize with saved trials opts = [] n_opts = 0 @@ -259,7 +265,7 @@ class Hyperopt: if self.opt: n_opts = 1 opts = [self.opt] - logger.info(f"Saving {n_opts} {plural(n_opts, 'optimizer')}.") + logger.debug(f"Saving {n_opts} {plural(n_opts, 'optimizer')}.") dump(opts, self.opts_file) @staticmethod @@ -874,7 +880,7 @@ class Hyperopt: if self.max_epoch > self.search_space_size: self.max_epoch = self.search_space_size print() - logger.info(f'Max epoch set to: {self.epochs_limit()}') + logger.debug(f'Max epoch set to: {self.epochs_limit()}') def setup_optimizers(self): """ Setup the optimizers objects, try to load from disk, or create new ones """ From cf76be684570468852b0db064a99627a1d48d081 Mon Sep 17 00:00:00 2001 From: orehunt Date: Wed, 18 Mar 2020 08:06:50 +0100 Subject: [PATCH 15/21] fixes for single mode --- freqtrade/optimize/hyperopt.py | 79 +++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 31 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 4408ceec3..41a00b279 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -2,7 +2,6 @@ """ This module contains the hyperopt logic """ - import functools import locale import logging @@ -52,8 +51,12 @@ with warnings.catch_warnings(): logger = logging.getLogger(__name__) # supported strategies when asking for multiple points to the optimizer -NEXT_POINT_METHODS = ["cl_min", "cl_mean", "cl_max"] -NEXT_POINT_METHODS_LENGTH = 3 +LIE_STRATS = ["cl_min", "cl_mean", "cl_max"] +LIE_STRATS_N = len(LIE_STRATS) + +# supported estimators +ESTIMATORS = ["GBRT", "ET", "RF"] +ESTIMATORS_N = len(ESTIMATORS) VOID_LOSS = iinfo(int32).max # just a big enough number to be bad result in loss optimization @@ -154,23 +157,25 @@ class Hyperopt: backend.manager = Manager() self.mode = self.config.get('mode', 'single') self.shared = False - # models are only needed for posterior eval + # in multi opt one model is enough self.n_models = 1 if self.mode in ('multi', 'shared'): self.multi = True if self.mode == 'shared': self.shared = True + self.opt_base_estimator = lambda: 'GBRT' + else: + self.opt_base_estimator = self.estimators + self.opt_acq_optimizer = 'sampling' backend.optimizers = backend.manager.Queue() backend.results_board = backend.manager.Queue(maxsize=1) backend.results_board.put({}) default_n_points = 2 - self.opt_base_estimator = 'GBRT' - 
self.opt_acq_optimizer = 'sampling'
         else:
             self.multi = False
             backend.results = backend.manager.Queue()
-            self.opt_base_estimator = 'ET'
             self.opt_acq_optimizer = 'sampling'
+            self.opt_base_estimator = lambda: 'ET'
             default_n_points = 1
             # The GaussianProcessRegressor is heavy, which makes it not a good default
             # however longer backtests might make it a better tradeoff
             # self.opt_base_estimator = 'GP'
             # self.opt_acq_optimizer = 'lbfgs'
@@ -198,7 +203,7 @@ class Hyperopt:
         if lie_strat == 'default':
             self.lie_strat = lambda: 'cl_min'
         elif lie_strat == 'random':
-            self.lie_strat = self.get_next_point_strategy
+            self.lie_strat = self.lie_strategy
         else:
             self.lie_strat = lambda: lie_strat

@@ -232,17 +237,17 @@ class Hyperopt:

     def save_trials(self, final: bool = False) -> None:
         """
-        Save hyperopt trials to file
+        Save hyperopt trials
         """
         num_trials = len(self.trials)
-        print()
         if num_trials > self.num_trials_saved:
-            logger.debug(f"Saving {num_trials} {plural(num_trials, 'epoch')}.")
+            logger.debug(f"\nSaving {num_trials} {plural(num_trials, 'epoch')}.")
+            # save_trials(self.trials, trials_path, self.num_trials_saved)
             dump(self.trials, self.trials_file)
             self.num_trials_saved = num_trials
             self.save_opts()
         if final:
-            logger.debug(f"{num_trials} {plural(num_trials, 'epoch')} "
+            logger.info(f"\n{num_trials} {plural(num_trials, 'epoch')} "
                         f"saved to '{self.trials_file}'.")

     def save_opts(self) -> None:
@@ -597,10 +602,13 @@ class Hyperopt:
             void_filtered = vals
         return void_filtered

-    def get_next_point_strategy(self):
+    def lie_strategy(self):
         """ Choose a strategy randomly among the supported ones, used in multi opt mode
         to increase the diversity of the searches of each optimizer """
-        return NEXT_POINT_METHODS[random.randrange(0, NEXT_POINT_METHODS_LENGTH)]
+        return LIE_STRATS[random.randrange(0, LIE_STRATS_N)]
+
+    def estimators(self):
+        return ESTIMATORS[random.randrange(0, ESTIMATORS_N)]

     def get_optimizer(self, dimensions: List[Dimension], n_jobs: int,
                       n_initial_points: int) -> Optimizer:
@@ -612,7 +620,7 @@ class Hyperopt:
             n_jobs = 1
         return Optimizer(
             dimensions,
-            base_estimator=self.opt_base_estimator,
+            base_estimator=self.opt_base_estimator(),
             acq_optimizer=self.opt_acq_optimizer,
             n_initial_points=n_initial_points,
             acq_optimizer_kwargs={'n_jobs': n_jobs},
@@ -647,10 +655,13 @@ class Hyperopt:
         points become invalid. 
""" vals = [] + fit = False to_ask: deque = deque() evald: Set[Tuple] = set() opt = self.opt + ask = lambda: to_ask.extend(opt.ask(n_points=self.n_points, strategy=self.lie_strat())) for r in range(tries): + fit = (len(to_ask) < 1) while not backend.results.empty(): vals.append(backend.results.get()) if vals: @@ -659,18 +670,20 @@ class Hyperopt: if void_filtered: # again if all are filtered opt.tell([list(v['params_dict'].values()) for v in void_filtered], [v['loss'] for v in void_filtered], - fit=(len(to_ask) < 1)) # only fit when out of points + fit=fit) # only fit when out of points del vals[:], void_filtered[:] - if not to_ask: - to_ask.extend(opt.ask(n_points=self.n_points, strategy=self.lie_strat())) + if fit: + ask() a = tuple(to_ask.popleft()) while a in evald: - logger.info("this point was evaluated before...") + logger.debug("this point was evaluated before...") if len(to_ask) > 0: a = tuple(to_ask.popleft()) else: - break + opt.update_next() + ask() + a = tuple(to_ask.popleft()) evald.add(a) yield a @@ -700,8 +713,9 @@ class Hyperopt: optimizers.put(opt) # switch the seed to get a different point opt.rng.seed(rand) - opt.update_next() + # always update the next point because we never fit on tell + opt.update_next() # ask for points according to config asked = opt.ask(n_points=self.ask_points, strategy=self.lie_strat()) # check if some points have been evaluated by other optimizers @@ -728,10 +742,11 @@ class Hyperopt: opt.tell(Xi, yi, fit=False) else: void = True + # send back the updated optimizer only in non shared mode + # because in shared mode if all results are void we don't + # fetch it at all if not void or not is_shared: - # send back the updated optimizer only in non shared mode - # because in shared mode if all results are void we don't - # fetch it at all + del opt.models[:] optimizers.put(opt) # update the board used to skip already computed points results = results_board.get() @@ -740,11 +755,14 @@ class Hyperopt: results_board.put(results) return void_filtered - def parallel_objective(self, asked, results: Queue, n=0): + def parallel_objective(self, asked, results: Queue = None, n=0): """ objective run in single opt mode, run the backtest, store the results into a queue """ self.log_results_immediate(n) v = self.backtest_params(asked) - results.put(v) + if results: + results.put(v) + # the results logged won't be filtered + # the loss score will be == VOID_LOSS return v def log_results_immediate(self, n) -> None: @@ -756,7 +774,6 @@ class Hyperopt: """ Log results if it is better than any previous evaluation """ - print() current = frame_start + 1 i = 0 for i, v in enumerate(f_val, 1): @@ -778,7 +795,7 @@ class Hyperopt: self.max_epoch_reached = True return i - def setup_best_epochs(self) -> bool: + def setup_epochs(self) -> bool: """ used to resume the best epochs state from previous trials """ len_trials = len(self.trials) if len_trials > 0: @@ -879,8 +896,7 @@ class Hyperopt: (1 + self.effort)) if self.max_epoch > self.search_space_size: self.max_epoch = self.search_space_size - print() - logger.debug(f'Max epoch set to: {self.epochs_limit()}') + logger.debug(f'\nMax epoch set to: {self.epochs_limit()}') def setup_optimizers(self): """ Setup the optimizers objects, try to load from disk, or create new ones """ @@ -962,7 +978,7 @@ class Hyperopt: self.backtesting.exchange = None # type: ignore self.trials = self.load_previous_results(self.trials_file) - self.setup_best_epochs() + self.setup_epochs() logger.info(f"Found {cpu_count()} CPU cores. 
Let's make them scream!") logger.info(f'Number of parallel jobs set as: {self.n_jobs}') @@ -1003,6 +1019,7 @@ class Hyperopt: f"/{epochs_limit()}: ", end='') f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, self.n_jobs) + print(' ' * batch_len * n_points, end='\r') saved = self.log_results(f_val, epochs_so_far, epochs_limit()) # stop if no epochs have been evaluated if len(f_val) < batch_len: From a982eae62255870f7aed079937ec88cfb84bad34 Mon Sep 17 00:00:00 2001 From: orehunt Date: Wed, 18 Mar 2020 14:48:38 +0100 Subject: [PATCH 16/21] - fixed cases where n_points == 1 would stall the search because of filtering - moved initial points flag set to workers instead of log loop --- freqtrade/constants.py | 2 +- freqtrade/optimize/hyperopt.py | 70 +++++++++++++++++++++++----------- 2 files changed, 48 insertions(+), 24 deletions(-) diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 48a0700df..fc74af9d5 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -8,7 +8,7 @@ DEFAULT_EXCHANGE = 'bittrex' PROCESS_THROTTLE_SECS = 5 # sec HYPEROPT_EPOCH = 0 # epochs HYPEROPT_EFFORT = 0. # tune max epoch count -HYPEROPT_N_POINTS = 2 # tune iterations between estimations +HYPEROPT_N_POINTS = 1 # tune iterations between estimations HYPEROPT_MODE = 'single' HYPEROPT_LIE_STRAT = 'default' RETRY_TIMEOUT = 30 # sec diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 41a00b279..87a054b4f 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -170,7 +170,6 @@ class Hyperopt: backend.optimizers = backend.manager.Queue() backend.results_board = backend.manager.Queue(maxsize=1) backend.results_board.put({}) - default_n_points = 2 else: self.multi = False backend.results = backend.manager.Queue() @@ -179,15 +178,15 @@ class Hyperopt: default_n_points = 1 # The GaussianProcessRegressor is heavy, which makes it not a good default # however longer backtests might make it a better tradeoff - # self.opt_base_estimator = 'GP' + # self.opt_base_estimator = lambda: 'GP' # self.opt_acq_optimizer = 'lbfgs' # in single opt assume runs are expensive so default to 1 point per ask - self.n_points = self.config.get('n_points', default_n_points) + self.n_points = self.config.get('n_points', 1) # if 0 n_points are given, don't use any base estimator (akin to random search) if self.n_points < 1: self.n_points = 1 - self.opt_base_estimator = "DUMMY" + self.opt_base_estimator = lambda: "DUMMY" self.opt_acq_optimizer = "sampling" if self.n_points < 2: # ask_points is what is used in the ask call @@ -659,7 +658,16 @@ class Hyperopt: to_ask: deque = deque() evald: Set[Tuple] = set() opt = self.opt - ask = lambda: to_ask.extend(opt.ask(n_points=self.n_points, strategy=self.lie_strat())) + def point(): + if self.ask_points: + if to_ask: + return tuple(to_ask.popleft()) + else: + to_ask.extend(opt.ask(n_points=self.ask_points, strategy=self.lie_strat())) + return tuple(to_ask.popleft()) + else: + return tuple(opt.ask(strategy=self.lie_strat())) + for r in range(tries): fit = (len(to_ask) < 1) while not backend.results.empty(): @@ -673,17 +681,12 @@ class Hyperopt: fit=fit) # only fit when out of points del vals[:], void_filtered[:] - if fit: - ask() - a = tuple(to_ask.popleft()) + a = point() while a in evald: logger.debug("this point was evaluated before...") - if len(to_ask) > 0: - a = tuple(to_ask.popleft()) - else: + if not fit: opt.update_next() - ask() - a = tuple(to_ask.popleft()) + a = point() evald.add(a) yield a @@ -705,19 +708,30 @@ 
class Hyperopt: is_shared = self.shared # get an optimizer instance opt = optimizers.get() + # this is the counter used by the optimizer internally to track the initial + # points evaluated so far.. + initial_points = opt._n_initial_points if is_shared: # get a random number before putting it back to avoid - # replication with other workers + # replication with other workers and keep reproducibility rand = opt.rng.randint(0, VOID_LOSS) optimizers.put(opt) # switch the seed to get a different point opt.rng.seed(rand) + opt, opt.void_loss = opt.copy(random_state=opt.rng), opt.void_loss + # we have to get a new point if the last batch was all void + elif opt.void: + opt.update_next() + # a model is only fit after initial points + elif initial_points < 1: + opt.tell(opt.Xi, opt.yi) - # always update the next point because we never fit on tell - opt.update_next() # ask for points according to config asked = opt.ask(n_points=self.ask_points, strategy=self.lie_strat()) + # wrap in a list when asked for 1 point + if not self.ask_points: + asked = [asked] # check if some points have been evaluated by other optimizers p_asked = self.opt_get_past_points({tuple(a): None for a in asked}, results_board) Xi_d = [] # done @@ -742,17 +756,27 @@ class Hyperopt: opt.tell(Xi, yi, fit=False) else: void = True + opt.void = void # send back the updated optimizer only in non shared mode # because in shared mode if all results are void we don't # fetch it at all if not void or not is_shared: + # don't pickle models del opt.models[:] optimizers.put(opt) # update the board used to skip already computed points - results = results_board.get() - for v in void_filtered: - results[tuple(v['params_dict'].values())] = v['loss'] - results_board.put(results) + # NOTE: some results at the beginning won't be published + # because they are removed by the filter_void_losses + if not void: + results = results_board.get() + for v in void_filtered: + a = tuple(v['params_dict'].values()) + if a not in results: + results[a] = v['loss'] + results_board.put(results) + # set initial point flag + for n, v in enumerate(void_filtered): + v['is_initial_point'] = initial_points - n > 0 return void_filtered def parallel_objective(self, asked, results: Queue = None, n=0): @@ -761,8 +785,7 @@ class Hyperopt: v = self.backtest_params(asked) if results: results.put(v) - # the results logged won't be filtered - # the loss score will be == VOID_LOSS + v['is_initial_point'] = n < self.opt_n_initial_points return v def log_results_immediate(self, n) -> None: @@ -781,7 +804,6 @@ class Hyperopt: current = frame_start + i v['is_best'] = is_best v['current_epoch'] = current - v['is_initial_point'] = current <= self.n_initial_points logger.debug(f"Optimizer epoch evaluated: {v}") if is_best: self.current_best_loss = v['loss'] @@ -922,6 +944,7 @@ class Hyperopt: opt_copy = opt.copy(random_state=opt.rng.randint(0, iinfo(int32).max)) opt_copy.void_loss = VOID_LOSS + opt_copy.void = False backend.optimizers.put(opt_copy) del opt, opt_copy else: @@ -934,6 +957,7 @@ class Hyperopt: self.dimensions, self.n_jobs, self.opt_n_initial_points ) self.opt.void_loss = VOID_LOSS + self.opt.void = False del opts[:] def setup_points(self): From 9e0b07b2fdf2720e66a5dea56fa0d788ebd98a19 Mon Sep 17 00:00:00 2001 From: orehunt Date: Wed, 18 Mar 2020 18:36:10 +0100 Subject: [PATCH 17/21] - make sure dispatches always perform the given n of epochs - prints fixes --- freqtrade/optimize/hyperopt.py | 52 ++++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 18 
deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 87a054b4f..f1ddaed5b 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -2,6 +2,8 @@ """ This module contains the hyperopt logic """ + +import os import functools import locale import logging @@ -175,7 +177,6 @@ class Hyperopt: backend.results = backend.manager.Queue() self.opt_acq_optimizer = 'sampling' self.opt_base_estimator = lambda: 'ET' - default_n_points = 1 # The GaussianProcessRegressor is heavy, which makes it not a good default # however longer backtests might make it a better tradeoff # self.opt_base_estimator = lambda: 'GP' @@ -727,20 +728,28 @@ class Hyperopt: elif initial_points < 1: opt.tell(opt.Xi, opt.yi) - # ask for points according to config - asked = opt.ask(n_points=self.ask_points, strategy=self.lie_strat()) - # wrap in a list when asked for 1 point - if not self.ask_points: - asked = [asked] - # check if some points have been evaluated by other optimizers - p_asked = self.opt_get_past_points({tuple(a): None for a in asked}, results_board) Xi_d = [] # done + yi_d = [] Xi_t = [] # to do - for a in p_asked: - if p_asked[a] is not None: - Xi_d.append(a) + # ask for points according to config + while True: + asked = opt.ask(n_points=self.ask_points, strategy=self.lie_strat()) + if not self.ask_points: + asked = {tuple(asked): None} else: - Xi_t.append(a) + asked = {tuple(a): None for a in asked} + # check if some points have been evaluated by other optimizers + p_asked = self.opt_get_past_points(asked, results_board) + for a in p_asked: + if p_asked[a] is not None: + Xi_d.append(a) + yi_d.append(p_asked[a]) + else: + Xi_t.append(a) + if len(Xi_t) < self.n_points: + opt.update_next() + else: + break # run the backtest for each point to do (Xi_t) f_val = [self.backtest_params(a) for a in Xi_t] # filter losses @@ -748,7 +757,7 @@ class Hyperopt: # add points of the current dispatch if any if opt.void_loss != VOID_LOSS or len(void_filtered) > 0: Xi = [*Xi_d, *[list(v['params_dict'].values()) for v in void_filtered]] - yi = [*[p_asked[a] for a in Xi_d], *[v['loss'] for v in void_filtered]] + yi = [*yi_d, *[v['loss'] for v in void_filtered]] void = False if is_shared: # refresh the optimizer that stores all the points @@ -827,6 +836,7 @@ class Hyperopt: # sorting from lowest to highest, the first value is the current best best = sorted(best_epochs, key=lambda k: k["loss"])[0] self.current_best_epoch = best["current_epoch"] + self.current_best_loss = best["loss"] self.avg_best_occurrence = len_trials // len_best return True return False @@ -1021,30 +1031,36 @@ class Hyperopt: jobs_scheduler = self.run_backtest_parallel with parallel_backend('loky', inner_max_num_threads=2): with Parallel(n_jobs=self.n_jobs, verbose=0, backend='loky') as parallel: + jobs = parallel._effective_n_jobs() + logger.info(f'Effective number of parallel workers used: {jobs}') # update epochs count n_points = self.n_points prev_batch = -1 epochs_so_far = len(self.trials) epochs_limit = self.epochs_limit + columns, _ = os.get_terminal_size() + columns -= 1 while epochs_so_far > prev_batch or epochs_so_far < self.min_epochs: prev_batch = epochs_so_far + occurrence = int(self.avg_best_occurrence * (1 + self.effort)) # pad the batch length to the number of jobs to avoid desaturation - batch_len = (self.avg_best_occurrence + self.n_jobs - - self.avg_best_occurrence % self.n_jobs) + batch_len = (occurrence + jobs - + occurrence % jobs) # when using multiple optimizers each worker 
performs # n_points (epochs) in 1 dispatch but this reduces the batch len too much # if self.multi: batch_len = batch_len // self.n_points # don't go over the limit - if epochs_so_far + batch_len * n_points > epochs_limit(): + if epochs_so_far + batch_len * n_points >= epochs_limit(): q, r = divmod(epochs_limit() - epochs_so_far, n_points) batch_len = q + r print( f"{epochs_so_far+1}-{epochs_so_far+batch_len*n_points}" f"/{epochs_limit()}: ", end='') - f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, self.n_jobs) - print(' ' * batch_len * n_points, end='\r') + f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, jobs) + print(end='\r') saved = self.log_results(f_val, epochs_so_far, epochs_limit()) + print('\r', ' ' * columns, end='\r') # stop if no epochs have been evaluated if len(f_val) < batch_len: logger.warning("Some evaluated epochs were void, " From 0ccbaa8c966aa8dcf2f7784536d61a74213e7af4 Mon Sep 17 00:00:00 2001 From: orehunt Date: Fri, 20 Mar 2020 15:42:25 +0100 Subject: [PATCH 18/21] - refactoring - fixes to prevent stalling --- freqtrade/optimize/hyperopt.py | 236 +++++++++++++++++++-------------- 1 file changed, 137 insertions(+), 99 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index f1ddaed5b..876978a1b 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -247,7 +247,7 @@ class Hyperopt: self.num_trials_saved = num_trials self.save_opts() if final: - logger.info(f"\n{num_trials} {plural(num_trials, 'epoch')} " + logger.info(f"{num_trials} {plural(num_trials, 'epoch')} " f"saved to '{self.trials_file}'.") def save_opts(self) -> None: @@ -659,6 +659,7 @@ class Hyperopt: to_ask: deque = deque() evald: Set[Tuple] = set() opt = self.opt + def point(): if self.ask_points: if to_ask: @@ -683,23 +684,67 @@ class Hyperopt: del vals[:], void_filtered[:] a = point() - while a in evald: + if a in evald: logger.debug("this point was evaluated before...") if not fit: opt.update_next() a = point() + if a in evald: + break evald.add(a) yield a @staticmethod - def opt_get_past_points(asked: dict, results_board: Queue) -> dict: + def opt_get_past_points(asked: dict, results_board: Queue) -> Tuple[dict, int]: """ fetch shared results between optimizers """ results = results_board.get() results_board.put(results) for a in asked: if a in results: asked[a] = results[a] - return asked + return asked, len(results) + + @staticmethod + def opt_state(shared: bool, optimizers: Queue) -> Tuple[Optimizer, int]: + """ fetch an optimizer in multi opt mode """ + # get an optimizer instance + opt = optimizers.get() + # this is the counter used by the optimizer internally to track the initial + # points evaluated so far.. 
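+        # (assumed skopt behaviour: tell() decrements _n_initial_points, and the
+        # surrogate model is only fit once the counter reaches zero)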
+ initial_points = opt._n_initial_points + if shared: + # get a random number before putting it back to avoid + # replication with other workers and keep reproducibility + rand = opt.rng.randint(0, VOID_LOSS) + optimizers.put(opt) + # switch the seed to get a different point + opt.rng.seed(rand) + opt, opt.void_loss = opt.copy(random_state=opt.rng), opt.void_loss + # a model is only fit after initial points + elif initial_points < 1: + opt.tell(opt.Xi, opt.yi) + # we have to get a new point anyway + else: + opt.update_next() + return opt, initial_points + + @staticmethod + def opt_results(void: bool, void_filtered: list, + initial_points: int, results_board: Queue) -> list: + # update the board used to skip already computed points + # NOTE: some results at the beginning won't be published + # because they are removed by the filter_void_losses + if not void: + results = results_board.get() + for v in void_filtered: + a = tuple(v['params_dict'].values()) + if a not in results: + results[a] = v['loss'] + results_board.put(results) + # set initial point flag + for n, v in enumerate(void_filtered): + v['is_initial_point'] = initial_points - n > 0 + return void_filtered def parallel_opt_objective(self, n: int, optimizers: Queue, jobs: int, results_board: Queue): """ @@ -707,47 +752,39 @@ class Hyperopt: """ self.log_results_immediate(n) is_shared = self.shared - # get an optimizer instance - opt = optimizers.get() - # this is the counter used by the optimizer internally to track the initial - # points evaluated so far.. - initial_points = opt._n_initial_points - - if is_shared: - # get a random number before putting it back to avoid - # replication with other workers and keep reproducibility - rand = opt.rng.randint(0, VOID_LOSS) - optimizers.put(opt) - # switch the seed to get a different point - opt.rng.seed(rand) - opt, opt.void_loss = opt.copy(random_state=opt.rng), opt.void_loss - # we have to get a new point if the last batch was all void - elif opt.void: - opt.update_next() - # a model is only fit after initial points - elif initial_points < 1: - opt.tell(opt.Xi, opt.yi) + id = optimizers.qsize() + opt, initial_points = self.opt_state(is_shared, optimizers) + sss = self.search_space_size + asked = {None: None} + asked_d = {} + told = 0 # told Xi_d = [] # done yi_d = [] Xi_t = [] # to do - # ask for points according to config - while True: + while asked != asked_d and len(opt.Xi) < sss: + asked_d = asked asked = opt.ask(n_points=self.ask_points, strategy=self.lie_strat()) if not self.ask_points: asked = {tuple(asked): None} else: asked = {tuple(a): None for a in asked} # check if some points have been evaluated by other optimizers - p_asked = self.opt_get_past_points(asked, results_board) + p_asked, _ = self.opt_get_past_points(asked, results_board) for a in p_asked: if p_asked[a] is not None: - Xi_d.append(a) - yi_d.append(p_asked[a]) + if a not in Xi_d: + Xi_d.append(a) + yi_d.append(p_asked[a]) else: Xi_t.append(a) if len(Xi_t) < self.n_points: - opt.update_next() + len_Xi_d = len(Xi_d) + if len_Xi_d > told: # tell new points + opt.tell(Xi_d[told:], yi_d[told:]) + told = len_Xi_d + else: + opt.update_next() else: break # run the backtest for each point to do (Xi_t) @@ -773,20 +810,8 @@ class Hyperopt: # don't pickle models del opt.models[:] optimizers.put(opt) - # update the board used to skip already computed points - # NOTE: some results at the beginning won't be published - # because they are removed by the filter_void_losses - if not void: - results = results_board.get() - for v in 
void_filtered: - a = tuple(v['params_dict'].values()) - if a not in results: - results[a] = v['loss'] - results_board.put(results) - # set initial point flag - for n, v in enumerate(void_filtered): - v['is_initial_point'] = initial_points - n > 0 - return void_filtered + + return self.opt_results(void, void_filtered, initial_points, results_board) def parallel_objective(self, asked, results: Queue = None, n=0): """ objective run in single opt mode, run the backtest, store the results into a queue """ @@ -892,9 +917,12 @@ class Hyperopt: n_parameters += len(d.bounds) # guess the size of the search space as the count of the # unordered combination of the dimensions entries - search_space_size = int( - (factorial(n_parameters) / - (factorial(n_parameters - n_dimensions) * factorial(n_dimensions)))) + try: + search_space_size = int( + (factorial(n_parameters) / + (factorial(n_parameters - n_dimensions) * factorial(n_dimensions)))) + except OverflowError: + search_space_size = VOID_LOSS # logger.info(f'Search space size: {search_space_size}') log_opt = int(log(opt_points, 2)) if opt_points > 4 else 2 if search_space_size < opt_points: @@ -913,7 +941,7 @@ class Hyperopt: n_initial_points = min(log_sss, search_space_size // 3) # it shall run for this much, I say min_epochs = int(max(n_initial_points, opt_points) * (1 + effort) + n_initial_points) - return n_initial_points, min_epochs, search_space_size + return n_initial_points or 1, min_epochs, search_space_size def update_max_epoch(self, val: Dict, current: int): """ calculate max epochs: store the number of non best epochs @@ -987,11 +1015,65 @@ class Hyperopt: # initialize average best occurrence self.avg_best_occurrence = self.min_epochs // self.n_jobs + def main_loop(self, jobs_scheduler): + """ main parallel loop """ + try: + if self.multi: + jobs_scheduler = self.run_multi_backtest_parallel + else: + jobs_scheduler = self.run_backtest_parallel + with parallel_backend('loky', inner_max_num_threads=2): + with Parallel(n_jobs=self.n_jobs, verbose=0, backend='loky') as parallel: + jobs = parallel._effective_n_jobs() + logger.info(f'Effective number of parallel workers used: {jobs}') + # update epochs count + n_points = self.n_points + prev_batch = -1 + epochs_so_far = len(self.trials) + epochs_limit = self.epochs_limit + columns, _ = os.get_terminal_size() + columns -= 1 + while epochs_so_far > prev_batch or epochs_so_far < self.min_epochs: + prev_batch = epochs_so_far + occurrence = int(self.avg_best_occurrence * (1 + self.effort)) + # pad the batch length to the number of jobs to avoid desaturation + batch_len = (occurrence + jobs - + occurrence % jobs) + # when using multiple optimizers each worker performs + # n_points (epochs) in 1 dispatch but this reduces the batch len too much + # if self.multi: batch_len = batch_len // self.n_points + # don't go over the limit + if epochs_so_far + batch_len * n_points >= epochs_limit(): + q, r = divmod(epochs_limit() - epochs_so_far, n_points) + batch_len = q + r + print( + f"{epochs_so_far+1}-{epochs_so_far+batch_len*n_points}" + f"/{epochs_limit()}: ", + end='') + f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, jobs) + print(end='\r') + saved = self.log_results(f_val, epochs_so_far, epochs_limit()) + print('\r', ' ' * columns, end='\r') + # stop if no epochs have been evaluated + if len(f_val) < batch_len: + logger.warning("Some evaluated epochs were void, " + "check the loss function and the search space.") + if (not saved and len(f_val) > 1) or batch_len < 1 or \ + (not saved and 
self.search_space_size < batch_len + epochs_limit()): + break + # log_results add + epochs_so_far += saved + if self.max_epoch_reached: + logger.info("Max epoch reached, terminating.") + break + except KeyboardInterrupt: + print('User interrupted..') + def start(self) -> None: """ Broom Broom """ self.random_state = self._set_random_state(self.config.get('hyperopt_random_state', None)) logger.info(f"Using optimizer random state: {self.random_state}") - + self.hyperopt_table_header = -1 data, timerange = self.backtesting.load_bt_data() preprocessed = self.backtesting.strategy.tickerdata_to_dataframe(data) @@ -1024,57 +1106,13 @@ class Hyperopt: colorama_init(autoreset=True) self.setup_optimizers() - try: - if self.multi: - jobs_scheduler = self.run_multi_backtest_parallel - else: - jobs_scheduler = self.run_backtest_parallel - with parallel_backend('loky', inner_max_num_threads=2): - with Parallel(n_jobs=self.n_jobs, verbose=0, backend='loky') as parallel: - jobs = parallel._effective_n_jobs() - logger.info(f'Effective number of parallel workers used: {jobs}') - # update epochs count - n_points = self.n_points - prev_batch = -1 - epochs_so_far = len(self.trials) - epochs_limit = self.epochs_limit - columns, _ = os.get_terminal_size() - columns -= 1 - while epochs_so_far > prev_batch or epochs_so_far < self.min_epochs: - prev_batch = epochs_so_far - occurrence = int(self.avg_best_occurrence * (1 + self.effort)) - # pad the batch length to the number of jobs to avoid desaturation - batch_len = (occurrence + jobs - - occurrence % jobs) - # when using multiple optimizers each worker performs - # n_points (epochs) in 1 dispatch but this reduces the batch len too much - # if self.multi: batch_len = batch_len // self.n_points - # don't go over the limit - if epochs_so_far + batch_len * n_points >= epochs_limit(): - q, r = divmod(epochs_limit() - epochs_so_far, n_points) - batch_len = q + r - print( - f"{epochs_so_far+1}-{epochs_so_far+batch_len*n_points}" - f"/{epochs_limit()}: ", - end='') - f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, jobs) - print(end='\r') - saved = self.log_results(f_val, epochs_so_far, epochs_limit()) - print('\r', ' ' * columns, end='\r') - # stop if no epochs have been evaluated - if len(f_val) < batch_len: - logger.warning("Some evaluated epochs were void, " - "check the loss function and the search space.") - if (not saved and len(f_val) > 1) or batch_len < 1: - break - # log_results add - epochs_so_far += saved - if self.max_epoch_reached: - logger.info("Max epoch reached, terminating.") - break - except KeyboardInterrupt: - print('User interrupted..') + if self.multi: + jobs_scheduler = self.run_multi_backtest_parallel + else: + jobs_scheduler = self.run_backtest_parallel + + self.main_loop(jobs_scheduler) self.save_trials(final=True) From cc47f3e1e4248914dd7d09383542b63e6879c94e Mon Sep 17 00:00:00 2001 From: orehunt Date: Sun, 22 Mar 2020 15:46:35 +0100 Subject: [PATCH 19/21] minor fixes, refactoring, comments --- freqtrade/commands/cli_options.py | 16 +++---- freqtrade/optimize/hyperopt.py | 77 +++++++++++++++++-------------- 2 files changed, 51 insertions(+), 42 deletions(-) diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index b49f893e3..a83c9b9b4 100644 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -206,23 +206,23 @@ AVAILABLE_CLI_OPTIONS = { ), "mode": Arg('--mode', - help=('Switches hyperopt to use one optimizer per job, use it', - 'when backtesting iterations are cheap 
(default: %(default)d).'),
+            help='Switches hyperopt to use one optimizer per job, use it '
+            'when backtesting iterations are cheap (default: %(default)s).',
             metavar='NAME',
             default=constants.HYPEROPT_MODE),
     "n_points":
         Arg('--n-points',
-            help=('Controls how many points to ask to the optimizer',
-                  'increase if cpu usage of each core',
-                  'appears low (default: %(default)d).'),
+            help='Controls how many points to ask the optimizer for; '
+            'increase if cpu usage of each core '
+            'appears low (default: %(default)d).',
             type=int,
             metavar='INT',
             default=constants.HYPEROPT_N_POINTS),
     "lie_strat":
         Arg('--lie-strat',
-            help=('Sets the strategy that the optimizer uses to lie',
-                  'when asking for more than one point, ',
-                  'no effect if n_point is one (default: %(default)d).'),
+            help='Sets the strategy that the optimizer uses to lie '
+            'when asking for more than one point; '
+            'no effect if n_points is one (default: %(default)s).',
             default=constants.HYPEROPT_LIE_STRAT),
     "spaces":
     Arg(
diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py
index 876978a1b..547ba502d 100644
--- a/freqtrade/optimize/hyperopt.py
+++ b/freqtrade/optimize/hyperopt.py
@@ -719,7 +719,7 @@ class Hyperopt:
             optimizers.put(opt)
             # switch the seed to get a different point
             opt.rng.seed(rand)
-            opt, opt.void_loss = opt.copy(random_state=opt.rng), opt.void_loss
+            opt, opt.void_loss, opt.void = opt.copy(random_state=opt.rng), opt.void_loss, opt.void
         # a model is only fit after initial points
         elif initial_points < 1:
             opt.tell(opt.Xi, opt.yi)
@@ -729,12 +729,34 @@ class Hyperopt:
         return opt, initial_points

     @staticmethod
-    def opt_results(void: bool, void_filtered: list,
-                    initial_points: int, results_board: Queue) -> list:
+    def opt_results(opt: Optimizer, void_filtered: list,
+                    Xi_d: list, yi_d: list, initial_points: int, is_shared: bool,
+                    results_board: Queue, optimizers: Queue) -> list:
+        """
+        update the board used to skip already computed points,
+        set the initial point status
+        """
+        # add points of the current dispatch if any
+        if opt.void_loss != VOID_LOSS or len(void_filtered) > 0:
+            Xi = [*Xi_d, *[list(v['params_dict'].values()) for v in void_filtered]]
+            yi = [*yi_d, *[v['loss'] for v in void_filtered]]
+            if is_shared:
+                # refresh the optimizer that stores all the points
+                opt = optimizers.get()
+                opt.tell(Xi, yi, fit=False)
+                opt.void = False
+            else:
+                opt.void = True
+        # send back the updated optimizer only in non shared mode
+        # because in shared mode if all results are void we don't
+        # fetch it at all
+        if not opt.void or not is_shared:
+            # don't pickle models
+            del opt.models[:]
+            optimizers.put(opt)
         # NOTE: some results at the beginning won't be published
-        # because they are removed by the filter_void_losses
-        if not void:
+        # because they are removed by filter_void_losses
+        if not opt.void:
             results = results_board.get()
             for v in void_filtered:
                 a = tuple(v['params_dict'].values())

     def parallel_opt_objective(self, n: int, optimizers: Queue, jobs: int, results_board: Queue):
         """
         objective run in multi opt mode, optimizers share the results as soon as they
         are completed
         """
         self.log_results_immediate(n)
         is_shared = self.shared
-        id = optimizers.qsize()
         opt, initial_points = self.opt_state(is_shared, optimizers)
         sss = self.search_space_size
         asked: Dict[Tuple, Any] = {tuple([]): None}
         asked_d: Dict[Tuple, Any] = {}
         told = 0  # told
         Xi_d = []  # done
         yi_d = []
         Xi_t = []  # to do
-        while asked != asked_d and len(opt.Xi) < sss:
+        # if opt.void == -1 the optimizer failed to give a new point (between dispatches), stop
+        # if asked == asked_d the points returned are the same, stop
+        # if opt.Xi > sss the 
optimizer has more points than the estimated search space size, stop + while opt.void != -1 and asked != asked_d and len(opt.Xi) < sss: asked_d = asked asked = opt.ask(n_points=self.ask_points, strategy=self.lie_strat()) if not self.ask_points: @@ -787,31 +811,20 @@ class Hyperopt: opt.update_next() else: break + # return early if there is nothing to backtest + if len(Xi_t) < 1: + if not is_shared: + opt.void = -1 + del opt.models[:] + optimizers.put(opt) + return [] # run the backtest for each point to do (Xi_t) f_val = [self.backtest_params(a) for a in Xi_t] # filter losses void_filtered = self.filter_void_losses(f_val, opt) - # add points of the current dispatch if any - if opt.void_loss != VOID_LOSS or len(void_filtered) > 0: - Xi = [*Xi_d, *[list(v['params_dict'].values()) for v in void_filtered]] - yi = [*yi_d, *[v['loss'] for v in void_filtered]] - void = False - if is_shared: - # refresh the optimizer that stores all the points - opt = optimizers.get() - opt.tell(Xi, yi, fit=False) - else: - void = True - opt.void = void - # send back the updated optimizer only in non shared mode - # because in shared mode if all results are void we don't - # fetch it at all - if not void or not is_shared: - # don't pickle models - del opt.models[:] - optimizers.put(opt) - return self.opt_results(void, void_filtered, initial_points, results_board) + return self.opt_results(opt, void_filtered, Xi_d, yi_d, initial_points, is_shared, + results_board, optimizers) def parallel_objective(self, asked, results: Queue = None, n=0): """ objective run in single opt mode, run the backtest, store the results into a queue """ @@ -920,7 +933,7 @@ class Hyperopt: try: search_space_size = int( (factorial(n_parameters) / - (factorial(n_parameters - n_dimensions) * factorial(n_dimensions)))) + (factorial(n_parameters - n_dimensions) * factorial(n_dimensions)))) except OverflowError: search_space_size = VOID_LOSS # logger.info(f'Search space size: {search_space_size}') @@ -1018,10 +1031,6 @@ class Hyperopt: def main_loop(self, jobs_scheduler): """ main parallel loop """ try: - if self.multi: - jobs_scheduler = self.run_multi_backtest_parallel - else: - jobs_scheduler = self.run_backtest_parallel with parallel_backend('loky', inner_max_num_threads=2): with Parallel(n_jobs=self.n_jobs, verbose=0, backend='loky') as parallel: jobs = parallel._effective_n_jobs() From 6b9bc7c83f48078dfb8ed250fb69f1ac15066ed9 Mon Sep 17 00:00:00 2001 From: orehunt Date: Tue, 24 Mar 2020 12:06:35 +0100 Subject: [PATCH 20/21] - reduction of pickling time by using epochs to load points - use object state just for rng and init points status, don't save models or points - other counting edge cases fixes --- freqtrade/optimize/hyperopt.py | 355 +++++++++++++++++-------- freqtrade/optimize/hyperopt_backend.py | 17 +- 2 files changed, 248 insertions(+), 124 deletions(-) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 547ba502d..aa0a01638 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -4,7 +4,6 @@ This module contains the hyperopt logic """ import os -import functools import locale import logging import random @@ -92,7 +91,9 @@ class Hyperopt: self.n_jobs = self.config.get('hyperopt_jobs', -1) if self.n_jobs < 0: self.n_jobs = cpu_count() // 2 or 1 - self.effort = self.config['effort'] if 'effort' in self.config else 0 + self.effort = max(0.01, + self.config['effort'] if 'effort' in self.config else 1 + ) self.total_epochs = self.config['epochs'] if 'epochs' in self.config else 
0 self.max_epoch = 0 self.max_epoch_reached = False @@ -155,6 +156,8 @@ class Hyperopt: # optimizers self.opts: List[Optimizer] = [] self.opt: Optimizer = None + self.Xi: Dict = {} + self.yi: Dict = {} backend.manager = Manager() self.mode = self.config.get('mode', 'single') @@ -170,11 +173,14 @@ class Hyperopt: self.opt_base_estimator = self.estimators self.opt_acq_optimizer = 'sampling' backend.optimizers = backend.manager.Queue() - backend.results_board = backend.manager.Queue(maxsize=1) - backend.results_board.put({}) + backend.results_batch = backend.manager.Queue() else: self.multi = False - backend.results = backend.manager.Queue() + backend.results_list = backend.manager.list([]) + # this is where opt_ask_and_tell stores the results after points are + # used for fit and predict, to avoid additional pickling + self.batch_results = [] + # self.opt_base_estimator = lambda: BayesianRidge(n_iter=100, normalize=True) self.opt_acq_optimizer = 'sampling' self.opt_base_estimator = lambda: 'ET' # The GaussianProcessRegressor is heavy, which makes it not a good default @@ -262,14 +268,20 @@ class Hyperopt: n_opts = 0 if self.multi: while not backend.optimizers.empty(): - opts.append(backend.optimizers.get()) + opt = backend.optimizers.get() + opt = Hyperopt.opt_clear(opt) + opts.append(opt) n_opts = len(opts) for opt in opts: backend.optimizers.put(opt) else: + # when we clear the object for saving we have to make a copy to preserve state + opt = Hyperopt.opt_rand(self.opt, seed=False) if self.opt: n_opts = 1 - opts = [self.opt] + opts = [Hyperopt.opt_clear(self.opt)] + # (the optimizer copy function also fits a new model with the known points) + self.opt = opt logger.debug(f"Saving {n_opts} {plural(n_opts, 'optimizer')}.") dump(opts, self.opts_file) @@ -610,42 +622,41 @@ class Hyperopt: def estimators(self): return ESTIMATORS[random.randrange(0, ESTIMATORS_N)] - def get_optimizer(self, dimensions: List[Dimension], n_jobs: int, - n_initial_points: int) -> Optimizer: + def get_optimizer(self, random_state: int = None) -> Optimizer: " Construct an optimizer object " # https://github.com/scikit-learn/scikit-learn/issues/14265 # lbfgs uses joblib threading backend so n_jobs has to be reduced # to avoid oversubscription if self.opt_acq_optimizer == 'lbfgs': n_jobs = 1 + else: + n_jobs = self.n_jobs return Optimizer( - dimensions, + self.dimensions, base_estimator=self.opt_base_estimator(), acq_optimizer=self.opt_acq_optimizer, - n_initial_points=n_initial_points, + n_initial_points=self.opt_n_initial_points, acq_optimizer_kwargs={'n_jobs': n_jobs}, model_queue_size=self.n_models, - random_state=self.random_state, + random_state=random_state or self.random_state, ) def run_backtest_parallel(self, parallel: Parallel, tries: int, first_try: int, - jobs: int) -> List: + jobs: int): """ launch parallel in single opt mode, return the evaluated epochs """ - result = parallel( - delayed(wrap_non_picklable_objects(self.parallel_objective))(asked, backend.results, i) + parallel( + delayed(wrap_non_picklable_objects(self.parallel_objective)) + (asked, backend.results_list, i) for asked, i in zip(self.opt_ask_and_tell(jobs, tries), range(first_try, first_try + tries))) - return result def run_multi_backtest_parallel(self, parallel: Parallel, tries: int, first_try: int, - jobs: int) -> List: + jobs: int): """ launch parallel in multi opt mode, return the evaluated epochs""" - results = parallel( + parallel( delayed(wrap_non_picklable_objects(self.parallel_opt_objective))( - i, backend.optimizers, jobs, 
backend.results_board) + i, backend.optimizers, jobs, backend.results_shared, backend.results_batch) for i in range(first_try, first_try + tries)) - # each worker will return a list containing n_points, so compact into a single list - return functools.reduce(lambda x, y: [*x, *y], results, []) def opt_ask_and_tell(self, jobs: int, tries: int): """ @@ -660,34 +671,38 @@ class Hyperopt: evald: Set[Tuple] = set() opt = self.opt - def point(): - if self.ask_points: + # this is needed because when we ask None points, the optimizer doesn't return a list + if self.ask_points: + def point(): if to_ask: return tuple(to_ask.popleft()) else: to_ask.extend(opt.ask(n_points=self.ask_points, strategy=self.lie_strat())) return tuple(to_ask.popleft()) - else: + else: + def point(): return tuple(opt.ask(strategy=self.lie_strat())) for r in range(tries): fit = (len(to_ask) < 1) - while not backend.results.empty(): - vals.append(backend.results.get()) + if len(backend.results_list) > 0: + vals.extend(backend.results_list) + del backend.results_list[:] if vals: # filter losses - void_filtered = self.filter_void_losses(vals, opt) + void_filtered = Hyperopt.filter_void_losses(vals, opt) if void_filtered: # again if all are filtered - opt.tell([list(v['params_dict'].values()) for v in void_filtered], + opt.tell([Hyperopt.params_Xi(v) for v in void_filtered], [v['loss'] for v in void_filtered], fit=fit) # only fit when out of points - del vals[:], void_filtered[:] + self.batch_results.extend(void_filtered) + del vals[:], void_filtered[:] a = point() + # this usually happens at the start when trying to fit before the initial points if a in evald: logger.debug("this point was evaluated before...") - if not fit: - opt.update_next() + opt.update_next() a = point() if a in evald: break @@ -695,90 +710,111 @@ class Hyperopt: yield a @staticmethod - def opt_get_past_points(asked: dict, results_board: Queue) -> Tuple[dict, int]: + def opt_get_past_points(is_shared: bool, asked: dict, results_shared: Dict) -> Tuple[dict, int]: """ fetch shared results between optimizers """ - results = results_board.get() - results_board.put(results) + # a result is (y, counter) for a in asked: - if a in results: - asked[a] = results[a] - return asked, len(results) + if a in results_shared: + y, counter = results_shared[a] + asked[a] = y + counter -= 1 + if counter < 1: + del results_shared[a] + return asked, len(results_shared) @staticmethod - def opt_state(shared: bool, optimizers: Queue) -> Tuple[Optimizer, int]: + def opt_rand(opt: Optimizer, rand: int = None, seed: bool = True) -> Optimizer: + """ return a new instance of the optimizer with modified rng """ + if seed: + if not rand: + rand = opt.rng.randint(0, VOID_LOSS) + opt.rng.seed(rand) + opt, opt.void_loss, opt.void, opt.rs = ( + opt.copy(random_state=opt.rng), opt.void_loss, opt.void, opt.rs + ) + return opt + + @staticmethod + def opt_state(shared: bool, optimizers: Queue) -> Optimizer: """ fetch an optimizer in multi opt mode """ # get an optimizer instance opt = optimizers.get() - # this is the counter used by the optimizer internally to track the initial - # points evaluated so far.. 
- initial_points = opt._n_initial_points if shared: # get a random number before putting it back to avoid # replication with other workers and keep reproducibility rand = opt.rng.randint(0, VOID_LOSS) optimizers.put(opt) # switch the seed to get a different point - opt.rng.seed(rand) - opt, opt.void_loss, opt.void = opt.copy(random_state=opt.rng), opt.void_loss, opt.void - # a model is only fit after initial points - elif initial_points < 1: - opt.tell(opt.Xi, opt.yi) - # we have to get a new point anyway - else: - opt.update_next() - return opt, initial_points + opt = Hyperopt.opt_rand(opt, rand) + return opt @staticmethod - def opt_results(opt: Optimizer, void_filtered: list, - Xi_d: list, yi_d: list, initial_points: int, is_shared: bool, - results_board: Queue, optimizers: Queue) -> list: + def opt_clear(opt: Optimizer): + """ clear state from an optimizer object """ + del opt.models[:], opt.Xi[:], opt.yi[:] + return opt + + @staticmethod + def opt_results(opt: Optimizer, void_filtered: list, jobs: int, is_shared: bool, + results_shared: Dict, results_batch: Queue, optimizers: Queue): """ update the board used to skip already computed points, set the initial point status """ # add points of the current dispatch if any if opt.void_loss != VOID_LOSS or len(void_filtered) > 0: - Xi = [*Xi_d, *[list(v['params_dict'].values()) for v in void_filtered]] - yi = [*yi_d, *[v['loss'] for v in void_filtered]] - if is_shared: - # refresh the optimizer that stores all the points - opt = optimizers.get() - opt.tell(Xi, yi, fit=False) - opt.void = False + void = False else: - opt.void = True + void = True # send back the updated optimizer only in non shared mode - # because in shared mode if all results are void we don't - # fetch it at all - if not opt.void or not is_shared: - # don't pickle models - del opt.models[:] + if not is_shared: + opt = Hyperopt.opt_clear(opt) + # is not a replica in shared mode optimizers.put(opt) # NOTE: some results at the beginning won't be published - # because they are removed by the filter_void_losses - if not opt.void: - results = results_board.get() - for v in void_filtered: - a = tuple(v['params_dict'].values()) - if a not in results: - results[a] = v['loss'] - results_board.put(results) - # set initial point flag + # because they are removed by filter_void_losses + rs = opt.rs + if not void: + # the tuple keys are used to avoid computation of done points by any optimizer + results_shared.update({tuple(Hyperopt.params_Xi(v)): (v["loss"], jobs - 1) + for v in void_filtered}) + # in multi opt mode (non shared) also track results for each optimizer (using rs as ID) + # this keys should be cleared after each batch + Xi, yi = results_shared[rs] + Xi = Xi + tuple((Hyperopt.params_Xi(v)) for v in void_filtered) + yi = yi + tuple(v["loss"] for v in void_filtered) + results_shared[rs] = (Xi, yi) + # this is the counter used by the optimizer internally to track the initial + # points evaluated so far.. 
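+        # (each result of this batch consumes one remaining initial slot: the
+        # n-th result below is flagged as an initial point only while n < counter)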
     @staticmethod
-    def opt_results(opt: Optimizer, void_filtered: list,
-                    Xi_d: list, yi_d: list, initial_points: int, is_shared: bool,
-                    results_board: Queue, optimizers: Queue) -> list:
+    def opt_clear(opt: Optimizer):
+        """ clear state from an optimizer object """
+        del opt.models[:], opt.Xi[:], opt.yi[:]
+        return opt
+
+    @staticmethod
+    def opt_results(opt: Optimizer, void_filtered: list, jobs: int, is_shared: bool,
+                    results_shared: Dict, results_batch: Queue, optimizers: Queue):
         """
         update the board used to skip already computed points,
         set the initial point status
         """
         # add points of the current dispatch if any
         if opt.void_loss != VOID_LOSS or len(void_filtered) > 0:
-            Xi = [*Xi_d, *[list(v['params_dict'].values()) for v in void_filtered]]
-            yi = [*yi_d, *[v['loss'] for v in void_filtered]]
-            if is_shared:
-                # refresh the optimizer that stores all the points
-                opt = optimizers.get()
-            opt.tell(Xi, yi, fit=False)
-            opt.void = False
+            void = False
         else:
-            opt.void = True
+            void = True
         # send back the updated optimizer only in non shared mode
-        # because in shared mode if all results are void we don't
-        # fetch it at all
-        if not opt.void or not is_shared:
-            # don't pickle models
-            del opt.models[:]
+        if not is_shared:
+            opt = Hyperopt.opt_clear(opt)
+            # (unlike in shared mode, this optimizer is not a throwaway replica)
             optimizers.put(opt)
         # NOTE: some results at the beginning won't be published
-        # because they are removed by the filter_void_losses
-        if not opt.void:
-            results = results_board.get()
-            for v in void_filtered:
-                a = tuple(v['params_dict'].values())
-                if a not in results:
-                    results[a] = v['loss']
-            results_board.put(results)
-        # set initial point flag
+        # because they are removed by filter_void_losses
+        rs = opt.rs
+        if not void:
+            # the tuple keys are used to avoid computation of done points by any optimizer
+            results_shared.update({tuple(Hyperopt.params_Xi(v)): (v["loss"], jobs - 1)
+                                   for v in void_filtered})
+            # in multi opt mode (non shared) also track results for each optimizer (using rs as ID);
+            # these keys should be cleared after each batch
+            Xi, yi = results_shared[rs]
+            Xi = Xi + tuple(Hyperopt.params_Xi(v) for v in void_filtered)
+            yi = yi + tuple(v["loss"] for v in void_filtered)
+            results_shared[rs] = (Xi, yi)
+        # this is the counter used by the optimizer internally to track the initial
+        # points evaluated so far..
+        initial_points = opt._n_initial_points
+        # set initial point flag and optimizer random state
         for n, v in enumerate(void_filtered):
             v['is_initial_point'] = initial_points - n > 0
-        return void_filtered
+            v['random_state'] = rs
+        results_batch.put(void_filtered)
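The tuple rebuilding in opt_results looks redundant but is deliberate: a SyncManager dict proxy does not propagate in-place mutation of the values it holds, so the per-rs pair must be rebuilt locally and reassigned. A self-contained sketch of that constraint (names are local to the example):

from multiprocessing import Manager

if __name__ == '__main__':
    manager = Manager()
    shared = manager.dict({'rs': ((), ())})

    Xi, yi = shared['rs']
    Xi = Xi + ((0.1, 20),)   # build new tuples locally...
    yi = yi + (-1.5,)
    shared['rs'] = (Xi, yi)  # ...then reassign so the proxy actually updates

    assert shared['rs'] == (((0.1, 20),), (-1.5,))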
-    def parallel_opt_objective(self, n: int, optimizers: Queue, jobs: int, results_board: Queue):
+    def parallel_opt_objective(self, n: int, optimizers: Queue, jobs: int,
+                               results_shared: Dict, results_batch: Queue):
         """
         objective run in multi opt mode, optimizers share the results as soon as they are completed
         """
         self.log_results_immediate(n)
         is_shared = self.shared
-        opt, initial_points = self.opt_state(is_shared, optimizers)
+        opt = self.opt_state(is_shared, optimizers)
         sss = self.search_space_size
         asked: Dict[Tuple, Any] = {tuple([]): None}
         asked_d: Dict[Tuple, Any] = {}
+        # fit a model with the known points (the optimizer has no points here since
+        # it was just fetched from the queue)
+        rs = opt.rs
+        Xi, yi = self.Xi[rs], self.yi[rs]
+        # add the points discovered within this batch
+        bXi, byi = results_shared[rs]
+        Xi.extend(list(bXi))
+        yi.extend(list(byi))
+        if Xi:
+            opt.tell(Xi, yi)
         told = 0  # told
         Xi_d = []  # done
         yi_d = []
@@ -794,7 +830,7 @@ class Hyperopt:
             else:
                 asked = {tuple(a): None for a in asked}
             # check if some points have been evaluated by other optimizers
-            p_asked, _ = self.opt_get_past_points(asked, results_board)
+            p_asked, _ = Hyperopt.opt_get_past_points(is_shared, asked, results_shared)
             for a in p_asked:
                 if p_asked[a] is not None:
                     if a not in Xi_d:
                         Xi_d.append(a)
                         yi_d.append(p_asked[a])
                 else:
                     Xi_t.append(a)
+            # no points to do?
             if len(Xi_t) < self.n_points:
                 len_Xi_d = len(Xi_d)
-                if len_Xi_d > told:  # tell new points
+                # did other workers backtest some points?
+                if len_Xi_d > told:
+                    # if yes, fit a new model with the new points
                     opt.tell(Xi_d[told:], yi_d[told:])
                     told = len_Xi_d
-                else:
-                    opt.update_next()
+                else:  # or get new points from a different random state
+                    opt = Hyperopt.opt_rand(opt)
             else:
                 break
         # return early if there is nothing to backtest
         if len(Xi_t) < 1:
-            if not is_shared:
-                opt.void = -1
-            del opt.models[:]
-            optimizers.put(opt)
+            if is_shared:
+                opt = optimizers.get()
+            opt.void = -1
+            opt = Hyperopt.opt_clear(opt)
+            optimizers.put(opt)
             return []
         # run the backtest for each point to do (Xi_t)
-        f_val = [self.backtest_params(a) for a in Xi_t]
+        results = [self.backtest_params(a) for a in Xi_t]
         # filter losses
-        void_filtered = self.filter_void_losses(f_val, opt)
+        void_filtered = Hyperopt.filter_void_losses(results, opt)

-        return self.opt_results(opt, void_filtered, Xi_d, yi_d, initial_points, is_shared,
-                                results_board, optimizers)
+        Hyperopt.opt_results(opt, void_filtered, jobs, is_shared,
+                             results_shared, results_batch, optimizers)

-    def parallel_objective(self, asked, results: Queue = None, n=0):
+    def parallel_objective(self, asked, results_list: List = [], n=0):
         """
         objective run in single opt mode, run the backtest, store the results into a queue
         """
         self.log_results_immediate(n)
         v = self.backtest_params(asked)
-        if results:
-            results.put(v)
+        v['is_initial_point'] = n < self.opt_n_initial_points
-        return v
+        v['random_state'] = self.random_state
+        results_list.append(v)

     def log_results_immediate(self, n) -> None:
         """ Signals that a new job has been scheduled """
         print('.', end='')
         sys.stdout.flush()

-    def log_results(self, f_val, frame_start, total_epochs: int) -> int:
+    def log_results(self, batch_results, frame_start, total_epochs: int) -> int:
         """
         Log results if it is better than any previous evaluation
         """
         current = frame_start + 1
         i = 0
-        for i, v in enumerate(f_val, 1):
+        for i, v in enumerate(batch_results, 1):
             is_best = self.is_best_loss(v, self.current_best_loss)
             current = frame_start + i
             v['is_best'] = is_best
@@ -857,8 +897,13 @@ class Hyperopt:
             self.update_max_epoch(v, current)
             self.print_results(v)
             self.trials.append(v)
-        # Save results and optimizersafter every batch
+        # Save results and optimizers after every batch
         self.save_trials()
+        # track new points if in multi mode
+        if self.multi:
+            self.track_points(trials=self.trials[frame_start:])
+            # clear points used by optimizers intra batch
+            backend.results_shared.update(self.opt_empty_tuple())
         # give up if no best since max epochs
         if current + 1 > self.epochs_limit():
             self.max_epoch_reached = True
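The two result channels consumed above can be summarized in a small sketch, assuming a started SyncManager (the epoch dicts are fabricated): single opt mode appends epochs one at a time to a shared list, multi opt mode ships whole filtered batches through a queue, and the main process drains both much like the return_results method further down.

from multiprocessing import Manager

if __name__ == '__main__':
    manager = Manager()
    results_list = manager.list()    # single mode channel
    results_batch = manager.Queue()  # multi mode channel

    # a single-mode worker appends one epoch at a time
    results_list.append({'loss': -1.5, 'is_initial_point': True})
    # a multi-mode worker publishes its whole filtered batch in one shot
    results_batch.put([{'loss': -0.7}, {'loss': -0.9}])

    # the main process drains both channels
    batch = list(results_list)
    del results_list[:]
    while not results_batch.empty():
        batch.extend(results_batch.get())
    print(len(batch), 'epochs collected')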
@@ -953,8 +998,8 @@ class Hyperopt:
         # never waste
         n_initial_points = min(log_sss, search_space_size // 3)
         # it shall run for this much, I say
-        min_epochs = int(max(n_initial_points, opt_points) * (1 + effort) + n_initial_points)
-        return n_initial_points or 1, min_epochs, search_space_size
+        min_epochs = int(max(n_initial_points, opt_points) + 2 * n_initial_points)
+        return int(n_initial_points * effort) or 1, int(min_epochs * effort), search_space_size

     def update_max_epoch(self, val: Dict, current: int):
         """ calculate max epochs: store the number of non best epochs
@@ -966,49 +1011,108 @@ class Hyperopt:
             self.current_best_epoch = current
             self.max_epoch = int(
                 (self.current_best_epoch + self.avg_best_occurrence + self.min_epochs) *
-                (1 + self.effort))
+                max(1, self.effort))
             if self.max_epoch > self.search_space_size:
                 self.max_epoch = self.search_space_size
         logger.debug(f'\nMax epoch set to: {self.epochs_limit()}')

+    @staticmethod
+    def params_Xi(v: dict):
+        return list(v["params_dict"].values())
+
+    def track_points(self, trials: List = None):
+        """
+        keep track of the evaluated points per optimizer random state
+        """
+        # if no trials are given, use saved trials
+        if not trials:
+            if len(self.trials) > 0:
+                if self.config.get('hyperopt_continue_filtered', False):
+                    trials = filter_trials(self.trials, self.config)
+                else:
+                    trials = self.trials
+            else:
+                return
+        for v in trials:
+            rs = v["random_state"]
+            try:
+                self.Xi[rs].append(Hyperopt.params_Xi(v))
+                self.yi[rs].append(v["loss"])
+            except KeyError:  # Hyperopt was started with a different random_state or number of jobs
+                pass
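A worked numeric sketch of the two budgeting formulas above; every input is an assumed value, since the real ones derive from the search space and the config:

# assumed inputs
search_space_size = 10_000
log_sss = 92       # stand-in for the log-based estimate computed earlier
opt_points = 40
effort = 1

n_initial_points = min(log_sss, search_space_size // 3)                     # 92
min_epochs = int(max(n_initial_points, opt_points) + 2 * n_initial_points)  # 276

# update_max_epoch then stretches the budget whenever a new best shows up
current_best_epoch = 150
avg_best_occurrence = 50
max_epoch = int((current_best_epoch + avg_best_occurrence + min_epochs)
                * max(1, effort))                                           # 476
max_epoch = min(max_epoch, search_space_size)
print(n_initial_points, min_epochs, max_epoch)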
     def setup_optimizers(self):
         """
         Setup the optimizers objects, try to load from disk, or create new ones
         """
         # try to load previous optimizers
         opts = self.load_previous_optimizers(self.opts_file)
         n_opts = len(opts)
-        max_opts = self.n_jobs
         if self.multi:
+            max_opts = self.n_jobs
+            rngs = []
             # when sharing results there is only one optimizer that gets copied
             if self.shared:
                 max_opts = 1
             # put the restored optimizers in the queue
-            if n_opts > 0:
+            # only if they match the current number of jobs
+            if n_opts == max_opts:
                 for n in range(n_opts):
-                    backend.optimizers.put(opts[n])
+                    rngs.append(opts[n].rs)
+                    # make sure to not store points and models in the optimizer
+                    backend.optimizers.put(Hyperopt.opt_clear(opts[n]))
             # generate as many optimizers as are still needed to fill the job count
             remaining = max_opts - backend.optimizers.qsize()
             if remaining > 0:
-                opt = self.get_optimizer(self.dimensions, self.n_jobs, self.opt_n_initial_points)
+                opt = self.get_optimizer()
+                rngs = []
                 for _ in range(remaining):
                     # generate optimizers
                     # random state is preserved
-                    opt_copy = opt.copy(random_state=opt.rng.randint(0,
-                                                                     iinfo(int32).max))
+                    rs = opt.rng.randint(0, iinfo(int32).max)
+                    opt_copy = opt.copy(random_state=rs)
                     opt_copy.void_loss = VOID_LOSS
                     opt_copy.void = False
+                    opt_copy.rs = rs
+                    rngs.append(rs)
                     backend.optimizers.put(opt_copy)
                 del opt, opt_copy
+            # reconstruct observed points from epochs
+            # in shared mode each worker will remove the results once all the workers
+            # have read them (counter < 1)
+            counter = self.n_jobs
+
+            def empty_dict():
+                return {rs: [] for rs in rngs}
+            self.opt_empty_tuple = lambda: {rs: ((), ()) for rs in rngs}
+            self.Xi.update(empty_dict())
+            self.yi.update(empty_dict())
+            self.track_points()
+            # this is needed to keep track of results discovered within the same batch
+            # by each optimizer; use tuples, as the SyncManager doesn't handle nested dicts
+            Xi, yi = self.Xi, self.yi
+            results = {tuple(X): [yi[r][n], counter] for r in Xi for n, X in enumerate(Xi[r])}
+            results.update(self.opt_empty_tuple())
+            backend.results_shared = backend.manager.dict(results)
         else:
             # if we have more than 1 optimizer but are using single opt,
             # pick one and discard the rest...
             if n_opts > 0:
                 self.opt = opts[-1]
             else:
-                self.opt = self.get_optimizer(
-                    self.dimensions, self.n_jobs, self.opt_n_initial_points
-                )
+                self.opt = self.get_optimizer()
                 self.opt.void_loss = VOID_LOSS
                 self.opt.void = False
+                self.opt.rs = self.random_state
+            # in single mode restore the points directly to the optimizer
+            # but delete first in case we have filtered the starting list of points
+            self.opt = Hyperopt.opt_clear(self.opt)
+            rs = self.random_state
+            self.Xi[rs] = []
+            self.yi[rs] = []
+            self.track_points()
+            if len(self.Xi[rs]) > 0:
+                self.opt.tell(self.Xi[rs], self.yi[rs], fit=False)
+            # delete points since in single mode the optimizer state sits in the main
+            # process and is not discarded
+            self.Xi, self.yi = {}, {}
         del opts[:]

     def setup_points(self):
@@ -1028,6 +1132,20 @@ class Hyperopt:
         # initialize average best occurrence
         self.avg_best_occurrence = self.min_epochs // self.n_jobs

+    def return_results(self):
+        """
+        results are passed by queue in multi mode, or stored by ask_and_tell in single mode
+        """
+        batch_results = []
+        if self.multi:
+            while not backend.results_batch.empty():
+                worker_results = backend.results_batch.get()
+                batch_results.extend(worker_results)
+        else:
+            batch_results.extend(self.batch_results)
+            del self.batch_results[:]
+        return batch_results
+
     def main_loop(self, jobs_scheduler):
         """ main parallel loop """
         try:
@@ -1036,7 +1154,7 @@ class Hyperopt:
                 jobs = parallel._effective_n_jobs()
                 logger.info(f'Effective number of parallel workers used: {jobs}')
                 # update epochs count
-                n_points = self.n_points
+                opt_points = self.opt_points
                 prev_batch = -1
                 epochs_so_far = len(self.trials)
                 epochs_limit = self.epochs_limit
                 columns -= 1
                 while epochs_so_far > prev_batch or epochs_so_far < self.min_epochs:
                     prev_batch = epochs_so_far
-                    occurrence = int(self.avg_best_occurrence * (1 + self.effort))
+                    occurrence = int(self.avg_best_occurrence * max(1, self.effort))
                     # pad the batch length to the number of jobs to avoid desaturation
                     batch_len = (occurrence + jobs - occurrence % jobs)
                     # n_points (epochs) in 1 dispatch but this reduces the batch len too much
                     # if self.multi: batch_len = batch_len // self.n_points
                     # don't go over the limit
-                    if epochs_so_far + batch_len * n_points >= epochs_limit():
-                        q, r = divmod(epochs_limit() - epochs_so_far, n_points)
+                    if epochs_so_far + batch_len * opt_points >= epochs_limit():
+                        q, r = divmod(epochs_limit() - epochs_so_far, opt_points)
                         batch_len = q + r
                     print(
-                        f"{epochs_so_far+1}-{epochs_so_far+batch_len*n_points}"
+                        f"{epochs_so_far+1}-{epochs_so_far+batch_len}"
                         f"/{epochs_limit()}: ", end='')
-                    f_val = jobs_scheduler(parallel, batch_len, epochs_so_far, jobs)
+                    jobs_scheduler(parallel, batch_len, epochs_so_far, jobs)
+                    batch_results = self.return_results()
                     print(end='\r')
-                    saved = self.log_results(f_val, epochs_so_far, epochs_limit())
+                    saved = self.log_results(batch_results, epochs_so_far, epochs_limit())
                     print('\r', ' ' * columns, end='\r')
                     # stop if no epochs have been evaluated
-                    if len(f_val) < batch_len:
+                    if len(batch_results) < batch_len:
                         logger.warning("Some evaluated epochs were void, "
                                        "check the loss function and the search space.")
-                    if (not saved and len(f_val) > 1) or batch_len < 1 or \
+                    if (not saved and len(batch_results) > 1) or batch_len < 1 or \
                        (not saved and self.search_space_size < batch_len + epochs_limit()):
                         break
                 # log_results add
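The batch sizing in main_loop rounds the expected number of epochs until a new best (occurrence) up to a multiple of the worker count so no job sits idle, then clamps the batch so it cannot overshoot the epoch limit. A small sketch with illustrative values:

jobs = 4
avg_best_occurrence, effort = 10, 1
epochs_so_far, epochs_limit, opt_points = 90, 100, 1

occurrence = int(avg_best_occurrence * max(1, effort))  # 10
batch_len = occurrence + jobs - occurrence % jobs       # 12, a multiple of jobs
if epochs_so_far + batch_len * opt_points >= epochs_limit:
    q, r = divmod(epochs_limit - epochs_so_far, opt_points)
    batch_len = q + r                                   # capped at the 10 remaining epochs
print(batch_len)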
diff --git a/freqtrade/optimize/hyperopt_backend.py b/freqtrade/optimize/hyperopt_backend.py
index 7357fb4ee..1416f358d 100644
--- a/freqtrade/optimize/hyperopt_backend.py
+++ b/freqtrade/optimize/hyperopt_backend.py
@@ -1,4 +1,4 @@
-from typing import Any
+from typing import Any, Dict, List, Tuple
 from queue import Queue
 from multiprocessing.managers import SyncManager

@@ -6,8 +6,13 @@ hyperopt: Any = None
 manager: SyncManager
 # stores the optimizers in multi opt mode
 optimizers: Queue
-# stores a list of the results to share between optimizers
-# in the form of dict[tuple(Xi)] = yi
-results_board: Queue
-# store the results in single opt mode
-results: Queue
+# stores the results to share between optimizers
+# in the form of key = Tuple[Xi], value = Tuple[float, int]
+# where float is the loss and int is a decreasing counter of optimizers
+# that have registered the result
+results_shared: Dict[Tuple, Tuple]
+# in single mode the results_list is used to pass the results to the optimizer
+# to fit new models
+results_list: List
+# results_batch keeps the results per batch that are eventually logged and stored
+results_batch: Queue

From 86f10b3c8e27536734ddc38ff975751760eecf2b Mon Sep 17 00:00:00 2001
From: Matthias
Date: Sun, 19 Jul 2020 20:33:18 +0200
Subject: [PATCH 21/21] Fix some tests

---
 freqtrade/optimize/hyperopt.py  | 3 ++-
 tests/optimize/test_hyperopt.py | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py
index 1bb2fa3e5..7d7128f09 100644
--- a/freqtrade/optimize/hyperopt.py
+++ b/freqtrade/optimize/hyperopt.py
@@ -1139,7 +1139,8 @@ class Hyperopt:
         if not trials:
             if len(self.trials) > 0:
                 if self.config.get('hyperopt_continue_filtered', False):
-                    trials = filter_trials(self.trials, self.config)
+                    raise ValueError()
+                    # trials = filter_trials(self.trials, self.config)
                 else:
                     trials = self.trials
             else:
diff --git a/tests/optimize/test_hyperopt.py b/tests/optimize/test_hyperopt.py
index f7af75424..636e9346b 100644
--- a/tests/optimize/test_hyperopt.py
+++ b/tests/optimize/test_hyperopt.py
@@ -482,6 +482,7 @@ def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None:
 def test_save_results_saves_epochs(mocker, hyperopt, testdatadir, caplog) -> None:
     epochs = create_results(mocker, hyperopt, testdatadir)
     mock_dump = mocker.patch('freqtrade.optimize.hyperopt.dump', return_value=None)
+    mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.save_opts')
     results_file = testdatadir / 'optimize' / 'ut_results.pickle'
     caplog.set_level(logging.DEBUG)

@@ -808,7 +809,7 @@ def test_clean_hyperopt(mocker, default_conf, caplog):
     # once for tickerdata, once for trials, once for optimizers (list)
     assert unlinkmock.call_count == 3
-    assert log_has(f"Removing `{h.tickerdata_pickle}`.", caplog)
+    assert log_has(f"Removing `{h.data_pickle_file}`.", caplog)


 def test_continue_hyperopt(mocker, default_conf, caplog):