Add hyperopt-list and hyperopt-show commands

hroff-1902 2019-11-26 15:01:42 +03:00
parent cab748588c
commit 8e7512161a
5 changed files with 353 additions and 131 deletions


@@ -49,8 +49,14 @@ ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", "plot_limit",
 ARGS_PLOT_PROFIT = ["pairs", "timerange", "export", "exportfilename", "db_url",
                     "trade_source", "ticker_interval"]
 
+ARGS_HYPEROPT_LIST = ["hyperopt_list_best", "hyperopt_list_profitable", "print_colorized",
+                      "print_json", "hyperopt_list_no_details"]
+
+ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_show_index",
+                      "print_json", "hyperopt_show_no_header"]
+
 NO_CONF_REQURIED = ["download-data", "list-timeframes", "list-markets", "list-pairs",
-                    "plot-dataframe", "plot-profit"]
+                    "hyperopt_list", "hyperopt_show", "plot-dataframe", "plot-profit"]
 
 NO_CONF_ALLOWED = ["create-userdir", "list-exchanges"]
@@ -116,6 +122,7 @@ class Arguments:
         from freqtrade.optimize import start_backtesting, start_hyperopt, start_edge
         from freqtrade.utils import (start_create_userdir, start_download_data,
+                                     start_hyperopt_list, start_hyperopt_show,
                                      start_list_exchanges, start_list_markets,
                                      start_list_timeframes, start_trading)
         from freqtrade.plot.plot_utils import start_plot_dataframe, start_plot_profit
@@ -220,3 +227,21 @@ class Arguments:
         )
         plot_profit_cmd.set_defaults(func=start_plot_profit)
         self._build_args(optionlist=ARGS_PLOT_PROFIT, parser=plot_profit_cmd)
+
+        # Add hyperopt-list subcommand
+        hyperopt_list_cmd = subparsers.add_parser(
+            'hyperopt-list',
+            help='List Hyperopt results',
+            parents=[_common_parser],
+        )
+        hyperopt_list_cmd.set_defaults(func=start_hyperopt_list)
+        self._build_args(optionlist=ARGS_HYPEROPT_LIST, parser=hyperopt_list_cmd)
+
+        # Add hyperopt-show subcommand
+        hyperopt_show_cmd = subparsers.add_parser(
+            'hyperopt-show',
+            help='Show details of Hyperopt results',
+            parents=[_common_parser],
+        )
+        hyperopt_show_cmd.set_defaults(func=start_hyperopt_show)
+        self._build_args(optionlist=ARGS_HYPEROPT_SHOW, parser=hyperopt_show_cmd)
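For orientation, here is a minimal, self-contained sketch of the subparser pattern used above. It is not freqtrade code: the handler and the dest name are stand-ins mirroring the option names in ARGS_HYPEROPT_LIST, and the real wiring goes through _build_args and AVAILABLE_CLI_OPTIONS. Each subcommand stores its handler with set_defaults(func=...), and the caller dispatches through the parsed func attribute.

import argparse

def start_hyperopt_list(args: dict) -> None:
    # Stand-in handler; the real one lives in freqtrade.utils
    print("hyperopt-list called, --best =", args.get('hyperopt_list_best'))

parser = argparse.ArgumentParser(prog='freqtrade')
subparsers = parser.add_subparsers()

hyperopt_list_cmd = subparsers.add_parser('hyperopt-list', help='List Hyperopt results')
hyperopt_list_cmd.add_argument('--best', action='store_true', dest='hyperopt_list_best')
hyperopt_list_cmd.set_defaults(func=start_hyperopt_list)

parsed = parser.parse_args(['hyperopt-list', '--best'])
parsed.func(vars(parsed))  # dispatches to the handler registered via set_defaults()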


@@ -18,6 +18,18 @@ def check_int_positive(value: str) -> int:
     return uint
 
 
+def check_int_nonzero(value: str) -> int:
+    try:
+        uint = int(value)
+        if uint == 0:
+            raise ValueError
+    except ValueError:
+        raise argparse.ArgumentTypeError(
+            f"{value} is invalid for this parameter, should be a non-zero integer value"
+        )
+    return uint
+
+
 class Arg:
     # Optional CLI arguments
     def __init__(self, *args, **kwargs):
@@ -364,4 +376,31 @@ AVAILABLE_CLI_OPTIONS = {
         choices=["DB", "file"],
         default="file",
     ),
+    # hyperopt-list, hyperopt-show
+    "hyperopt_list_profitable": Arg(
+        '--profitable',
+        help='Select only profitable epochs.',
+        action='store_true',
+    ),
+    "hyperopt_list_best": Arg(
+        '--best',
+        help='Select only best epochs.',
+        action='store_true',
+    ),
+    "hyperopt_list_no_details": Arg(
+        '--no-details',
+        help='Do not print best epoch details.',
+        action='store_true',
+    ),
+    "hyperopt_show_index": Arg(
+        '-n', '--index',
+        help='Specify the index of the epoch to print details for.',
+        type=check_int_nonzero,
+        metavar='INT',
+    ),
+    "hyperopt_show_no_header": Arg(
+        '--no-header',
+        help='Do not print epoch details header.',
+        action='store_true',
+    ),
 }
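A small sketch (not part of the commit) of how the new check_int_nonzero validator behaves when used as the type of -n/--index: any non-zero integer passes, including negative values, while 0 and non-integers are rejected with the message defined above.

import argparse

def check_int_nonzero(value: str) -> int:
    # Copy of the validator added above, repeated here so the demo is standalone
    try:
        uint = int(value)
        if uint == 0:
            raise ValueError
    except ValueError:
        raise argparse.ArgumentTypeError(
            f"{value} is invalid for this parameter, should be a non-zero integer value")
    return uint

parser = argparse.ArgumentParser()
parser.add_argument('-n', '--index', type=check_int_nonzero, metavar='INT')

print(parser.parse_args(['-n', '5']).index)    # 5
print(parser.parse_args(['-n', '-1']).index)   # -1 (negative indexes count from the end)
# parser.parse_args(['-n', '0']) would exit with:
# "argument -n/--index: 0 is invalid for this parameter, should be a non-zero integer value"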


@@ -308,6 +308,21 @@ class Configuration:
         self._args_to_config(config, argname='hyperopt_loss',
                              logstring='Using Hyperopt loss class name: {}')
 
+        self._args_to_config(config, argname='hyperopt_show_index',
+                             logstring='Parameter -n/--index detected: {}')
+
+        self._args_to_config(config, argname='hyperopt_list_best',
+                             logstring='Parameter --best detected: {}')
+
+        self._args_to_config(config, argname='hyperopt_list_profitable',
+                             logstring='Parameter --profitable detected: {}')
+
+        self._args_to_config(config, argname='hyperopt_list_no_details',
+                             logstring='Parameter --no-details detected: {}')
+
+        self._args_to_config(config, argname='hyperopt_show_no_header',
+                             logstring='Parameter --no-header detected: {}')
+
     def _process_plot_options(self, config: Dict[str, Any]) -> None:
 
         self._args_to_config(config, argname='pairs',
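The _args_to_config helper itself is not part of this diff. As a rough, hypothetical sketch of the assumed behaviour, it copies a CLI argument into the configuration dict and logs the given message only when the option was actually supplied:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def args_to_config(args: dict, config: dict, argname: str, logstring: str) -> None:
    # Hypothetical, simplified stand-in for Configuration._args_to_config
    value = args.get(argname)
    if value is not None and value is not False:
        config[argname] = value
        logger.info(logstring.format(value))

args = {'hyperopt_list_best': True, 'hyperopt_show_index': None}
config: dict = {}
args_to_config(args, config, 'hyperopt_list_best', 'Parameter --best detected: {}')
args_to_config(args, config, 'hyperopt_show_index', 'Parameter -n/--index detected: {}')
print(config)  # {'hyperopt_list_best': True} -- options that were not set are skipped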


@@ -74,11 +74,11 @@ class Hyperopt:
         else:
             logger.info("Continuing on previous hyperopt results.")
 
-        self.num_trials_saved = 0
         # Previous evaluations
         self.trials: List = []
+        self.num_trials_saved = 0
 
         # Populate functions here (hasattr is slow so should not be run during "regular" operations)
         if hasattr(self.custom_hyperopt, 'populate_indicators'):
             self.backtesting.strategy.advise_indicators = \
@@ -104,6 +104,10 @@ class Hyperopt:
             self.config['ask_strategy'] = {}
             self.config['ask_strategy']['use_sell_signal'] = True
 
+        self.print_all = self.config.get('print_all', False)
+        self.print_colorized = self.config.get('print_colorized', False)
+        self.print_json = self.config.get('print_json', False)
+
     @staticmethod
     def get_lock_filename(config) -> str:
@@ -119,20 +123,18 @@ class Hyperopt:
                 logger.info(f"Removing `{p}`.")
                 p.unlink()
 
-    def get_args(self, params):
-        dimensions = self.dimensions
+    def _get_params_dict(self, raw_params: List[Any]) -> Dict:
+        dimensions: List[Dimension] = self.dimensions
 
         # Ensure the number of dimensions match
-        # the number of parameters in the list x.
-        if len(params) != len(dimensions):
-            raise ValueError('Mismatch in number of search-space dimensions. '
-                             f'len(dimensions)=={len(dimensions)} and len(x)=={len(params)}')
+        # the number of parameters in the list.
+        if len(raw_params) != len(dimensions):
+            raise ValueError('Mismatch in number of search-space dimensions.')
 
-        # Create a dict where the keys are the names of the dimensions
-        # and the values are taken from the list of parameters x.
-        arg_dict = {dim.name: value for dim, value in zip(dimensions, params)}
-        return arg_dict
+        # Return a dict where the keys are the names of the dimensions
+        # and the values are taken from the list of parameters.
+        return {d.name: v for d, v in zip(dimensions, raw_params)}
 
     def save_trials(self, final: bool = False) -> None:
         """
@@ -147,106 +149,126 @@ class Hyperopt:
             logger.info(f"{num_trials} {plural(num_trials, 'epoch')} "
                         f"saved to '{self.trials_file}'.")
 
-    def read_trials(self) -> List:
+    @staticmethod
+    def _read_trials(trials_file) -> List:
         """
         Read hyperopt trials file
         """
-        logger.info("Reading Trials from '%s'", self.trials_file)
-        trials = load(self.trials_file)
-        self.trials_file.unlink()
+        logger.info("Reading Trials from '%s'", trials_file)
+        trials = load(trials_file)
         return trials
 
-    def log_trials_result(self) -> None:
+    def _get_params_details(self, params: Dict) -> Dict:
         """
-        Display Best hyperopt result
+        Return the params for each space
         """
-        # This is printed when Ctrl+C is pressed quickly, before first epochs have
-        # a chance to be evaluated.
-        if not self.trials:
-            print("No epochs evaluated yet, no best result.")
-            return
-
-        results = sorted(self.trials, key=itemgetter('loss'))
-        best_result = results[0]
-        params = best_result['params']
-        log_str = self.format_results_logstring(best_result)
-        print(f"\nBest result:\n\n{log_str}\n")
-
-        if self.config.get('print_json'):
+        result: Dict = {}
+
+        if self.has_space('buy'):
+            result['buy'] = {p.name: params.get(p.name) for p in self.hyperopt_space('buy')}
+        if self.has_space('sell'):
+            result['sell'] = {p.name: params.get(p.name) for p in self.hyperopt_space('sell')}
+        if self.has_space('roi'):
+            result['roi'] = self.custom_hyperopt.generate_roi_table(params)
+        if self.has_space('stoploss'):
+            result['stoploss'] = params.get('stoploss')
+
+        return result
+
+    @staticmethod  # noqa: C901
+    def print_epoch_details(results, total_epochs, print_json: bool,
+                            no_header: bool = False, header_str: str = None) -> None:
+        """
+        Display details of the hyperopt result
+        """
+        params = results['params_details']
+
+        # Default header string
+        if header_str is None:
+            header_str = "Best result"
+
+        if not no_header:
+            explanation_str = Hyperopt._format_explanation_string(results, total_epochs)
+            print(f"\n{header_str}:\n\n{explanation_str}\n")
+
+        if print_json:
             result_dict: Dict = {}
-            if self.has_space('buy') or self.has_space('sell'):
-                result_dict['params'] = {}
-            if self.has_space('buy'):
-                result_dict['params'].update({p.name: params.get(p.name)
-                                              for p in self.hyperopt_space('buy')})
-            if self.has_space('sell'):
-                result_dict['params'].update({p.name: params.get(p.name)
-                                              for p in self.hyperopt_space('sell')})
-            if self.has_space('roi'):
+            result_params_dict: Dict = {}
+            if 'buy' in params:
+                result_params_dict.update(params['buy'])
+            if 'sell' in params:
+                result_params_dict.update(params['sell'])
+            if result_params_dict:
+                result_dict['params'] = result_params_dict
+            if 'roi' in params:
                 # Convert keys in min_roi dict to strings because
                 # rapidjson cannot dump dicts with integer keys...
                 # OrderedDict is used to keep the numeric order of the items
                 # in the dict.
                 result_dict['minimal_roi'] = OrderedDict(
-                    (str(k), v) for k, v in self.custom_hyperopt.generate_roi_table(params).items()
+                    (str(k), v) for k, v in params['roi'].items()
                 )
-            if self.has_space('stoploss'):
-                result_dict['stoploss'] = params.get('stoploss')
+            if 'stoploss' in params:
+                result_dict['stoploss'] = params['stoploss']
             print(rapidjson.dumps(result_dict, default=str, number_mode=rapidjson.NM_NATIVE))
         else:
-            if self.has_space('buy'):
+            if 'buy' in params:
                 print('Buy hyperspace params:')
-                pprint({p.name: params.get(p.name) for p in self.hyperopt_space('buy')},
-                       indent=4)
-            if self.has_space('sell'):
+                pprint(params['buy'], indent=4)
+            if 'sell' in params:
                 print('Sell hyperspace params:')
-                pprint({p.name: params.get(p.name) for p in self.hyperopt_space('sell')},
-                       indent=4)
-            if self.has_space('roi'):
+                pprint(params['sell'], indent=4)
+            if 'roi' in params:
                 print("ROI table:")
                 # Round printed values to 5 digits after the decimal point
-                pprint(round_dict(self.custom_hyperopt.generate_roi_table(params), 5), indent=4)
-            if self.has_space('stoploss'):
+                pprint(round_dict(params['roi'], 5), indent=4)
+            if 'stoploss' in params:
                 # Also round to 5 digits after the decimal point
-                print(f"Stoploss: {round(params.get('stoploss'), 5)}")
+                print(f"Stoploss: {round(params['stoploss'], 5)}")
 
-    def is_best(self, results) -> bool:
-        return results['loss'] < self.current_best_loss
+    @staticmethod
+    def is_best_loss(results, current_best_loss) -> bool:
+        return results['loss'] < current_best_loss
 
-    def log_results(self, results) -> None:
+    def print_results(self, results) -> None:
         """
         Log results if it is better than any previous evaluation
         """
-        print_all = self.config.get('print_all', False)
-        is_best_loss = self.is_best(results)
-
-        if not print_all:
+        is_best = results['is_best']
+        if not self.print_all:
+            # Print '\n' after each 100th epoch to separate dots from the log messages.
+            # Otherwise output is messy on a terminal.
             print('.', end='' if results['current_epoch'] % 100 != 0 else None)  # type: ignore
             sys.stdout.flush()
 
-        if print_all or is_best_loss:
-            if is_best_loss:
-                self.current_best_loss = results['loss']
-            log_str = self.format_results_logstring(results)
-            # Colorize output
-            if self.config.get('print_colorized', False):
-                if results['total_profit'] > 0:
-                    log_str = Fore.GREEN + log_str
-                if print_all and is_best_loss:
-                    log_str = Style.BRIGHT + log_str
-            if print_all:
-                print(log_str)
-            else:
-                print(f'\n{log_str}')
-
-    def format_results_logstring(self, results) -> str:
-        current = results['current_epoch']
-        total = self.total_epochs
-        res = results['results_explanation']
-        loss = results['loss']
-        log_str = f'{current:5d}/{total}: {res} Objective: {loss:.5f}'
-        log_str = f'*{log_str}' if results['is_initial_point'] else f' {log_str}'
-        return log_str
+        if self.print_all or is_best:
+            if not self.print_all:
+                # Separate the results explanation string from dots
+                print("\n")
+            self.print_results_explanation(results, self.total_epochs, self.print_all,
+                                           self.print_colorized)
+
+    @staticmethod
+    def print_results_explanation(results, total_epochs, highlight_best: bool,
+                                  print_colorized: bool) -> None:
+        """
+        Log results explanation string
+        """
+        explanation_str = Hyperopt._format_explanation_string(results, total_epochs)
+        # Colorize output
+        if print_colorized:
+            if results['total_profit'] > 0:
+                explanation_str = Fore.GREEN + explanation_str
+            if highlight_best and results['is_best']:
+                explanation_str = Style.BRIGHT + explanation_str
+        print(explanation_str)
+
+    @staticmethod
+    def _format_explanation_string(results, total_epochs) -> str:
+        return (("*" if results['is_initial_point'] else " ") +
+                f"{results['current_epoch']:5d}/{total_epochs}: " +
+                f"{results['results_explanation']} " +
+                f"Objective: {results['loss']:.5f}")
 
     def has_space(self, space: str) -> bool:
         """
@@ -276,33 +298,34 @@ class Hyperopt:
             spaces += self.custom_hyperopt.stoploss_space()
         return spaces
 
-    def generate_optimizer(self, _params: Dict, iteration=None) -> Dict:
+    def generate_optimizer(self, raw_params: List[Any], iteration=None) -> Dict:
         """
         Used Optimize function. Called once per epoch to optimize whatever is configured.
         Keep this function as optimized as possible!
         """
-        params = self.get_args(_params)
+        params_dict = self._get_params_dict(raw_params)
+        params_details = self._get_params_details(params_dict)
 
         if self.has_space('roi'):
             self.backtesting.strategy.minimal_roi = \
-                self.custom_hyperopt.generate_roi_table(params)
+                self.custom_hyperopt.generate_roi_table(params_dict)
 
         if self.has_space('buy'):
             self.backtesting.strategy.advise_buy = \
-                self.custom_hyperopt.buy_strategy_generator(params)
+                self.custom_hyperopt.buy_strategy_generator(params_dict)
 
         if self.has_space('sell'):
             self.backtesting.strategy.advise_sell = \
-                self.custom_hyperopt.sell_strategy_generator(params)
+                self.custom_hyperopt.sell_strategy_generator(params_dict)
 
         if self.has_space('stoploss'):
-            self.backtesting.strategy.stoploss = params['stoploss']
+            self.backtesting.strategy.stoploss = params_dict['stoploss']
 
         processed = load(self.tickerdata_pickle)
         min_date, max_date = get_timeframe(processed)
-        results = self.backtesting.backtest(
+        backtesting_results = self.backtesting.backtest(
             {
                 'stake_amount': self.config['stake_amount'],
                 'processed': processed,
@@ -312,58 +335,58 @@ class Hyperopt:
                 'end_date': max_date,
             }
         )
-        results_explanation = self.format_results(results)
+        results_metrics = self._calculate_results_metrics(backtesting_results)
+        results_explanation = self._format_results_explanation_string(results_metrics)
 
-        trade_count = len(results.index)
-        total_profit = results.profit_abs.sum()
+        trade_count = results_metrics['trade_count']
+        total_profit = results_metrics['total_profit']
 
         # If this evaluation contains too short amount of trades to be
         # interesting -- consider it as 'bad' (assigned max. loss value)
         # in order to cast this hyperspace point away from optimization
         # path. We do not want to optimize 'hodl' strategies.
-        if trade_count < self.config['hyperopt_min_trades']:
-            return {
-                'loss': MAX_LOSS,
-                'params': params,
-                'results_explanation': results_explanation,
-                'total_profit': total_profit,
-            }
-
-        loss = self.calculate_loss(results=results, trade_count=trade_count,
-                                   min_date=min_date.datetime, max_date=max_date.datetime)
+        loss: float = MAX_LOSS
+        if trade_count >= self.config['hyperopt_min_trades']:
+            loss = self.calculate_loss(results=backtesting_results, trade_count=trade_count,
+                                       min_date=min_date.datetime, max_date=max_date.datetime)
 
         return {
             'loss': loss,
-            'params': params,
+            'params_dict': params_dict,
+            'params_details': params_details,
+            'results_metrics': results_metrics,
             'results_explanation': results_explanation,
             'total_profit': total_profit,
         }
 
-    def format_results(self, results: DataFrame) -> str:
+    def _calculate_results_metrics(self, backtesting_results: DataFrame) -> Dict:
+        return {
+            'trade_count': len(backtesting_results.index),
+            'avg_profit': backtesting_results.profit_percent.mean() * 100.0,
+            'total_profit': backtesting_results.profit_abs.sum(),
+            'profit': backtesting_results.profit_percent.sum() * 100.0,
+            'duration': backtesting_results.trade_duration.mean(),
+        }
+
+    def _format_results_explanation_string(self, results_metrics: Dict) -> str:
         """
         Return the formatted results explanation in a string
         """
-        trades = len(results.index)
-        avg_profit = results.profit_percent.mean() * 100.0
-        total_profit = results.profit_abs.sum()
         stake_cur = self.config['stake_currency']
-        profit = results.profit_percent.sum() * 100.0
-        duration = results.trade_duration.mean()
-
-        return (f'{trades:6d} trades. Avg profit {avg_profit: 5.2f}%. '
-                f'Total profit {total_profit: 11.8f} {stake_cur} '
-                f'({profit: 7.2f}\N{GREEK CAPITAL LETTER SIGMA}%). '
-                f'Avg duration {duration:5.1f} mins.'
+        return (f"{results_metrics['trade_count']:6d} trades. "
+                f"Avg profit {results_metrics['avg_profit']: 6.2f}%. "
+                f"Total profit {results_metrics['total_profit']: 11.8f} {stake_cur} "
+                f"({results_metrics['profit']: 7.2f}\N{GREEK CAPITAL LETTER SIGMA}%). "
+                f"Avg duration {results_metrics['duration']:5.1f} mins."
                 ).encode(locale.getpreferredencoding(), 'replace').decode('utf-8')
 
-    def get_optimizer(self, dimensions, cpu_count) -> Optimizer:
+    def get_optimizer(self, dimensions: List[Dimension], cpu_count) -> Optimizer:
         return Optimizer(
             dimensions,
             base_estimator="ET",
             acq_optimizer="auto",
             n_initial_points=INITIAL_POINTS,
             acq_optimizer_kwargs={'n_jobs': cpu_count},
-            random_state=self.config.get('hyperopt_random_state', None)
+            random_state=self.config.get('hyperopt_random_state', None),
         )
 
     def fix_optimizer_models_list(self):
@@ -387,14 +410,16 @@ class Hyperopt:
         return parallel(delayed(
             wrap_non_picklable_objects(self.generate_optimizer))(v, i) for v in asked)
 
-    def load_previous_results(self):
-        """ read trials file if we have one """
-        if self.trials_file.is_file() and self.trials_file.stat().st_size > 0:
-            self.trials = self.read_trials()
-            logger.info(
-                'Loaded %d previous evaluations from disk.',
-                len(self.trials)
-            )
+    @staticmethod
+    def load_previous_results(trials_file) -> List:
+        """
+        Load data for epochs from the file if we have one
+        """
+        trials: List = []
+        if trials_file.is_file() and trials_file.stat().st_size > 0:
+            trials = Hyperopt._read_trials(trials_file)
+            logger.info(f"Loaded {len(trials)} previous evaluations from disk.")
+        return trials
 
     def start(self) -> None:
         data, timerange = self.backtesting.load_bt_data()
@@ -415,17 +440,17 @@ class Hyperopt:
         # We don't need exchange instance anymore while running hyperopt
         self.backtesting.exchange = None  # type: ignore
 
-        self.load_previous_results()
+        self.trials = self.load_previous_results(self.trials_file)
 
         cpus = cpu_count()
         logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
         config_jobs = self.config.get('hyperopt_jobs', -1)
         logger.info(f'Number of parallel jobs set as: {config_jobs}')
 
-        self.dimensions = self.hyperopt_space()
+        self.dimensions: List[Dimension] = self.hyperopt_space()
         self.opt = self.get_optimizer(self.dimensions, config_jobs)
 
-        if self.config.get('print_colorized', False):
+        if self.print_colorized:
             colorama_init(autoreset=True)
 
         try:
@@ -439,19 +464,38 @@ class Hyperopt:
                     self.opt.tell(asked, [v['loss'] for v in f_val])
                     self.fix_optimizer_models_list()
                     for j in range(jobs):
-                        # Use human-friendly index here (starting from 1)
+                        # Use human-friendly indexes here (starting from 1)
                         current = i * jobs + j + 1
                         val = f_val[j]
                         val['current_epoch'] = current
                        val['is_initial_point'] = current <= INITIAL_POINTS
                         logger.debug(f"Optimizer epoch evaluated: {val}")
-                        is_best = self.is_best(val)
-                        self.log_results(val)
+
+                        is_best = self.is_best_loss(val, self.current_best_loss)
+                        # This value is assigned here and not in the optimization method
+                        # to keep proper order in the list of results. That's because
+                        # evaluations can take different time. Here they are aligned in the
+                        # order they will be shown to the user.
+                        val['is_best'] = is_best
+
+                        self.print_results(val)
+
+                        if is_best:
+                            self.current_best_loss = val['loss']
                         self.trials.append(val)
+
+                        # Save results after each best epoch and every 100 epochs
                         if is_best or current % 100 == 0:
                             self.save_trials()
         except KeyboardInterrupt:
             print('User interrupted..')
 
         self.save_trials(final=True)
-        self.log_trials_result()
+
+        if self.trials:
+            sorted_trials = sorted(self.trials, key=itemgetter('loss'))
+            results = sorted_trials[0]
+            self.print_epoch_details(results, self.total_epochs, self.print_json)
+        else:
+            # This is printed when Ctrl+C is pressed quickly, before first epochs have
+            # a chance to be evaluated.
+            print("No epochs evaluated yet, no best result.")


@@ -1,12 +1,14 @@
 import logging
 import sys
 from collections import OrderedDict
+from operator import itemgetter
 from pathlib import Path
 from typing import Any, Dict, List
 
 import arrow
 import csv
 import rapidjson
+from colorama import init as colorama_init
 from tabulate import tabulate
 
 from freqtrade import OperationalException
@@ -18,6 +20,7 @@ from freqtrade.data.history import (convert_trades_to_ohlcv,
 from freqtrade.exchange import (available_exchanges, ccxt_exchanges, market_is_active,
                                 symbol_is_pair)
 from freqtrade.misc import plural
+from freqtrade.optimize.hyperopt import Hyperopt
 from freqtrade.resolvers import ExchangeResolver
 from freqtrade.state import RunMode
@@ -236,3 +239,99 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
             args.get('list_pairs_print_json', False) or
             args.get('print_csv', False)):
         print(f"{summary_str}.")
+
+
+def start_hyperopt_list(args: Dict[str, Any]) -> None:
+    """
+    List hyperopt epochs previously evaluated
+    """
+    config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
+
+    only_best = config.get('hyperopt_list_best', False)
+    only_profitable = config.get('hyperopt_list_profitable', False)
+    print_colorized = config.get('print_colorized', False)
+    print_json = config.get('print_json', False)
+    no_details = config.get('hyperopt_list_no_details', False)
+    no_header = False
+
+    trials_file = (config['user_data_dir'] /
+                   'hyperopt_results' / 'hyperopt_results.pickle')
+
+    # Previous evaluations
+    trials = Hyperopt.load_previous_results(trials_file)
+    total_epochs = len(trials)
+
+    trials = _hyperopt_filter_trials(trials, only_best, only_profitable)
+
+    # TODO: fetch the interval for epochs to print from the cli option
+    epoch_start, epoch_stop = 0, None
+
+    if print_colorized:
+        colorama_init(autoreset=True)
+
+    try:
+        # Human-friendly indexes used here (starting from 1)
+        for val in trials[epoch_start:epoch_stop]:
+            Hyperopt.print_results_explanation(val, total_epochs, not only_best, print_colorized)
+    except KeyboardInterrupt:
+        print('User interrupted..')
+
+    if trials and not no_details:
+        sorted_trials = sorted(trials, key=itemgetter('loss'))
+        results = sorted_trials[0]
+        Hyperopt.print_epoch_details(results, total_epochs, print_json, no_header)
+
+
+def start_hyperopt_show(args: Dict[str, Any]) -> None:
+    """
+    Show details of a hyperopt epoch previously evaluated
+    """
+    config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
+
+    only_best = config.get('hyperopt_list_best', False)
+    only_profitable = config.get('hyperopt_list_profitable', False)
+    no_header = config.get('hyperopt_show_no_header', False)
+
+    trials_file = (config['user_data_dir'] /
+                   'hyperopt_results' / 'hyperopt_results.pickle')
+
+    # Previous evaluations
+    trials = Hyperopt.load_previous_results(trials_file)
+    total_epochs = len(trials)
+
+    trials = _hyperopt_filter_trials(trials, only_best, only_profitable)
+
+    n = config.get('hyperopt_show_index', -1)
+    if n > total_epochs:
+        raise OperationalException(
+            f"The index of the epoch to show should be less than {total_epochs + 1}.")
+    if n < -total_epochs:
+        raise OperationalException(
+            f"The index of the epoch to show should be greater than {-total_epochs - 1}.")
+
+    # Translate epoch index from human-readable format to pythonic
+    if n > 0:
+        n -= 1
+
+    print_json = config.get('print_json', False)
+
+    if trials:
+        val = trials[n]
+        Hyperopt.print_epoch_details(val, total_epochs, print_json, no_header,
+                                     header_str="Epoch details")
+
+
+def _hyperopt_filter_trials(trials: List, only_best: bool, only_profitable: bool) -> List:
+    """
+    Filter out items from the list of hyperopt results
+    """
+    if only_best:
+        trials = [x for x in trials if x['is_best']]
+    if only_profitable:
+        trials = [x for x in trials if x['results_metrics']['profit'] > 0]
+
+    logger.info(f"{len(trials)} " +
+                ("best " if only_best else "") +
+                ("profitable " if only_profitable else "") +
+                "epochs found.")
+
+    return trials
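
A short usage sketch of the filtering and index handling above (synthetic records, assuming _hyperopt_filter_trials from this file is in scope): positive --index values are 1-based, negative ones count from the end of the filtered list.

# Synthetic epoch records; only the keys read by _hyperopt_filter_trials are filled in.
trials = [
    {'is_best': False, 'results_metrics': {'profit': -1.2}},
    {'is_best': True,  'results_metrics': {'profit': 3.4}},
    {'is_best': False, 'results_metrics': {'profit': 0.7}},
]

profitable = _hyperopt_filter_trials(trials, only_best=False, only_profitable=True)
# -> 2 records kept; logs "2 profitable epochs found."

n = 2                                    # as if called with --index 2
val = profitable[n - 1 if n > 0 else n]  # human-friendly index translated to 0-based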