Merge pull request #2563 from hroff-1902/hyperopt-save

Hyperopt: Save epochs at intermediate points

Commit: e9da4d8505
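In short: `save_trials()` gains a `final` flag and is now also called at intermediate points inside the optimizer loop, whenever an epoch improves on the best loss and on every 100th epoch, so the results of a long run survive an interruption. A new `num_trials_saved` counter skips writes when no new epochs have accumulated, a new `is_best()` helper factors out the best-loss check, epoch numbering becomes 1-based ("human-friendly") at the point where epochs are produced, and `is_initial_point` is adjusted from `<` to `<=` to match.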
@@ -23,7 +23,7 @@ from skopt import Optimizer
 from skopt.space import Dimension
 
 from freqtrade.data.history import get_timeframe, trim_dataframe
-from freqtrade.misc import round_dict
+from freqtrade.misc import plural, round_dict
 from freqtrade.optimize.backtesting import Backtesting
 # Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules
 from freqtrade.optimize.hyperopt_interface import IHyperOpt  # noqa: F4
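The newly imported `plural` helper from `freqtrade.misc` is used by the save messages further down. A minimal sketch of such a helper, assuming the conventional signature (the actual freqtrade implementation may differ):

```python
from typing import Optional

def plural(num: float, singular: str, plural: Optional[str] = None) -> str:
    # Singular form for a count of 1 (or -1), otherwise the plural form,
    # defaulting to singular + 's'. This matches the messages asserted in
    # the tests below: "Saving 1 epoch." / "Saving 2 epochs."
    return singular if num in (1, -1) else plural or singular + 's'
```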
@@ -77,6 +77,8 @@ class Hyperopt:
         # Previous evaluations
         self.trials: List = []
 
+        self.num_trials_saved = 0
+
         # Populate functions here (hasattr is slow so should not be run during "regular" operations)
         if hasattr(self.custom_hyperopt, 'populate_indicators'):
             self.backtesting.strategy.advise_indicators = \
@@ -132,13 +134,18 @@ class Hyperopt:
         arg_dict = {dim.name: value for dim, value in zip(dimensions, params)}
         return arg_dict
 
-    def save_trials(self) -> None:
+    def save_trials(self, final: bool = False) -> None:
         """
         Save hyperopt trials to file
         """
-        if self.trials:
-            logger.info("Saving %d evaluations to '%s'", len(self.trials), self.trials_file)
+        num_trials = len(self.trials)
+        if num_trials > self.num_trials_saved:
+            logger.info(f"Saving {num_trials} {plural(num_trials, 'epoch')}.")
             dump(self.trials, self.trials_file)
+            self.num_trials_saved = num_trials
+        if final:
+            logger.info(f"{num_trials} {plural(num_trials, 'epoch')} "
+                        f"saved to '{self.trials_file}'.")
 
     def read_trials(self) -> List:
         """
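The reworked `save_trials()` is a write-if-dirty checkpoint: it writes the pickle only when epochs have accumulated since the last save, and logs the final summary only when called with `final=True`. A standalone sketch of the same pattern, assuming joblib's `dump` (which the hyperopt module already uses for the trials file; the class and path here are illustrative):

```python
from pathlib import Path
from joblib import dump

class TrialsCheckpointer:
    """Persist a growing list of epochs, skipping redundant writes."""

    def __init__(self, path: Path) -> None:
        self.path = path
        self.saved = 0  # number of epochs already on disk (cf. num_trials_saved)

    def save(self, trials: list, final: bool = False) -> None:
        if len(trials) > self.saved:   # only write when there is new data
            dump(trials, self.path)
            self.saved = len(trials)
        if final:
            print(f"{len(trials)} epochs saved to '{self.path}'.")
```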
@@ -153,6 +160,12 @@ class Hyperopt:
         """
         Display Best hyperopt result
         """
+        # This is printed when Ctrl+C is pressed quickly, before first epochs have
+        # a chance to be evaluated.
+        if not self.trials:
+            print("No epochs evaluated yet, no best result.")
+            return
+
         results = sorted(self.trials, key=itemgetter('loss'))
         best_result = results[0]
         params = best_result['params']
@@ -197,12 +210,20 @@ class Hyperopt:
         # Also round to 5 digits after the decimal point
         print(f"Stoploss: {round(params.get('stoploss'), 5)}")
 
+    def is_best(self, results) -> bool:
+        return results['loss'] < self.current_best_loss
+
     def log_results(self, results) -> None:
         """
         Log results if it is better than any previous evaluation
         """
         print_all = self.config.get('print_all', False)
-        is_best_loss = results['loss'] < self.current_best_loss
+        is_best_loss = self.is_best(results)
+
+        if not print_all:
+            print('.', end='' if results['current_epoch'] % 100 != 0 else None)  # type: ignore
+            sys.stdout.flush()
+
         if print_all or is_best_loss:
             if is_best_loss:
                 self.current_best_loss = results['loss']
@@ -217,13 +238,9 @@ class Hyperopt:
                 print(log_str)
             else:
                 print(f'\n{log_str}')
-        else:
-            print('.', end='')
-            sys.stdout.flush()
 
     def format_results_logstring(self, results) -> str:
-        # Output human-friendly index here (starting from 1)
-        current = results['current_epoch'] + 1
+        current = results['current_epoch']
         total = self.total_epochs
         res = results['results_explanation']
         loss = results['loss']
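Together, these two hunks move progress printing out of `log_results()`'s `else` branch: previously a dot was printed only for epochs that were not logged; now, whenever `--print-all` is off, a dot is printed for every epoch, with `end=None` (i.e. a newline) on every 100th epoch so the dots wrap into rows. Condensed, the new behavior is:

```python
import sys

def print_progress(current_epoch: int) -> None:
    # One dot per epoch; every 100th epoch ends the line (end=None -> '\n').
    print('.', end='' if current_epoch % 100 != 0 else None)
    sys.stdout.flush()
```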
@@ -422,15 +439,19 @@ class Hyperopt:
                     self.opt.tell(asked, [v['loss'] for v in f_val])
                     self.fix_optimizer_models_list()
                     for j in range(jobs):
-                        current = i * jobs + j
+                        # Use human-friendly index here (starting from 1)
+                        current = i * jobs + j + 1
                         val = f_val[j]
                         val['current_epoch'] = current
-                        val['is_initial_point'] = current < INITIAL_POINTS
+                        val['is_initial_point'] = current <= INITIAL_POINTS
+                        logger.debug(f"Optimizer epoch evaluated: {val}")
+                        is_best = self.is_best(val)
                         self.log_results(val)
                         self.trials.append(val)
-                        logger.debug(f"Optimizer epoch evaluated: {val}")
+                        if is_best or current % 100 == 0:
+                            self.save_trials()
         except KeyboardInterrupt:
             print('User interrupted..')
 
-        self.save_trials()
+        self.save_trials(final=True)
         self.log_trials_result()
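The loop now checkpoints whenever an epoch is a new best or the epoch number is a multiple of 100, and a single `final=True` save runs after the loop, whether it completed normally or was interrupted with Ctrl+C. Because the on-disk file stays close to current, the epochs of an interrupted run can be inspected by unpickling it; a minimal sketch, assuming joblib's `load` (the path is illustrative and depends on your configuration):

```python
from joblib import load

trials = load('user_data/hyperopt_results.pickle')  # illustrative path
best = min(trials, key=lambda t: t['loss'])
print(f"{len(trials)} epochs on disk, best loss so far: {best['loss']}")
```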
@@ -360,7 +360,7 @@ def test_log_results_if_loss_improves(hyperopt, capsys) -> None:
     hyperopt.log_results(
         {
             'loss': 1,
-            'current_epoch': 1,
+            'current_epoch': 2,  # This starts from 1 (in a human-friendly manner)
             'results_explanation': 'foo.',
             'is_initial_point': False
         }
@@ -374,6 +374,7 @@ def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None:
     hyperopt.log_results(
         {
             'loss': 3,
+            'current_epoch': 1,
         }
     )
     assert caplog.record_tuples == []
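Note: the `'current_epoch': 1` key is now required in this test fixture because `log_results()` unconditionally reads `results['current_epoch']` for the progress dot, even when nothing is logged.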
@@ -382,13 +383,19 @@ def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None:
 def test_save_trials_saves_trials(mocker, hyperopt, testdatadir, caplog) -> None:
     trials = create_trials(mocker, hyperopt, testdatadir)
     mock_dump = mocker.patch('freqtrade.optimize.hyperopt.dump', return_value=None)
-    hyperopt.trials = trials
-    hyperopt.save_trials()
 
     trials_file = testdatadir / 'optimize' / 'ut_trials.pickle'
-    assert log_has(f"Saving 1 evaluations to '{trials_file}'", caplog)
+    hyperopt.trials = trials
+    hyperopt.save_trials(final=True)
+    assert log_has("Saving 1 epoch.", caplog)
+    assert log_has(f"1 epoch saved to '{trials_file}'.", caplog)
     mock_dump.assert_called_once()
 
+    hyperopt.trials = trials + trials
+    hyperopt.save_trials(final=True)
+    assert log_has("Saving 2 epochs.", caplog)
+    assert log_has(f"2 epochs saved to '{trials_file}'.", caplog)
+
 
 def test_read_trials_returns_trials_file(mocker, hyperopt, testdatadir, caplog) -> None:
     trials = create_trials(mocker, hyperopt, testdatadir)
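The rewritten test saves once with a single trial (asserting the singular messages) and again with a doubled list (asserting the plural forms). `log_has` is freqtrade's test helper for checking that a message was emitted to pytest's `caplog`; conceptually it is close to this sketch (the real helper lives in the test utilities and may differ):

```python
def log_has(line: str, caplog) -> bool:
    # caplog.record_tuples is a list of (logger_name, level, message) tuples.
    return any(line == message for _, _, message in caplog.record_tuples)
```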