Run first epoch in non-parallel mode

This allows the dataprovider to load its cache.

closes #7384
Matthias 2022-09-11 15:42:27 +02:00
parent 78cd46ecd5
commit 72d197a99d
2 changed files with 20 additions and 5 deletions


@@ -196,7 +196,9 @@ class DataProvider:
         Clear pair dataframe cache.
         """
         self.__cached_pairs = {}
-        self.__cached_pairs_backtesting = {}
+        # Don't reset backtesting pairs -
+        # otherwise they're reloaded on each hyperopt epoch when using analyze_per_epoch
+        # self.__cached_pairs_backtesting = {}
         self.__slice_index = 0

     # Exchange functions
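
As a minimal sketch of why this matters (simplified, hypothetical names such as SketchProvider and load_candles, not freqtrade's actual implementation): with analyze_per_epoch, clear_cache() runs before every epoch, so a backtesting cache that survives it pays the expensive data load only once.

    import time

    def load_candles(pair):
        # Hypothetical stand-in for an expensive disk/exchange load.
        time.sleep(0.01)  # simulate I/O cost
        return [f"{pair}-candle-{i}" for i in range(5)]

    class SketchProvider:
        def __init__(self):
            self._cached_pairs = {}              # per-run cache, safe to drop
            self._cached_pairs_backtesting = {}  # expensive, reused across epochs
            self._slice_index = 0

        def historic_ohlcv(self, pair):
            # Load once; every later epoch hits the cache.
            if pair not in self._cached_pairs_backtesting:
                self._cached_pairs_backtesting[pair] = load_candles(pair)
            return self._cached_pairs_backtesting[pair]

        def clear_cache(self):
            # Mirrors the change above: the backtesting cache is left intact.
            self._cached_pairs = {}
            self._slice_index = 0

    dp = SketchProvider()
    for epoch in range(3):               # hyperopt calls clear_cache() each epoch
        dp.clear_cache()
        dp.historic_ohlcv("BTC/USDT")    # only the first epoch pays the load cost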


@@ -580,11 +580,24 @@ class Hyperopt:
                max_value=self.total_epochs, redirect_stdout=False, redirect_stderr=False,
                widgets=widgets
            ) as pbar:
-                EVALS = ceil(self.total_epochs / jobs)
-                for i in range(EVALS):
+                start = 0
+
+                if self.analyze_per_epoch:
+                    # First analysis not in parallel mode when using --analyze-per-epoch.
+                    # This allows the dataprovider to load its informative cache.
+                    asked, is_random = self.get_asked_points(n_points=1)
+                    # print(asked)
+                    f_val = self.generate_optimizer(asked[0])
+                    self.opt.tell(asked, [f_val['loss']])
+                    self.evaluate_result(f_val, 1, is_random[0])
+                    pbar.update(1)
+                    start += 1
+
+                evals = ceil((self.total_epochs - start) / jobs)
+                for i in range(evals):
                     # Correct the number of epochs to be processed for the last
                     # iteration (should not exceed self.total_epochs in total)
-                    n_rest = (i + 1) * jobs - self.total_epochs
+                    n_rest = (i + 1) * jobs - (self.total_epochs - start)
                     current_jobs = jobs - n_rest if n_rest > 0 else jobs
                     asked, is_random = self.get_asked_points(n_points=current_jobs)
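
For intuition on the batching arithmetic above, a standalone sketch (illustrative values, not taken from the commit): with total_epochs=10, jobs=4 and one epoch already run serially (start=1), the remaining 9 epochs are processed in batches of 4, 4 and 1.

    from math import ceil

    def batch_sizes(total_epochs, jobs, start):
        # Reproduce the loop's batch-size computation.
        evals = ceil((total_epochs - start) / jobs)
        sizes = []
        for i in range(evals):
            # Trim the last batch so the total never exceeds total_epochs.
            n_rest = (i + 1) * jobs - (total_epochs - start)
            sizes.append(jobs - n_rest if n_rest > 0 else jobs)
        return sizes

    print(batch_sizes(total_epochs=10, jobs=4, start=1))  # [4, 4, 1]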
@@ -594,7 +607,7 @@ class Hyperopt:
                     # Calculate progressbar outputs
                     for j, val in enumerate(f_val):
                         # Use human-friendly indexes here (starting from 1)
-                        current = i * jobs + j + 1
+                        current = i * jobs + j + 1 + start
                         self.evaluate_result(val, current, is_random[j])
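
The offset in this second hunk keeps the human-friendly epoch numbers contiguous: the serial first epoch is number 1, and the parallel batches continue from 2. A quick check with the same assumed values as in the sketch above:

    jobs, start = 4, 1
    for i, batch in enumerate([4, 4, 1]):  # batch sizes from the sketch above
        print([i * jobs + j + 1 + start for j in range(batch)])
    # [2, 3, 4, 5]
    # [6, 7, 8, 9]
    # [10]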