Merge pull request #2269 from hroff-1902/hyperopt-cleanup4
minor: Cleanup in hyperopt
commit 27238d97d5
@@ -73,9 +73,11 @@ class Hyperopt:
         self.trials: List = []

         # Populate functions here (hasattr is slow so should not be run during "regular" operations)
+        if hasattr(self.custom_hyperopt, 'populate_indicators'):
+            self.backtesting.strategy.advise_indicators = \
+                self.custom_hyperopt.populate_indicators  # type: ignore
         if hasattr(self.custom_hyperopt, 'populate_buy_trend'):
             self.backtesting.advise_buy = self.custom_hyperopt.populate_buy_trend  # type: ignore
-
         if hasattr(self.custom_hyperopt, 'populate_sell_trend'):
             self.backtesting.advise_sell = self.custom_hyperopt.populate_sell_trend  # type: ignore

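The new `populate_indicators` override uses the same hasattr pattern as the existing buy/sell hooks: check once in `__init__`, bind the hyperopt's function onto the backtesting objects, and never repeat the (slow) attribute lookup on the hot path. A minimal, self-contained sketch of that pattern, using made-up `Strategy` and `CustomHyperopt` stand-ins rather than the real freqtrade classes:

class Strategy:
    """Stand-in for the strategy object; default hook adds nothing."""
    def advise_indicators(self, dataframe: dict) -> dict:
        return dataframe


class CustomHyperopt:
    """Stand-in for a user hyperopt module that optionally defines hooks."""
    @staticmethod
    def populate_indicators(dataframe: dict) -> dict:
        dataframe = dict(dataframe)
        dataframe["rsi"] = 50  # placeholder "indicator"
        return dataframe


strategy = Strategy()
custom_hyperopt = CustomHyperopt()

# hasattr is evaluated once at construction time; afterwards the strategy
# simply calls whatever function was bound here.
if hasattr(custom_hyperopt, 'populate_indicators'):
    strategy.advise_indicators = custom_hyperopt.populate_indicators

print(strategy.advise_indicators({"close": 1.0}))  # {'close': 1.0, 'rsi': 50}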
@@ -109,7 +111,9 @@ class Hyperopt:
             p.unlink()

     def get_args(self, params):
-        dimensions = self.hyperopt_space()
+
+        dimensions = self.dimensions
+
         # Ensure the number of dimensions match
         # the number of parameters in the list x.
         if len(params) != len(dimensions):
@@ -322,9 +326,9 @@ class Hyperopt:
                     f'Total profit {total_profit: 11.8f} {stake_cur} '
                     f'({profit: 7.2f}Σ%). Avg duration {duration:5.1f} mins.')

-    def get_optimizer(self, cpu_count) -> Optimizer:
+    def get_optimizer(self, dimensions, cpu_count) -> Optimizer:
         return Optimizer(
-            self.hyperopt_space(),
+            dimensions,
             base_estimator="ET",
             acq_optimizer="auto",
             n_initial_points=INITIAL_POINTS,
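`Optimizer` is scikit-optimize's `skopt.Optimizer`; the change only makes `get_optimizer()` accept the precomputed dimensions instead of calling `self.hyperopt_space()` itself. A standalone sketch of constructing and querying such an optimizer, assuming a toy two-dimensional search space (the real space and the `INITIAL_POINTS` constant live in freqtrade; the values below are placeholders):

from skopt import Optimizer
from skopt.space import Integer, Real

# Toy stand-in for the list of dimensions returned by hyperopt_space().
dimensions = [
    Integer(10, 50, name='buy-rsi-value'),
    Real(0.01, 0.10, name='roi-target'),
]

opt = Optimizer(
    dimensions,
    base_estimator="ET",      # extra-trees surrogate model, as in the diff
    acq_optimizer="auto",
    n_initial_points=30,      # placeholder for INITIAL_POINTS
    random_state=0,
)

# ask() proposes candidate parameter sets; tell() reports their losses back.
points = opt.ask(n_points=2)
opt.tell(points, [0.5, 0.7])
print(points)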
@@ -370,9 +374,6 @@ class Hyperopt:
             (max_date - min_date).days
         )

-        self.backtesting.strategy.advise_indicators = \
-            self.custom_hyperopt.populate_indicators  # type: ignore
-
         preprocessed = self.backtesting.strategy.tickerdata_to_dataframe(data)

         dump(preprocessed, self.tickerdata_pickle)
@@ -387,7 +388,8 @@ class Hyperopt:
         config_jobs = self.config.get('hyperopt_jobs', -1)
         logger.info(f'Number of parallel jobs set as: {config_jobs}')

-        opt = self.get_optimizer(config_jobs)
+        self.dimensions = self.hyperopt_space()
+        self.opt = self.get_optimizer(self.dimensions, config_jobs)

         if self.config.get('print_colorized', False):
             colorama_init(autoreset=True)
@@ -398,9 +400,9 @@ class Hyperopt:
             logger.info(f'Effective number of parallel workers used: {jobs}')
             EVALS = max(self.total_epochs // jobs, 1)
             for i in range(EVALS):
-                asked = opt.ask(n_points=jobs)
+                asked = self.opt.ask(n_points=jobs)
                 f_val = self.run_optimizer_parallel(parallel, asked)
-                opt.tell(asked, [v['loss'] for v in f_val])
+                self.opt.tell(asked, [v['loss'] for v in f_val])
                 for j in range(jobs):
                     current = i * jobs + j
                     val = f_val[j]
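The loop itself is unchanged; only the optimizer reference moves onto `self`. For reference, a reduced, runnable sketch of the same batched ask/evaluate/tell pattern, with a plain Python loss function standing in for `run_optimizer_parallel` (which in freqtrade dispatches the backtests to Joblib workers):

from skopt import Optimizer
from skopt.space import Real


def dummy_loss(point):
    # Stand-in for one hyperopt epoch; lower is better.
    x, y = point
    return (x - 0.3) ** 2 + (y - 0.7) ** 2


opt = Optimizer([Real(0.0, 1.0), Real(0.0, 1.0)],
                base_estimator="ET", n_initial_points=5, random_state=1)

jobs = 4              # effective number of parallel workers
total_epochs = 12
EVALS = max(total_epochs // jobs, 1)

for i in range(EVALS):
    asked = opt.ask(n_points=jobs)            # one batch of candidate points
    f_val = [dummy_loss(p) for p in asked]    # evaluated in parallel in freqtrade
    opt.tell(asked, f_val)                    # feed losses back to the surrogate
    for j in range(jobs):
        current = i * jobs + j
        print(f'epoch {current}: loss={f_val[j]:.4f}')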
@@ -560,6 +560,7 @@ def test_generate_optimizer(mocker, default_conf) -> None:
     }

     hyperopt = Hyperopt(default_conf)
+    hyperopt.dimensions = hyperopt.hyperopt_space()
     generate_optimizer_value = hyperopt.generate_optimizer(list(optimizer_param.values()))
     assert generate_optimizer_value == response_expected

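The added line is what keeps the test working after the refactor: `get_args()` now reads `self.dimensions`, which `start()` assigns before the optimizer loop, but the test calls `generate_optimizer()` directly and therefore has to set the attribute by hand. A toy illustration of that dependency, with hypothetical names rather than the freqtrade API:

class Opt:
    """Toy object mirroring the 'cache the search space on self' idea."""

    def space(self):
        return ['a', 'b']

    def start(self):
        # Normal entry point: cache the dimensions once...
        self.dimensions = self.space()
        return self.generate([1, 2])

    def generate(self, params):
        # ...which later steps then rely on.
        assert len(params) == len(self.dimensions)
        return dict(zip(self.dimensions, params))


o = Opt()
# A test that bypasses start() must set the attribute itself,
# just as the test above does with hyperopt_space().
o.dimensions = o.space()
print(o.generate([1, 2]))  # {'a': 1, 'b': 2}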