serialization bottleneck resolved; back to cloudpickle

This commit is contained in:
hroff-1902 2019-04-17 13:37:22 +03:00
parent ae6f540b25
commit 2bed61436b

View File

@ -19,7 +19,6 @@ from typing import Any, Dict, List
from joblib import Parallel, delayed, dump, load, wrap_non_picklable_objects
from joblib._parallel_backends import LokyBackend
from joblib import register_parallel_backend, parallel_backend
from joblib.externals.loky import set_loky_pickler
from pandas import DataFrame
from skopt import Optimizer
from skopt.space import Dimension
@ -262,7 +261,9 @@ class Hyperopt(Backtesting):
)
def run_optimizer_parallel(self, parallel, tries: int, first_try: int) -> List:
    """Dispatch `tries` objective evaluations onto the joblib Parallel pool.

    Each asked point from the optimizer generator is paired with a running
    epoch index starting at `first_try`, and evaluated via
    `self.parallel_objective` in a worker process.

    :param parallel: an active joblib `Parallel` executor.
    :param tries: number of evaluations to dispatch in this batch.
    :param first_try: index of the first evaluation (used for numbering).
    :return: list of per-evaluation results, in dispatch order.
    """
    # NOTE: the diff rendering had left the pre-change (removed) line fused
    # in front of this call, which made the method a syntax error; only the
    # post-commit form is kept here.
    # wrap_non_picklable_objects lets cloudpickle serialize the bound
    # method for the worker processes.
    result = parallel(delayed(
        wrap_non_picklable_objects(self.parallel_objective))
        (asked, i) for asked, i in
        zip(self.opt_generator(), range(first_try, first_try + tries)))
    return result
@ -314,7 +315,6 @@ class Hyperopt(Backtesting):
cpus = multiprocessing.cpu_count()
logger.info(f'Found {cpus} CPU cores. Let\'s make them scream!')
set_loky_pickler('pickle')
self.opt = self.get_optimizer(cpus)
frames = ((self.total_tries - 1) // EVALS_FRAME)
@ -344,6 +344,11 @@ class Hyperopt(Backtesting):
self.save_trials()
self.log_trials_result()
def __getstate__(self):
    """Return the picklable state of this instance.

    The ``trials`` attribute is excluded from the pickled state —
    presumably it is too large or not picklable for the worker
    processes (NOTE(review): confirm against the pickling path).
    """
    # Shallow-copy so the live object keeps its ``trials`` attribute.
    snapshot = dict(self.__dict__)
    # pop() without a default raises KeyError when absent, matching
    # the original ``del state['trials']`` exactly.
    snapshot.pop('trials')
    return snapshot
def start(args: Namespace) -> None:
"""