Changed back to progressbar2 for better handling of the logger.

Coloring still needs some work (a bug to fix, plus deciding which colors to use).
Fredrik81 2020-04-06 13:12:32 +02:00
parent 5737139979
commit d5609d4997
3 changed files with 59 additions and 51 deletions
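
Note on "better handling of logger": progressbar2 can wrap the standard streams so that
log records are printed above the active bar instead of breaking it. A minimal,
standalone sketch of that pattern (illustration only, not code from this commit; the
epoch count and sleep are placeholders):

    import logging
    import time

    import progressbar

    # Wrap stderr before configuring logging, so the logging handler writes
    # through progressbar2's wrapper and log lines land above the bar.
    progressbar.streams.wrap_stderr()
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    with progressbar.ProgressBar(max_value=10, redirect_stderr=True) as bar:
        for i in range(10):
            if i % 3 == 0:
                logger.info("log line emitted while the bar is active (i=%d)", i)
            time.sleep(0.1)        # placeholder for one hyperopt epoch
            bar.update(i + 1)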


@@ -22,7 +22,7 @@ from colorama import init as colorama_init
 from joblib import (Parallel, cpu_count, delayed, dump, load,
                     wrap_non_picklable_objects)
 from pandas import DataFrame, json_normalize, isna
-from tqdm import tqdm
+import progressbar
 import tabulate
 from os import path
 import io
@@ -44,7 +44,8 @@ with warnings.catch_warnings():
     from skopt import Optimizer
     from skopt.space import Dimension

+progressbar.streams.wrap_stderr()
+progressbar.streams.wrap_stdout()
 logger = logging.getLogger(__name__)
@@ -682,55 +683,67 @@ class Hyperopt:
                 logger.info(f'Effective number of parallel workers used: {jobs}')
                 # Define progressbar
-                self.progress_bar = tqdm(
-                    total=self.total_epochs, ncols=108, unit=' Epoch',
-                    bar_format='Epoch {n_fmt}/{total_fmt} ({percentage:3.0f}%)|{bar}|'
-                               ' [{elapsed}<{remaining} {rate_fmt}{postfix}]'
-                )
-
-                EVALS = ceil(self.total_epochs / jobs)
-                for i in range(EVALS):
-                    # Correct the number of epochs to be processed for the last
-                    # iteration (should not exceed self.total_epochs in total)
-                    n_rest = (i + 1) * jobs - self.total_epochs
-                    current_jobs = jobs - n_rest if n_rest > 0 else jobs
-
-                    asked = self.opt.ask(n_points=current_jobs)
-                    f_val = self.run_optimizer_parallel(parallel, asked, i)
-                    self.opt.tell(asked, [v['loss'] for v in f_val])
-                    self.fix_optimizer_models_list()
-
-                    # Calculate progressbar outputs
-                    for j, val in enumerate(f_val):
-                        # Use human-friendly indexes here (starting from 1)
-                        current = i * jobs + j + 1
-                        val['current_epoch'] = current
-                        val['is_initial_point'] = current <= INITIAL_POINTS
-                        logger.debug(f"Optimizer epoch evaluated: {val}")
-
-                        is_best = self.is_best_loss(val, self.current_best_loss)
-                        # This value is assigned here and not in the optimization method
-                        # to keep proper order in the list of results. That's because
-                        # evaluations can take different time. Here they are aligned in the
-                        # order they will be shown to the user.
-                        val['is_best'] = is_best
-                        output = self.get_results(val)
-                        if output:
-                            self.progress_bar.write(output)
-                        self.progress_bar.ncols = 108
-                        self.progress_bar.update(1)
-
-                        if is_best:
-                            self.current_best_loss = val['loss']
-                        self.trials.append(val)
-                        # Save results after each best epoch and every 100 epochs
-                        if is_best or current % 100 == 0:
-                            self.save_trials()
-                self.progress_bar.ncols = 108
-                self.progress_bar.close()
+                if self.print_colorized:
+                    widgets = [
+                        ' [Epoch ', progressbar.Counter(), ' of ', str(self.total_epochs),
+                        ' (', progressbar.Percentage(), ')] ',
+                        progressbar.Bar(marker=progressbar.AnimatedMarker(
+                            fill='',
+                            fill_wrap='\x1b[32m{}\x1b[39m',
+                            marker_wrap='\x1b[31m{}\x1b[39m',
+                        )),
+                        ' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
+                    ]
+                else:
+                    widgets = [
+                        ' [Epoch ', progressbar.Counter(), ' of ', str(self.total_epochs), '] ',
+                        progressbar.Bar(marker=''),
+                        ' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
+                    ]
+                with progressbar.ProgressBar(
+                    maxval=self.total_epochs, redirect_stdout=True, redirect_stderr=True,
+                    widgets=widgets
+                ) as pbar:
+                    EVALS = ceil(self.total_epochs / jobs)
+                    for i in range(EVALS):
+                        # Correct the number of epochs to be processed for the last
+                        # iteration (should not exceed self.total_epochs in total)
+                        n_rest = (i + 1) * jobs - self.total_epochs
+                        current_jobs = jobs - n_rest if n_rest > 0 else jobs
+
+                        asked = self.opt.ask(n_points=current_jobs)
+                        f_val = self.run_optimizer_parallel(parallel, asked, i)
+                        self.opt.tell(asked, [v['loss'] for v in f_val])
+                        self.fix_optimizer_models_list()
+
+                        # Calculate progressbar outputs
+                        for j, val in enumerate(f_val):
+                            # Use human-friendly indexes here (starting from 1)
+                            current = i * jobs + j + 1
+                            val['current_epoch'] = current
+                            val['is_initial_point'] = current <= INITIAL_POINTS
+                            logger.debug(f"Optimizer epoch evaluated: {val}")
+
+                            is_best = self.is_best_loss(val, self.current_best_loss)
+                            # This value is assigned here and not in the optimization method
+                            # to keep proper order in the list of results. That's because
+                            # evaluations can take different time. Here they are aligned in the
+                            # order they will be shown to the user.
+                            val['is_best'] = is_best
+                            self.print_results(val)
+
+                            if is_best:
+                                self.current_best_loss = val['loss']
+                            self.trials.append(val)
+
+                            # Save results after each best epoch and every 100 epochs
+                            if is_best or current % 100 == 0:
+                                self.save_trials()
+                            pbar.update(current)
         except KeyboardInterrupt:
-            self.progress_bar.close()
             print('User interrupted..')

         self.save_trials(final=True)
@@ -743,9 +756,3 @@ class Hyperopt:
             # This is printed when Ctrl+C is pressed quickly, before first epochs have
             # a chance to be evaluated.
             print("No epochs evaluated yet, no best result.")
-
-    def __getstate__(self):
-        state = self.__dict__.copy()
-        del state['trials']
-        del state['progress_bar']
-        return state
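
Note on the unfinished coloring: it comes from the fill_wrap and marker_wrap arguments of
progressbar.AnimatedMarker in the hunk above, which wrap the completed part and the moving
head of the bar in ANSI escape codes (\x1b[32m is green, \x1b[31m is red, \x1b[39m resets
the foreground color). A standalone sketch of that mechanism; the '#' fill character and
the epoch count are assumptions for the example, not values from the commit:

    import time

    import progressbar

    widgets = [
        ' [Epoch ', progressbar.Counter(), ' of 100 (', progressbar.Percentage(), ')] ',
        progressbar.Bar(marker=progressbar.AnimatedMarker(
            fill='#',                          # assumed fill character for the example
            fill_wrap='\x1b[32m{}\x1b[39m',    # completed part rendered in green
            marker_wrap='\x1b[31m{}\x1b[39m',  # animated head rendered in red
        )),
        ' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
    ]

    with progressbar.ProgressBar(max_value=100, widgets=widgets) as pbar:
        for epoch in range(100):
            time.sleep(0.02)                   # placeholder for real work
            pbar.update(epoch + 1)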


@@ -7,4 +7,4 @@ scikit-learn==0.22.2.post1
 scikit-optimize==0.7.4
 filelock==3.0.12
 joblib==0.14.1
-tqdm==4.43.0
+progressbar2==3.50.1


@@ -24,6 +24,7 @@ hyperopt = [
     'scikit-optimize',
     'filelock',
     'joblib',
+    'progressbar2',
 ]
 develop = [