Merge pull request #3054 from Fredrik81/progress-bar
Hyperopt: Progressbar during hyperopt
commit 18a6c98a82
@@ -52,8 +52,8 @@ def start_hyperopt_list(args: Dict[str, Any]) -> None:
     if not export_csv:
         try:
-            Hyperopt.print_result_table(config, trials, total_epochs,
-                                        not filteroptions['only_best'], print_colorized, 0)
+            print(Hyperopt.get_result_table(config, trials, total_epochs,
+                                            not filteroptions['only_best'], print_colorized, 0))
         except KeyboardInterrupt:
             print('User interrupted..')
@@ -18,13 +18,13 @@ def _set_loggers(verbosity: int = 0) -> None:
     """

     logging.getLogger('requests').setLevel(
         logging.INFO if verbosity <= 1 else logging.DEBUG
     )
     logging.getLogger("urllib3").setLevel(
         logging.INFO if verbosity <= 1 else logging.DEBUG
     )
     logging.getLogger('ccxt.base.exchange').setLevel(
         logging.INFO if verbosity <= 2 else logging.DEBUG
     )
     logging.getLogger('telegram').setLevel(logging.INFO)
@@ -7,7 +7,6 @@ This module contains the hyperopt logic
 import locale
 import logging
 import random
-import sys
 import warnings
 from math import ceil
 from collections import OrderedDict
@@ -18,10 +17,10 @@ from typing import Any, Dict, List, Optional
 import rapidjson
+from colorama import Fore, Style
 from colorama import init as colorama_init
 from joblib import (Parallel, cpu_count, delayed, dump, load,
                     wrap_non_picklable_objects)
 from pandas import DataFrame, json_normalize, isna
+import progressbar
 import tabulate
 from os import path
 import io
@@ -43,7 +42,8 @@ with warnings.catch_warnings():
     from skopt import Optimizer
     from skopt.space import Dimension

+progressbar.streams.wrap_stderr()
+progressbar.streams.wrap_stdout()
 logger = logging.getLogger(__name__)
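Note on the two stream-wrapping calls added above: progressbar2 redraws the bar in place, so any log line written while a bar is active would otherwise tear it apart; wrapping stdout/stderr at import time lets normal logging keep working. A minimal, self-contained sketch of that behaviour (the loop length and sleep are placeholders, not part of this change):

```python
import logging
import time

import progressbar

# Wrap the standard streams before any bar is created, as the module above does,
# so log output is re-drawn above the active bar instead of through it.
progressbar.streams.wrap_stderr()
progressbar.streams.wrap_stdout()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

for epoch in progressbar.progressbar(range(10)):
    time.sleep(0.1)  # stand-in for one hyperopt epoch
    if epoch % 3 == 0:
        logger.info("still running at epoch %d", epoch)
```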
@@ -266,21 +266,33 @@ class Hyperopt:
         Log results if it is better than any previous evaluation
         """
         is_best = results['is_best']
-        if not self.print_all:
-            # Print '\n' after each 100th epoch to separate dots from the log messages.
-            # Otherwise output is messy on a terminal.
-            print('.', end='' if results['current_epoch'] % 100 != 0 else None)  # type: ignore
-            sys.stdout.flush()

         if self.print_all or is_best:
-            if not self.print_all:
-                # Separate the results explanation string from dots
-                print("\n")
-            self.print_result_table(self.config, results, self.total_epochs,
-                                    self.print_all, self.print_colorized,
-                                    self.hyperopt_table_header)
+            print(
+                self.get_result_table(
+                    self.config, results, self.total_epochs,
+                    self.print_all, self.print_colorized,
+                    self.hyperopt_table_header
+                )
+            )
             self.hyperopt_table_header = 2

+    def get_results(self, results) -> str:
+        """
+        Log results if it is better than any previous evaluation
+        """
+        output = ''
+        is_best = results['is_best']
+
+        if self.print_all or is_best:
+            output = self.get_result_table(
+                self.config, results, self.total_epochs,
+                self.print_all, self.print_colorized,
+                self.hyperopt_table_header
+            )
+            self.hyperopt_table_header = 2
+        return output
+
     @staticmethod
     def print_results_explanation(results, total_epochs, highlight_best: bool,
                                   print_colorized: bool) -> None:
@@ -304,13 +316,13 @@ class Hyperopt:
                 f"Objective: {results['loss']:.5f}")

     @staticmethod
-    def print_result_table(config: dict, results: list, total_epochs: int, highlight_best: bool,
-                           print_colorized: bool, remove_header: int) -> None:
+    def get_result_table(config: dict, results: list, total_epochs: int, highlight_best: bool,
+                         print_colorized: bool, remove_header: int) -> str:
         """
         Log result table
         """
         if not results:
-            return
+            return ''

         tabulate.PRESERVE_WHITESPACE = True
@@ -381,7 +393,7 @@ class Hyperopt:
             trials.to_dict(orient='list'), tablefmt='psql',
             headers='keys', stralign="right"
         )
-        print(table)
+        return table

     @staticmethod
     def export_csv_file(config: dict, results: list, total_epochs: int, highlight_best: bool,
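The switch from `print(table)` to `return table` (and from `print_result_table` to `get_result_table` above) separates building the table from displaying it, so the CLI can simply `print()` the string while the hyperopt loop can emit it through the progressbar-managed stdout. A rough sketch of the same pattern with tabulate; the column names and rows below are invented for illustration:

```python
from tabulate import tabulate


def get_result_table(results: list) -> str:
    """Render results as a psql-style table and return it as a string."""
    if not results:
        return ''
    return tabulate(results, headers='keys', tablefmt='psql', stralign='right')


# The caller decides where the text goes: terminal, log file, or a stream
# wrapped by progressbar2.
rows = [{'Epoch': 1, 'Objective': 0.01234}, {'Epoch': 2, 'Objective': 0.00987}]
print(get_result_table(rows))
```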
@@ -654,47 +666,75 @@ class Hyperopt:
         self.dimensions: List[Dimension] = self.hyperopt_space()
         self.opt = self.get_optimizer(self.dimensions, config_jobs)

         if self.print_colorized:
             colorama_init(autoreset=True)

         try:
             with Parallel(n_jobs=config_jobs) as parallel:
                 jobs = parallel._effective_n_jobs()
                 logger.info(f'Effective number of parallel workers used: {jobs}')
-                EVALS = ceil(self.total_epochs / jobs)
-                for i in range(EVALS):
-                    # Correct the number of epochs to be processed for the last
-                    # iteration (should not exceed self.total_epochs in total)
-                    n_rest = (i + 1) * jobs - self.total_epochs
-                    current_jobs = jobs - n_rest if n_rest > 0 else jobs
-
-                    asked = self.opt.ask(n_points=current_jobs)
-                    f_val = self.run_optimizer_parallel(parallel, asked, i)
-                    self.opt.tell(asked, [v['loss'] for v in f_val])
-                    self.fix_optimizer_models_list()
-                    for j, val in enumerate(f_val):
-                        # Use human-friendly indexes here (starting from 1)
-                        current = i * jobs + j + 1
-                        val['current_epoch'] = current
-                        val['is_initial_point'] = current <= INITIAL_POINTS
-                        logger.debug(f"Optimizer epoch evaluated: {val}")
-
-                        is_best = self.is_best_loss(val, self.current_best_loss)
-                        # This value is assigned here and not in the optimization method
-                        # to keep proper order in the list of results. That's because
-                        # evaluations can take different time. Here they are aligned in the
-                        # order they will be shown to the user.
-                        val['is_best'] = is_best
-
-                        self.print_results(val)
-
-                        if is_best:
-                            self.current_best_loss = val['loss']
-                        self.trials.append(val)
-
-                        # Save results after each best epoch and every 100 epochs
-                        if is_best or current % 100 == 0:
-                            self.save_trials()
+                # Define progressbar
+                if self.print_colorized:
+                    widgets = [
+                        ' [Epoch ', progressbar.Counter(), ' of ', str(self.total_epochs),
+                        ' (', progressbar.Percentage(), ')] ',
+                        progressbar.Bar(marker=progressbar.AnimatedMarker(
+                            fill='\N{FULL BLOCK}',
+                            fill_wrap=Fore.GREEN + '{}' + Fore.RESET,
+                            marker_wrap=Style.BRIGHT + '{}' + Style.RESET_ALL,
+                        )),
+                        ' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
+                    ]
+                else:
+                    widgets = [
+                        ' [Epoch ', progressbar.Counter(), ' of ', str(self.total_epochs),
+                        ' (', progressbar.Percentage(), ')] ',
+                        progressbar.Bar(marker=progressbar.AnimatedMarker(
+                            fill='\N{FULL BLOCK}',
+                        )),
+                        ' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
+                    ]
+                with progressbar.ProgressBar(
+                    maxval=self.total_epochs, redirect_stdout=True, redirect_stderr=True,
+                    widgets=widgets
+                ) as pbar:
+                    EVALS = ceil(self.total_epochs / jobs)
+                    for i in range(EVALS):
+                        # Correct the number of epochs to be processed for the last
+                        # iteration (should not exceed self.total_epochs in total)
+                        n_rest = (i + 1) * jobs - self.total_epochs
+                        current_jobs = jobs - n_rest if n_rest > 0 else jobs
+
+                        asked = self.opt.ask(n_points=current_jobs)
+                        f_val = self.run_optimizer_parallel(parallel, asked, i)
+                        self.opt.tell(asked, [v['loss'] for v in f_val])
+                        self.fix_optimizer_models_list()
+
+                        # Calculate progressbar outputs
+                        for j, val in enumerate(f_val):
+                            # Use human-friendly indexes here (starting from 1)
+                            current = i * jobs + j + 1
+                            val['current_epoch'] = current
+                            val['is_initial_point'] = current <= INITIAL_POINTS
+                            logger.debug(f"Optimizer epoch evaluated: {val}")
+
+                            is_best = self.is_best_loss(val, self.current_best_loss)
+                            # This value is assigned here and not in the optimization method
+                            # to keep proper order in the list of results. That's because
+                            # evaluations can take different time. Here they are aligned in the
+                            # order they will be shown to the user.
+                            val['is_best'] = is_best
+                            self.print_results(val)
+
+                            if is_best:
+                                self.current_best_loss = val['loss']
+                            self.trials.append(val)
+
+                            # Save results after each best epoch and every 100 epochs
+                            if is_best or current % 100 == 0:
+                                self.save_trials()
+
+                            pbar.update(current)

         except KeyboardInterrupt:
             print('User interrupted..')
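The widget list and the `with progressbar.ProgressBar(...)` block introduced in this hunk can be tried in isolation. The sketch below mirrors that setup with a dummy epoch loop; the epoch count, the sleep and the plain (non-colorized) widget set are placeholders, and `max_value` is used where the diff passes the older `maxval` spelling:

```python
import time

import progressbar

total_epochs = 25  # placeholder for self.total_epochs

widgets = [
    ' [Epoch ', progressbar.Counter(), ' of ', str(total_epochs),
    ' (', progressbar.Percentage(), ')] ',
    progressbar.Bar(marker=progressbar.AnimatedMarker(fill='\N{FULL BLOCK}')),
    ' [', progressbar.ETA(), ', ', progressbar.Timer(), ']',
]

with progressbar.ProgressBar(max_value=total_epochs, redirect_stdout=True,
                             redirect_stderr=True, widgets=widgets) as pbar:
    for current in range(1, total_epochs + 1):
        time.sleep(0.05)  # stand-in for one evaluated epoch
        if current % 10 == 0:
            # redirect_stdout=True lets prints appear above the bar,
            # the same way the result tables do in the hyperopt loop.
            print(f"Epoch {current}: placeholder for a result table")
        pbar.update(current)
```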
@@ -7,3 +7,4 @@ scikit-learn==0.22.2.post1
 scikit-optimize==0.7.4
 filelock==3.0.12
 joblib==0.14.1
+progressbar2==3.50.1
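The hyperopt requirements now pin `progressbar2==3.50.1`. If needed, the installed version can be verified at runtime (Python 3.8+; a convenience check, not part of the change):

```python
from importlib.metadata import version

# Should report 3.50.1 when the pinned requirement above is installed.
print(version("progressbar2"))
```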