This commit is contained in:
orehunt 2020-08-16 04:23:55 +00:00 committed by GitHub
commit 9f9055549b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 905 additions and 170 deletions

View File

@ -18,15 +18,16 @@ ARGS_TRADE = ["db_url", "sd_notify", "dry_run"]
ARGS_COMMON_OPTIMIZE = ["timeframe", "timerange", ARGS_COMMON_OPTIMIZE = ["timeframe", "timerange",
"max_open_trades", "stake_amount", "fee"] "max_open_trades", "stake_amount", "fee"]
ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + ["position_stacking", "use_max_market_positions", ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [
"strategy_list", "export", "exportfilename"] "position_stacking", "use_max_market_positions", "strategy_list", "export", "exportfilename"
]
ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path", ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [
"position_stacking", "epochs", "spaces", "hyperopt", "hyperopt_path", "position_stacking", "epochs", "spaces",
"use_max_market_positions", "print_all", "use_max_market_positions", "print_all", "print_colorized", "print_json", "hyperopt_jobs",
"print_colorized", "print_json", "hyperopt_jobs", "hyperopt_random_state", "hyperopt_min_trades", "hyperopt_continue", "hyperopt_loss", "effort",
"hyperopt_random_state", "hyperopt_min_trades", "mode", "n_points", "lie_strat"
"hyperopt_continue", "hyperopt_loss"] ]
ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"] ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"]

View File

@ -195,6 +195,36 @@ AVAILABLE_CLI_OPTIONS = {
metavar='INT', metavar='INT',
default=constants.HYPEROPT_EPOCH, default=constants.HYPEROPT_EPOCH,
), ),
"effort": Arg(
'--effort',
help=('The higher the number, the longer will be the search if'
'no epochs are defined (default: %(default)d).'),
type=float,
metavar='FLOAT',
default=constants.HYPEROPT_EFFORT,
),
"mode": Arg(
'--mode',
help='Switches hyperopt to use one optimizer per job, use it'
'when backtesting iterations are cheap (default: %(default)s).',
metavar='NAME',
default=constants.HYPEROPT_MODE),
"n_points": Arg(
'--n-points',
help='Controls how many points to ask to the optimizer '
'increase if cpu usage of each core '
'appears low (default: %(default)d).',
type=int,
metavar='INT',
default=constants.HYPEROPT_N_POINTS
),
"lie_strat": Arg(
'--lie-strat',
help='Sets the strategy that the optimizer uses to lie '
'when asking for more than one point, '
'no effect if n_point is one (default: %(default)s).',
default=constants.HYPEROPT_LIE_STRAT
),
"spaces": Arg( "spaces": Arg(
'--spaces', '--spaces',
help='Specify which parameters to hyperopt. Space-separated list.', help='Specify which parameters to hyperopt. Space-separated list.',

View File

@ -59,6 +59,7 @@ def start_hyperopt(args: Dict[str, Any]) -> None:
try: try:
from filelock import FileLock, Timeout from filelock import FileLock, Timeout
from freqtrade.optimize.hyperopt import Hyperopt from freqtrade.optimize.hyperopt import Hyperopt
from freqtrade.optimize import hyperopt_backend as backend
except ImportError as e: except ImportError as e:
raise OperationalException( raise OperationalException(
f"{e}. Please ensure that the hyperopt dependencies are installed.") from e f"{e}. Please ensure that the hyperopt dependencies are installed.") from e
@ -77,8 +78,8 @@ def start_hyperopt(args: Dict[str, Any]) -> None:
logging.getLogger('filelock').setLevel(logging.WARNING) logging.getLogger('filelock').setLevel(logging.WARNING)
# Initialize backtesting object # Initialize backtesting object
hyperopt = Hyperopt(config) backend.hyperopt = Hyperopt(config)
hyperopt.start() backend.hyperopt.start()
except Timeout: except Timeout:
logger.info("Another running instance of freqtrade Hyperopt detected.") logger.info("Another running instance of freqtrade Hyperopt detected.")

View File

@ -265,10 +265,22 @@ class Configuration:
self._args_to_config(config, argname='epochs', self._args_to_config(config, argname='epochs',
logstring='Parameter --epochs detected ... ' logstring='Parameter --epochs detected ... '
'Will run Hyperopt with for {} epochs ...' 'Will run Hyperopt with for {} epochs ...')
) self._args_to_config(config,
argname='effort',
self._args_to_config(config, argname='spaces', logstring='Parameter --effort detected ... '
'Parameter --effort detected: {}')
self._args_to_config(config,
argname='mode',
logstring='Hyperopt will run in {} mode ...')
self._args_to_config(config,
argname='explore',
logstring='Acquisition strategy set to random {}...')
self._args_to_config(config,
argname='n_points',
logstring='Optimizers will be asked for {} points...')
self._args_to_config(config,
argname='spaces',
logstring='Parameter -s/--spaces detected: {}') logstring='Parameter -s/--spaces detected: {}')
self._args_to_config(config, argname='print_all', self._args_to_config(config, argname='print_all',

View File

@ -9,7 +9,11 @@ from typing import List, Tuple
DEFAULT_CONFIG = 'config.json' DEFAULT_CONFIG = 'config.json'
DEFAULT_EXCHANGE = 'bittrex' DEFAULT_EXCHANGE = 'bittrex'
PROCESS_THROTTLE_SECS = 5 # sec PROCESS_THROTTLE_SECS = 5 # sec
HYPEROPT_EPOCH = 100 # epochs HYPEROPT_EPOCH = 0 # epochs
HYPEROPT_EFFORT = 0. # tune max epoch count
HYPEROPT_N_POINTS = 1 # tune iterations between estimations
HYPEROPT_MODE = 'single'
HYPEROPT_LIE_STRAT = 'default'
RETRY_TIMEOUT = 30 # sec RETRY_TIMEOUT = 30 # sec
DEFAULT_HYPEROPT_LOSS = 'DefaultHyperOptLoss' DEFAULT_HYPEROPT_LOSS = 'DefaultHyperOptLoss'
DEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite' DEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite'

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,18 @@
"""Cross-module shared state for hyperopt.

This module is a process-wide registry: names are declared below (mostly as
bare annotations, without assignment) and are assigned at runtime by the
hyperopt entry point and workers, which all reach the same objects through
this single import point.
"""
from typing import Any, Dict, List, Tuple
from queue import Queue
from multiprocessing.managers import SyncManager
# the active Hyperopt instance; assigned by the CLI entry point
# (backend.hyperopt = Hyperopt(config)) before start() is called
hyperopt: Any = None
# multiprocessing manager expected to own the shared containers below
# NOTE(review): assigned elsewhere at runtime — annotation only here
manager: SyncManager
# stores the optimizers in multi opt mode
optimizers: Queue
# stores the results to share between optimizers
# in the form of key = Tuple[Xi], value = Tuple[float, int]
# where float is the loss and int is a decreasing counter of optimizers
# that have registered the result
results_shared: Dict[Tuple, Tuple]
# in single mode the results_list is used to pass the results to the optimizer
# to fit new models
results_list: List
# results_batch keeps results per batch that are eventually logged and stored
results_batch: Queue

View File

@ -482,6 +482,7 @@ def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None:
def test_save_results_saves_epochs(mocker, hyperopt, testdatadir, caplog) -> None: def test_save_results_saves_epochs(mocker, hyperopt, testdatadir, caplog) -> None:
epochs = create_results(mocker, hyperopt, testdatadir) epochs = create_results(mocker, hyperopt, testdatadir)
mock_dump = mocker.patch('freqtrade.optimize.hyperopt.dump', return_value=None) mock_dump = mocker.patch('freqtrade.optimize.hyperopt.dump', return_value=None)
mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.save_opts')
results_file = testdatadir / 'optimize' / 'ut_results.pickle' results_file = testdatadir / 'optimize' / 'ut_results.pickle'
caplog.set_level(logging.DEBUG) caplog.set_level(logging.DEBUG)
@ -529,7 +530,7 @@ def test_start_calls_optimizer(mocker, default_conf, caplog, capsys) -> None:
) )
parallel = mocker.patch( parallel = mocker.patch(
'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel',
MagicMock(return_value=[{ MagicMock(return_value=[{
'loss': 1, 'results_explanation': 'foo result', 'loss': 1, 'results_explanation': 'foo result',
'params': {'buy': {}, 'sell': {}, 'roi': {}, 'stoploss': 0.0}, 'params': {'buy': {}, 'sell': {}, 'roi': {}, 'stoploss': 0.0},
@ -564,8 +565,11 @@ def test_start_calls_optimizer(mocker, default_conf, caplog, capsys) -> None:
out, err = capsys.readouterr() out, err = capsys.readouterr()
assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
assert dumper.called assert dumper.called
# Should be called twice, once for historical candle data, once to save evaluations # Should be called 3 times, from:
assert dumper.call_count == 2 # 1 tickerdata
# 1 save_trials
# 1 save_opts
assert dumper.call_count == 3
assert hasattr(hyperopt.backtesting.strategy, "advise_sell") assert hasattr(hyperopt.backtesting.strategy, "advise_sell")
assert hasattr(hyperopt.backtesting.strategy, "advise_buy") assert hasattr(hyperopt.backtesting.strategy, "advise_buy")
assert hasattr(hyperopt, "max_open_trades") assert hasattr(hyperopt, "max_open_trades")
@ -686,8 +690,9 @@ def test_buy_strategy_generator(hyperopt, testdatadir) -> None:
assert 1 in result['buy'] assert 1 in result['buy']
def test_generate_optimizer(mocker, default_conf) -> None: def test_backtest_params(mocker, default_conf) -> None:
default_conf.update({'config': 'config.json.example', default_conf.update({
'config': 'config.json.example',
'hyperopt': 'DefaultHyperOpt', 'hyperopt': 'DefaultHyperOpt',
'timerange': None, 'timerange': None,
'spaces': 'all', 'spaces': 'all',
@ -792,8 +797,8 @@ def test_generate_optimizer(mocker, default_conf) -> None:
hyperopt = Hyperopt(default_conf) hyperopt = Hyperopt(default_conf)
hyperopt.dimensions = hyperopt.hyperopt_space() hyperopt.dimensions = hyperopt.hyperopt_space()
generate_optimizer_value = hyperopt.generate_optimizer(list(optimizer_param.values())) backtest_params_value = hyperopt.backtest_params(list(optimizer_param.values()))
assert generate_optimizer_value == response_expected assert backtest_params_value == response_expected
def test_clean_hyperopt(mocker, default_conf, caplog): def test_clean_hyperopt(mocker, default_conf, caplog):
@ -809,7 +814,8 @@ def test_clean_hyperopt(mocker, default_conf, caplog):
unlinkmock = mocker.patch("freqtrade.optimize.hyperopt.Path.unlink", MagicMock()) unlinkmock = mocker.patch("freqtrade.optimize.hyperopt.Path.unlink", MagicMock())
h = Hyperopt(default_conf) h = Hyperopt(default_conf)
assert unlinkmock.call_count == 2 # once for tickerdata, once for trials, once for optimizers (list)
assert unlinkmock.call_count == 3
assert log_has(f"Removing `{h.data_pickle_file}`.", caplog) assert log_has(f"Removing `{h.data_pickle_file}`.", caplog)
@ -841,7 +847,7 @@ def test_print_json_spaces_all(mocker, default_conf, caplog, capsys) -> None:
) )
parallel = mocker.patch( parallel = mocker.patch(
'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel',
MagicMock(return_value=[{ MagicMock(return_value=[{
'loss': 1, 'results_explanation': 'foo result', 'params': {}, 'loss': 1, 'results_explanation': 'foo result', 'params': {},
'params_details': { 'params_details': {
@ -886,8 +892,11 @@ def test_print_json_spaces_all(mocker, default_conf, caplog, capsys) -> None:
) )
assert result_str in out # noqa: E501 assert result_str in out # noqa: E501
assert dumper.called assert dumper.called
# Should be called twice, once for historical candle data, once to save evaluations # Should be called 3 times from:
assert dumper.call_count == 2 # 1 tickerdata
# 1 save_trials
# 1 save_opts
assert dumper.call_count == 3
def test_print_json_spaces_default(mocker, default_conf, caplog, capsys) -> None: def test_print_json_spaces_default(mocker, default_conf, caplog, capsys) -> None:
@ -900,7 +909,7 @@ def test_print_json_spaces_default(mocker, default_conf, caplog, capsys) -> None
) )
parallel = mocker.patch( parallel = mocker.patch(
'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel',
MagicMock(return_value=[{ MagicMock(return_value=[{
'loss': 1, 'results_explanation': 'foo result', 'params': {}, 'loss': 1, 'results_explanation': 'foo result', 'params': {},
'params_details': { 'params_details': {
@ -940,8 +949,11 @@ def test_print_json_spaces_default(mocker, default_conf, caplog, capsys) -> None
out, err = capsys.readouterr() out, err = capsys.readouterr()
assert '{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi":{},"stoploss":null}' in out # noqa: E501 assert '{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi":{},"stoploss":null}' in out # noqa: E501
assert dumper.called assert dumper.called
# Should be called twice, once for historical candle data, once to save evaluations # Should be called three times, from:
assert dumper.call_count == 2 # 1 tickerdata
# 1 save_trials
# 1 save_opts
assert dumper.call_count == 3
def test_print_json_spaces_roi_stoploss(mocker, default_conf, caplog, capsys) -> None: def test_print_json_spaces_roi_stoploss(mocker, default_conf, caplog, capsys) -> None:
@ -954,7 +966,7 @@ def test_print_json_spaces_roi_stoploss(mocker, default_conf, caplog, capsys) ->
) )
parallel = mocker.patch( parallel = mocker.patch(
'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel',
MagicMock(return_value=[{ MagicMock(return_value=[{
'loss': 1, 'results_explanation': 'foo result', 'params': {}, 'loss': 1, 'results_explanation': 'foo result', 'params': {},
'params_details': {'roi': {}, 'stoploss': {'stoploss': None}}, 'params_details': {'roi': {}, 'stoploss': {'stoploss': None}},
@ -990,8 +1002,11 @@ def test_print_json_spaces_roi_stoploss(mocker, default_conf, caplog, capsys) ->
out, err = capsys.readouterr() out, err = capsys.readouterr()
assert '{"minimal_roi":{},"stoploss":null}' in out assert '{"minimal_roi":{},"stoploss":null}' in out
assert dumper.called assert dumper.called
# Should be called twice, once for historical candle data, once to save evaluations # Should be called three times from:
assert dumper.call_count == 2 # 1 for tickerdata
# 1 for save_trials
# 1 for save_opts
assert dumper.call_count == 3
def test_simplified_interface_roi_stoploss(mocker, default_conf, caplog, capsys) -> None: def test_simplified_interface_roi_stoploss(mocker, default_conf, caplog, capsys) -> None:
@ -1004,7 +1019,7 @@ def test_simplified_interface_roi_stoploss(mocker, default_conf, caplog, capsys)
) )
parallel = mocker.patch( parallel = mocker.patch(
'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel',
MagicMock(return_value=[{ MagicMock(return_value=[{
'loss': 1, 'results_explanation': 'foo result', 'params': {'stoploss': 0.0}, 'loss': 1, 'results_explanation': 'foo result', 'params': {'stoploss': 0.0},
'results_metrics': 'results_metrics':
@ -1042,8 +1057,11 @@ def test_simplified_interface_roi_stoploss(mocker, default_conf, caplog, capsys)
out, err = capsys.readouterr() out, err = capsys.readouterr()
assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
assert dumper.called assert dumper.called
# Should be called twice, once for historical candle data, once to save evaluations # Should be called three times, from:
assert dumper.call_count == 2 # 1 for tickerdata
# 1 for save_trials
# 1 for save_opts
assert dumper.call_count == 3
assert hasattr(hyperopt.backtesting.strategy, "advise_sell") assert hasattr(hyperopt.backtesting.strategy, "advise_sell")
assert hasattr(hyperopt.backtesting.strategy, "advise_buy") assert hasattr(hyperopt.backtesting.strategy, "advise_buy")
assert hasattr(hyperopt, "max_open_trades") assert hasattr(hyperopt, "max_open_trades")
@ -1092,7 +1110,7 @@ def test_simplified_interface_buy(mocker, default_conf, caplog, capsys) -> None:
) )
parallel = mocker.patch( parallel = mocker.patch(
'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel',
MagicMock(return_value=[{ MagicMock(return_value=[{
'loss': 1, 'results_explanation': 'foo result', 'params': {}, 'loss': 1, 'results_explanation': 'foo result', 'params': {},
'results_metrics': 'results_metrics':
@ -1119,7 +1137,7 @@ def test_simplified_interface_buy(mocker, default_conf, caplog, capsys) -> None:
hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={}) hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
# TODO: sell_strategy_generator() is actually not called because # TODO: sell_strategy_generator() is actually not called because
# run_optimizer_parallel() is mocked # run_backtest_parallel() is mocked
del hyperopt.custom_hyperopt.__class__.sell_strategy_generator del hyperopt.custom_hyperopt.__class__.sell_strategy_generator
del hyperopt.custom_hyperopt.__class__.sell_indicator_space del hyperopt.custom_hyperopt.__class__.sell_indicator_space
@ -1130,8 +1148,11 @@ def test_simplified_interface_buy(mocker, default_conf, caplog, capsys) -> None:
out, err = capsys.readouterr() out, err = capsys.readouterr()
assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
assert dumper.called assert dumper.called
# Should be called twice, once for historical candle data, once to save evaluations # Should be called three times, from:
assert dumper.call_count == 2 # 1 tickerdata
# 1 save_trials
# 1 save_opts
assert dumper.call_count == 3
assert hasattr(hyperopt.backtesting.strategy, "advise_sell") assert hasattr(hyperopt.backtesting.strategy, "advise_sell")
assert hasattr(hyperopt.backtesting.strategy, "advise_buy") assert hasattr(hyperopt.backtesting.strategy, "advise_buy")
assert hasattr(hyperopt, "max_open_trades") assert hasattr(hyperopt, "max_open_trades")
@ -1149,7 +1170,7 @@ def test_simplified_interface_sell(mocker, default_conf, caplog, capsys) -> None
) )
parallel = mocker.patch( parallel = mocker.patch(
'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', 'freqtrade.optimize.hyperopt.Hyperopt.run_backtest_parallel',
MagicMock(return_value=[{ MagicMock(return_value=[{
'loss': 1, 'results_explanation': 'foo result', 'params': {}, 'loss': 1, 'results_explanation': 'foo result', 'params': {},
'results_metrics': 'results_metrics':
@ -1176,7 +1197,7 @@ def test_simplified_interface_sell(mocker, default_conf, caplog, capsys) -> None
hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={}) hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
# TODO: buy_strategy_generator() is actually not called because # TODO: buy_strategy_generator() is actually not called because
# run_optimizer_parallel() is mocked # run_backtest_parallel() is mocked
del hyperopt.custom_hyperopt.__class__.buy_strategy_generator del hyperopt.custom_hyperopt.__class__.buy_strategy_generator
del hyperopt.custom_hyperopt.__class__.indicator_space del hyperopt.custom_hyperopt.__class__.indicator_space
@ -1187,8 +1208,11 @@ def test_simplified_interface_sell(mocker, default_conf, caplog, capsys) -> None
out, err = capsys.readouterr() out, err = capsys.readouterr()
assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
assert dumper.called assert dumper.called
# Should be called twice, once for historical candle data, once to save evaluations # Should be called three times, from:
assert dumper.call_count == 2 # 1 tickerdata
# 1 save_trials
# 1 save_opts
assert dumper.call_count == 3
assert hasattr(hyperopt.backtesting.strategy, "advise_sell") assert hasattr(hyperopt.backtesting.strategy, "advise_sell")
assert hasattr(hyperopt.backtesting.strategy, "advise_buy") assert hasattr(hyperopt.backtesting.strategy, "advise_buy")
assert hasattr(hyperopt, "max_open_trades") assert hasattr(hyperopt, "max_open_trades")