reduce freqai testing time by reducing retrain frequency and number of features
commit 8855e36f57 (parent 44b042ba51)
@@ -27,10 +27,9 @@ def freqai_conf(default_conf, tmpdir):
             "timerange": "20180110-20180115",
             "freqai": {
                 "enabled": True,
                 "startup_candles": 10000,
                 "purge_old_models": True,
                 "train_period_days": 2,
-                "backtest_period_days": 2,
+                "backtest_period_days": 10,
                 "live_retrain_hours": 0,
                 "expiration_hours": 1,
                 "identifier": "uniqe-id100",
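Note: the intent of this config change is fewer retrains during the test backtests. freqai splits the backtest timerange into sub-windows of roughly backtest_period_days each and trains one model per pair per sub-window, so raising the value from 2 to 10 over the fixed 5-day 20180110-20180115 range leaves a single window. A rough sketch of that arithmetic, as a simplification: the real split lives in FreqaiDataKitchen and respects calendar boundaries.

import math

def approx_backtest_windows(timerange_days: int, backtest_period_days: int) -> int:
    # One train/backtest sub-window per backtest_period_days slice of the
    # overall timerange; the real splitting logic is more involved.
    return math.ceil(timerange_days / backtest_period_days)

print(approx_backtest_windows(5, 2))    # old setting: ~3 sub-windows per pair
print(approx_backtest_windows(5, 10))   # new setting: 1 sub-window per pair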
@@ -192,13 +192,13 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):

 @pytest.mark.parametrize(
     "model, num_files, strat",
     [
-        ("LightGBMRegressor", 6, "freqai_test_strat"),
-        ("XGBoostRegressor", 6, "freqai_test_strat"),
-        ("CatboostRegressor", 6, "freqai_test_strat"),
-        ("ReinforcementLearner", 7, "freqai_rl_test_strat"),
-        ("XGBoostClassifier", 6, "freqai_test_classifier"),
-        ("LightGBMClassifier", 6, "freqai_test_classifier"),
-        ("CatboostClassifier", 6, "freqai_test_classifier")
+        ("LightGBMRegressor", 2, "freqai_test_strat"),
+        ("XGBoostRegressor", 2, "freqai_test_strat"),
+        ("CatboostRegressor", 2, "freqai_test_strat"),
+        ("ReinforcementLearner", 3, "freqai_rl_test_strat"),
+        ("XGBoostClassifier", 2, "freqai_test_classifier"),
+        ("LightGBMClassifier", 2, "freqai_test_classifier"),
+        ("CatboostClassifier", 2, "freqai_test_classifier")
     ],
 )
 def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog):
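Note: num_files is the number of model sub-folders the test expects after backtesting, and it follows from the window count above: one folder per (pair, sub-window), with the ReinforcementLearner case one higher. A hypothetical helper showing where 2 and 3 come from, assuming two whitelisted pairs and one sub-window; the pair count is inferred from the expected values, not read from the conftest.

def expected_model_folders(n_pairs: int, sub_windows: int, extra: int = 0) -> int:
    # One model folder per pair and per train/backtest sub-window; `extra`
    # stands for the additional artifact the RL test case leaves behind.
    return n_pairs * sub_windows + extra

assert expected_model_folders(2, 1) == 2            # regressors / classifiers
assert expected_model_folders(2, 1, extra=1) == 3   # ReinforcementLearner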
@@ -305,7 +305,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):
     freqai.start_backtesting(df, metadata, freqai.dk)
     model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()]

-    assert len(model_folders) == 6
+    assert len(model_folders) == 2

     # without deleting the existing folder structure, re-run
@@ -333,7 +333,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):

     path = (freqai.dd.full_path / freqai.dk.backtest_predictions_folder)
     prediction_files = [x for x in path.iterdir() if x.is_file()]
-    assert len(prediction_files) == 5
+    assert len(prediction_files) == 1

     shutil.rmtree(Path(freqai.dk.full_path))
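Note: both assertions rely on the same pathlib counting pattern over freqai's output directory. A self-contained version of that pattern; the example path is illustrative only.

from pathlib import Path

def count_outputs(full_path: Path) -> tuple[int, int]:
    # Mirrors the test's checks: model sub-folders vs. prediction files.
    model_folders = [x for x in full_path.iterdir() if x.is_dir()]
    prediction_files = [x for x in full_path.iterdir() if x.is_file()]
    return len(model_folders), len(prediction_files)

example = Path("user_data/models/uniqe-id100")  # illustrative location
if example.is_dir():
    print(count_outputs(example))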
@@ -1,57 +1,19 @@
 import logging
-from pathlib import Path
-from typing import Any, Dict

 import numpy as np
-import torch as th

-from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.RL.Base4ActionRLEnv import Actions, Base4ActionRLEnv, Positions
-from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel
+from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner


 logger = logging.getLogger(__name__)


-class ReinforcementLearner_test_4ac(BaseReinforcementLearningModel):
+class ReinforcementLearner_test_4ac(ReinforcementLearner):
     """
     User created Reinforcement Learning Model prediction model.
     """

-    def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs):
-
-        train_df = data_dictionary["train_features"]
-        total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df)
-
-        policy_kwargs = dict(activation_fn=th.nn.ReLU,
-                             net_arch=[64, 64])
-
-        if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
-            model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
-                                    tensorboard_log=Path(
-                                        dk.full_path / "tensorboard" / dk.pair.split('/')[0]),
-                                    **self.freqai_info['model_training_parameters']
-                                    )
-        else:
-            logger.info('Continual training activated - starting training from previously '
-                        'trained agent.')
-            model = self.dd.model_dictionary[dk.pair]
-            model.set_env(self.train_env)
-
-        model.learn(
-            total_timesteps=int(total_timesteps),
-            callback=self.eval_callback
-        )
-
-        if Path(dk.data_path / "best_model.zip").is_file():
-            logger.info('Callback found a best model.')
-            best_model = self.MODELCLASS.load(dk.data_path / "best_model")
-            return best_model
-
-        logger.info('Couldnt find best model, using final model instead.')
-
-        return model
-
     class MyRLEnv(Base4ActionRLEnv):
         """
         User can override any function in BaseRLEnv and gym.Env. Here the user
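Note: with the base class switched to ReinforcementLearner, the test model no longer needs its own fit() because the training loop is inherited, which is what shrinks this hunk from 57 lines to 19. A minimal sketch of the shape the refactored file takes, not the verbatim result; the calculate_reward body is a placeholder and its signature is assumed to follow the base environment.

from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.Base4ActionRLEnv import Base4ActionRLEnv


class ReinforcementLearner_test_4ac(ReinforcementLearner):
    """Test model: the training loop is inherited from ReinforcementLearner;
    only the 4-action environment is customised."""

    class MyRLEnv(Base4ActionRLEnv):
        """Override any method of the base RL env / gym.Env here."""

        def calculate_reward(self, action: int) -> float:
            # Placeholder reward; the real test env defines its own shaping.
            return 0.0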
@@ -19,19 +19,6 @@ class freqai_rl_test_strat(IStrategy):

     minimal_roi = {"0": 0.1, "240": -1}

-    plot_config = {
-        "main_plot": {},
-        "subplots": {
-            "prediction": {"prediction": {"color": "blue"}},
-            "target_roi": {
-                "target_roi": {"color": "brown"},
-            },
-            "do_predict": {
-                "do_predict": {"color": "brown"},
-            },
-        },
-    }
-
     process_only_new_candles = True
     stoploss = -0.05
     use_exit_signal = True
@@ -50,10 +37,7 @@ class freqai_rl_test_strat(IStrategy):

             t = int(t)
             informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
-            informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
-            informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t)

             # FIXME: add these outside the user strategy?
             # The following columns are necessary for RL models.
             informative[f"%-{pair}raw_close"] = informative["close"]
             informative[f"%-{pair}raw_open"] = informative["open"]
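Note: every %-prefixed column created here becomes a training feature, and freqai multiplies each one across the configured indicator_periods_candles, include_timeframes and include_corr_pairlist, so dropping MFI and ADX removes several columns per pair, not just two. A rough sketch of that expansion; the multiplier values are illustrative assumptions, not values read from this commit.

def approx_feature_count(base_indicators: int, periods: int, timeframes: int, corr_pairs: int) -> int:
    # Each base indicator is expanded across period settings, timeframes and
    # correlated pairs (the real expansion also adds shifted candles).
    return base_indicators * periods * timeframes * corr_pairs

print(approx_feature_count(3, 2, 2, 2))  # rsi + mfi + adx before the change
print(approx_feature_count(1, 2, 2, 2))  # rsi only afterwards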
@@ -79,9 +63,6 @@ class freqai_rl_test_strat(IStrategy):
         # function to populate indicators during training). Notice how we ensure not to
         # add them multiple times
         if set_generalized_indicators:
-            df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7
-            df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25
-
             # For RL, there are no direct targets to set. This is filler (neutral)
             # until the agent sends an action.
             df["&-action"] = 0
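Note: the removed columns follow freqai's naming convention: %-prefixed columns are features the model trains on, while &-prefixed columns (like the &-action filler above) are treated as labels/targets. A tiny illustration of that split; the column names and values are invented for the example.

import pandas as pd

df = pd.DataFrame({
    "%-rsi-period_10": [55.0, 60.0],
    "&-action": [0, 0],
    "close": [100.0, 101.0],
})

features = [col for col in df.columns if col.startswith("%")]
labels = [col for col in df.columns if col.startswith("&")]
print(features)  # columns fed to the model as features
print(labels)    # columns used as training targets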