From ad56c45ecd6ccc821296d99a9f27a0a01e3f3e9c Mon Sep 17 00:00:00 2001
From: richardjozsa
Date: Tue, 15 Nov 2022 10:43:28 +0100
Subject: [PATCH] Add total profit to the monitor file and add a monitor for
 the training

This can help beginners see what is happening inside the training, and
helps them understand how much profit their agent made during the
training and evaluation periods.
---
 freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py
index d0ddce294..776578da3 100644
--- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py
+++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py
@@ -140,18 +140,18 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         train_df = data_dictionary["train_features"]
         test_df = data_dictionary["test_features"]
 
-        self.train_env = self.MyRLEnv(df=train_df,
+        self.train_env = Monitor(self.MyRLEnv(df=train_df,
                                       prices=prices_train,
                                       window_size=self.CONV_WIDTH,
                                       reward_kwargs=self.reward_params,
                                       config=self.config,
-                                      dp=self.data_provider)
+                                      dp=self.data_provider), info_keywords=('total_profit',))
         self.eval_env = Monitor(self.MyRLEnv(df=test_df,
                                              prices=prices_test,
                                              window_size=self.CONV_WIDTH,
                                              reward_kwargs=self.reward_params,
                                              config=self.config,
-                                             dp=self.data_provider))
+                                             dp=self.data_provider), info_keywords=('total_profit',))
         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                           render=False, eval_freq=len(train_df),
                                           best_model_save_path=str(dk.data_path))
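
For context, stable_baselines3's Monitor wrapper records per-episode
statistics ('r', 'l', 't') and, via info_keywords, copies the named keys
from the env's final step info dict into that episode record (and into
the monitor CSV when a filename is given). info_keywords must be a tuple
of strings, hence the trailing comma: ('total_profit') is just a string
and would make Monitor iterate over its characters. Below is a minimal
sketch of the mechanism; ToyTradingEnv and its numbers are hypothetical
stand-ins for MyRLEnv, only the Monitor API is real.

    import gym
    import numpy as np
    from stable_baselines3.common.monitor import Monitor


    class ToyTradingEnv(gym.Env):
        """Hypothetical stand-in for MyRLEnv: exposes 'total_profit' in info."""

        def __init__(self):
            self.action_space = gym.spaces.Discrete(2)
            self.observation_space = gym.spaces.Box(
                low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
            self._profit = 1.0
            self._t = 0

        def reset(self):
            self._profit, self._t = 1.0, 0
            return np.zeros(1, dtype=np.float32)

        def step(self, action):
            self._t += 1
            # Toy bookkeeping: action 1 gains, action 0 loses a little.
            self._profit *= 1.001 if action == 1 else 0.999
            done = self._t >= 10
            # Monitor reads each info_keywords entry from this dict at
            # episode end, so 'total_profit' must be present here.
            info = {'total_profit': self._profit}
            return np.zeros(1, dtype=np.float32), 0.0, done, info


    # Note the trailing comma, exactly as in the patch above.
    env = Monitor(ToyTradingEnv(), info_keywords=('total_profit',))
    obs = env.reset()
    done = False
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
    # At episode end Monitor attaches the record, e.g.
    # {'r': 0.0, 'l': 10, 't': ..., 'total_profit': ...}
    print(info['episode'])

Passing a filename to Monitor would additionally write these records to
a .monitor.csv file, which is where the total profit becomes visible for
both the train and eval environments wrapped in the patch.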