From caa47a2f47f6c6ce936a0762fee5bbaa39fc492d Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Wed, 28 Sep 2022 03:06:05 +0000 Subject: [PATCH 1/8] close subproc env on shutdown --- freqtrade/freqai/freqai_interface.py | 9 +++++++++ .../ReinforcementLearner_multiproc.py | 14 +++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 1a847a25e..f8ca34ddb 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -158,6 +158,13 @@ class IFreqaiModel(ABC): self.model = None self.dk = None + def _on_stop(self): + """ + Callback for Subclasses to override to include logic for shutting down resources + when SIGINT is sent. + """ + return + def shutdown(self): """ Cleans up threads on Shutdown, set stop event. Join threads to wait @@ -166,6 +173,8 @@ class IFreqaiModel(ABC): logger.info("Stopping FreqAI") self._stop_event.set() + self._on_stop() + logger.info("Waiting on Training iteration") for _thread in self._threads: _thread.join() diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 0e6449dcd..efdd4883c 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -73,7 +73,7 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"]["thread_count"]) + num_cpu = int(self.freqai_info["rl_config"].get("cpu_count", 2)) self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i @@ -88,3 +88,15 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): self.eval_callback = 
EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) + + + def _on_stop(self): + """ + Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. + """ + + if hasattr(self, "train_env") and self.train_env: + self.train_env.close() + + if hasattr(self, "eval_env") and self.eval_env: + self.eval_env.close() \ No newline at end of file From 9e36b0d2ea89a1bbbcf6aab411727a9ccedb4c32 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 27 Sep 2022 22:02:33 -0600 Subject: [PATCH 2/8] fix formatting --- .../freqai/prediction_models/ReinforcementLearner_multiproc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index efdd4883c..034c752e7 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -89,7 +89,6 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - def _on_stop(self): """ Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. 
@@ -99,4 +98,4 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): self.train_env.close() if hasattr(self, "eval_env") and self.eval_env: - self.eval_env.close() \ No newline at end of file + self.eval_env.close() From 099137adaca3d81f5e5cada2cb70ea159ee6ffa1 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Tue, 27 Sep 2022 22:35:15 -0600 Subject: [PATCH 3/8] remove hasattr calls --- .../prediction_models/ReinforcementLearner_multiproc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 034c752e7..d01c409c3 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -94,8 +94,8 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown. 
""" - if hasattr(self, "train_env") and self.train_env: + if self.train_env: self.train_env.close() - if hasattr(self, "eval_env") and self.eval_env: + if self.eval_env: self.eval_env.close() From 83343dc2f11988cc2ee384ebdcba2731d156e26d Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 29 Sep 2022 00:10:18 +0200 Subject: [PATCH 4/8] control number of threads, update doc --- docs/freqai.md | 2 +- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 +++- freqtrade/freqai/data_kitchen.py | 6 +++++- freqtrade/freqai/freqai_interface.py | 2 ++ .../prediction_models/ReinforcementLearner_multiproc.py | 5 ++--- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/freqai.md b/docs/freqai.md index 938fb70f4..20562aadc 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -131,7 +131,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi | | *Reinforcement Learning Parameters** | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model.
**Datatype:** Dictionary. | `train_cycles` | Training time steps will be set based on the `train_cycles` * number of training data points.<br>
**Datatype:** Integer. -| `thread_count` | Number of threads to dedicate to the Reinforcement Learning training process.
**Datatype:** int. +| `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process.
**Datatype:** int. | `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()`
**Datatype:** int. | `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website)<br>
**Datatype:** string. | `policy_type` | One of the available policy types from stable_baselines3
**Datatype:** string. diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 70b3e58ef..8785192f4 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -39,7 +39,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) - th.set_num_threads(self.freqai_info['rl_config'].get('thread_count', 4)) + self.max_threads = max(self.freqai_info['rl_config'].get( + 'cpu_count', 0), int(self.max_system_threads / 2)) + th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Union[SubprocVecEnv, gym.Env] = None self.eval_env: Union[SubprocVecEnv, gym.Env] = None diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 005005368..9f84e63b7 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,6 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd +import psutil from pandas import DataFrame from scipy import stats from sklearn import linear_model @@ -95,7 +96,10 @@ class FreqaiDataKitchen: ) self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {}) - self.thread_count = self.freqai_config.get("data_kitchen_thread_count", -1) + if not self.freqai_config.get("data_kitchen_thread_count", 0): + self.thread_count = int(psutil.cpu_count() * 2 - 2) + else: + self.thread_count = self.freqai_config["data_kitchen_thread_count"] self.train_dates: DataFrame = pd.DataFrame() self.unique_classes: Dict[str, list] = {} self.unique_class_list: list = [] diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index f8ca34ddb..5fe3c318c 100644 --- a/freqtrade/freqai/freqai_interface.py +++ 
b/freqtrade/freqai/freqai_interface.py @@ -11,6 +11,7 @@ from typing import Any, Dict, List, Optional, Tuple import numpy as np import pandas as pd +import psutil from numpy.typing import NDArray from pandas import DataFrame @@ -96,6 +97,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() self.strategy: Optional[IStrategy] = None + self.max_system_threads = int(psutil.cpu_count() * 2 - 2) def __getstate__(self): """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index d01c409c3..a644c0c04 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -73,18 +73,17 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel): test_df = data_dictionary["test_features"] env_id = "train_env" - num_cpu = int(self.freqai_info["rl_config"].get("cpu_count", 2)) self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i - in range(num_cpu)]) + in range(self.max_threads)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, test_df, prices_test, self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i - in range(num_cpu)]) + in range(self.max_threads)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) From dcf6ebe273729bf9634c44804f016941805d68d9 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 00:37:03 +0200 Subject: [PATCH 5/8] Update BaseReinforcementLearningModel.py --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 8785192f4..33568fa0b 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -39,7 +39,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) - self.max_threads = max(self.freqai_info['rl_config'].get( + self.max_threads = min(self.freqai_info['rl_config'].get( 'cpu_count', 0), int(self.max_system_threads / 2)) th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] From 555cc4263003fc57599896f912beba66a46376b1 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 14:00:09 +0200 Subject: [PATCH 6/8] Ensure 1 thread is available (for testing purposes) --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 33568fa0b..705c35297 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -40,7 +40,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def __init__(self, **kwargs): super().__init__(config=kwargs['config']) self.max_threads = min(self.freqai_info['rl_config'].get( - 'cpu_count', 0), int(self.max_system_threads / 2)) + 'cpu_count', 1), max(int(self.max_system_threads / 2), 1)) th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Union[SubprocVecEnv, gym.Env] = None From 7ef56e30296ad3a32fb88a01feb58b5b9b236944 Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 14:01:22 +0200 Subject: [PATCH 7/8] Ensure at least 1 thread is available --- freqtrade/freqai/freqai_interface.py | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 5fe3c318c..44535f191 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -97,7 +97,7 @@ class IFreqaiModel(ABC): self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() self.strategy: Optional[IStrategy] = None - self.max_system_threads = int(psutil.cpu_count() * 2 - 2) + self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1) def __getstate__(self): """ From 6e74d46660ac47aa44fc26a5c1c439d88d96576e Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Thu, 29 Sep 2022 14:02:00 +0200 Subject: [PATCH 8/8] Ensure 1 thread available --- freqtrade/freqai/data_kitchen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 9f84e63b7..73717abce 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -97,7 +97,7 @@ class FreqaiDataKitchen: self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {}) if not self.freqai_config.get("data_kitchen_thread_count", 0): - self.thread_count = int(psutil.cpu_count() * 2 - 2) + self.thread_count = max(int(psutil.cpu_count() * 2 - 2), 1) else: self.thread_count = self.freqai_config["data_kitchen_thread_count"] self.train_dates: DataFrame = pd.DataFrame()