From 2285ca7d2a214c811c17e371e4780216d70760dc Mon Sep 17 00:00:00 2001
From: robcaulk
Date: Wed, 14 Dec 2022 18:22:20 +0100
Subject: [PATCH 1/6] add dp to multiproc

---
 freqtrade/freqai/RL/BaseReinforcementLearningModel.py   | 6 ++++--
 .../prediction_models/ReinforcementLearner_multiproc.py | 4 ++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py
index 5e9b81108..b77f21d58 100644
--- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py
+++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py
@@ -24,6 +24,7 @@ from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
 from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions
 from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
 from freqtrade.persistence import Trade
+from freqtrade.data.dataprovider import DataProvider


 logger = logging.getLogger(__name__)
@@ -384,7 +385,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
 def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,
              seed: int, train_df: DataFrame, price: DataFrame,
              reward_params: Dict[str, int], window_size: int, monitor: bool = False,
-             config: Dict[str, Any] = {}) -> Callable:
+             config: Dict[str, Any] = {}, dp: DataProvider = None) -> Callable:
     """
     Utility function for multiprocessed env.

@@ -398,7 +399,8 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,
     def _init() -> gym.Env:

         env = MyRLEnv(df=train_df, prices=price, window_size=window_size,
-                      reward_kwargs=reward_params, id=env_id, seed=seed + rank, config=config)
+                      reward_kwargs=reward_params, id=env_id, seed=seed + rank,
+                      config=config, dp=dp)
         if monitor:
             env = Monitor(env)
         return env
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py
index 32a2a2076..c9b824978 100644
--- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py
+++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py
@@ -37,14 +37,14 @@ class ReinforcementLearner_multiproc(ReinforcementLearner):
         env_id = "train_env"
         self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train,
                                                  self.reward_params, self.CONV_WIDTH, monitor=True,
-                                                 config=self.config) for i
+                                                 config=self.config, dp=self.data_provider) for i
                                         in range(self.max_threads)])

         eval_env_id = 'eval_env'
         self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
                                                 test_df, prices_test,
                                                 self.reward_params, self.CONV_WIDTH, monitor=True,
-                                                config=self.config) for i
+                                                config=self.config, dp=self.data_provider) for i
                                        in range(self.max_threads)])

         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                           render=False, eval_freq=len(train_df),
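Review note on PATCH 1/6: `make_env` follows the standard stable-baselines3 factory pattern. `SubprocVecEnv` takes a list of zero-argument callables and runs each one in its own subprocess, which is why per-worker differences such as the `seed + rank` offset must be baked into the closure before it is shipped to the worker. A minimal, self-contained sketch of the same pattern, using a stock gym environment in place of freqtrade's `MyRLEnv` and assuming the gym <= 0.21 seeding API that stable-baselines3 1.x targets:

```python
import gym
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.vec_env import SubprocVecEnv


def make_env(env_id: str, rank: int, seed: int = 0):
    """Return a zero-argument callable that builds one worker env."""
    def _init() -> gym.Env:
        env = gym.make(env_id)
        env.seed(seed + rank)  # distinct seed per subprocess
        return env
    set_random_seed(seed)
    return _init


if __name__ == "__main__":
    # SubprocVecEnv forks one process per callable and steps them in parallel.
    vec_env = SubprocVecEnv([make_env("CartPole-v1", i) for i in range(4)])
    obs = vec_env.reset()
```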
From dac1c8ab894c345649e158a14105bac8f76e2c35 Mon Sep 17 00:00:00 2001
From: robcaulk
Date: Wed, 14 Dec 2022 18:28:52 +0100
Subject: [PATCH 2/6] fix isort

---
 freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py
index b77f21d58..0231124ff 100644
--- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py
+++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py
@@ -17,6 +17,7 @@ from stable_baselines3.common.monitor import Monitor
 from stable_baselines3.common.utils import set_random_seed
 from stable_baselines3.common.vec_env import SubprocVecEnv

+from freqtrade.data.dataprovider import DataProvider
 from freqtrade.exceptions import OperationalException
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.freqai_interface import IFreqaiModel
@@ -24,7 +25,6 @@ from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
 from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions
 from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
 from freqtrade.persistence import Trade
-from freqtrade.data.dataprovider import DataProvider


 logger = logging.getLogger(__name__)
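Review note on PATCH 2/6: this is the grouping isort enforces: standard library, third-party, then first-party imports, each block alphabetized and blank-line separated. PATCH 1/6 had appended the `DataProvider` import after `freqtrade.persistence`, out of order within the first-party block. The convention, sketched on the imports touched here:

```python
# standard library
import logging

# third party
from pandas import DataFrame
from stable_baselines3.common.vec_env import SubprocVecEnv

# first party, alphabetized -- where the moved import belongs
from freqtrade.data.dataprovider import DataProvider
from freqtrade.exceptions import OperationalException
from freqtrade.persistence import Trade
```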
Deactivating.") @@ -213,7 +206,7 @@ class BaseEnvironment(gym.Env): """ features_window = self.signal_features[( self._current_tick - self.window_size):self._current_tick] - if self.add_state_info and self.live: + if self.add_state_info: features_and_state = DataFrame(np.zeros((len(features_window), 3)), columns=['current_profit_pct', 'position', diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index b77f21d58..a41f02cba 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -17,6 +17,7 @@ from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv +from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel @@ -24,7 +25,6 @@ from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback from freqtrade.persistence import Trade -from freqtrade.data.dataprovider import DataProvider logger = logging.getLogger(__name__) @@ -144,18 +144,24 @@ class BaseReinforcementLearningModel(IFreqaiModel): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] + env_info = {"live": False} + if self.data_provider: + env_info["live"] = self.data_provider.runmode in (RunMode.DRY_RUN, RunMode.LIVE) + env_info["fee"] = self.data_provider._exchange \ + .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config, - dp=self.data_provider) + env_info=env_info) self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, window_size=self.CONV_WIDTH, reward_kwargs=self.reward_params, config=self.config, - dp=self.data_provider)) + env_info=env_info)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @@ -385,7 +391,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, reward_params: Dict[str, int], window_size: int, monitor: bool = False, - config: Dict[str, Any] = {}, dp: DataProvider = None) -> Callable: + config: Dict[str, Any] = {}, env_info: Dict[str, Any] = {}) -> Callable: """ Utility function for multiprocessed env. 
@@ -400,7 +406,7 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,

         env = MyRLEnv(df=train_df, prices=price, window_size=window_size,
                       reward_kwargs=reward_params, id=env_id, seed=seed + rank,
-                      config=config, dp=dp)
+                      config=config, env_info=env_info)
         if monitor:
             env = Monitor(env)
         return env
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py
index c9b824978..58735e78f 100644
--- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py
+++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py
@@ -5,6 +5,7 @@ from pandas import DataFrame
 from stable_baselines3.common.callbacks import EvalCallback
 from stable_baselines3.common.vec_env import SubprocVecEnv

+from freqtrade.enums import RunMode
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
 from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env
@@ -34,17 +35,23 @@ class ReinforcementLearner_multiproc(ReinforcementLearner):
         train_df = data_dictionary["train_features"]
         test_df = data_dictionary["test_features"]

+        env_info = {"live": False}
+        if self.data_provider:
+            env_info["live"] = self.data_provider.runmode in (RunMode.DRY_RUN, RunMode.LIVE)
+            env_info["fee"] = self.data_provider._exchange \
+                .get_fee(symbol=self.data_provider.current_whitelist()[0])  # type: ignore
+
         env_id = "train_env"
         self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train,
                                                  self.reward_params, self.CONV_WIDTH, monitor=True,
-                                                 config=self.config, dp=self.data_provider) for i
+                                                 config=self.config, env_info=env_info) for i
                                         in range(self.max_threads)])

         eval_env_id = 'eval_env'
         self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
                                                 test_df, prices_test,
                                                 self.reward_params, self.CONV_WIDTH, monitor=True,
-                                                config=self.config, dp=self.data_provider) for i
+                                                config=self.config, env_info=env_info) for i
                                        in range(self.max_threads)])

         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                           render=False, eval_freq=len(train_df),
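Review note on PATCH 3/6: a plausible motivation for replacing the `dp` argument with a plain `env_info` dict, beyond readability, is serialization. `SubprocVecEnv` ships each `_init` closure to its worker process via pickling, and everything the closure captures must survive that trip. A dict of primitives does; a `DataProvider` holding a live exchange handle generally would not. A small illustration of the difference (the `LiveConnection` class is hypothetical, standing in for the kind of state a real `DataProvider` carries):

```python
import pickle
import threading


class LiveConnection:
    """Hypothetical stand-in for an exchange handle held by a DataProvider."""
    def __init__(self) -> None:
        self._lock = threading.Lock()  # locks/sockets cannot be pickled


env_info = {"live": False, "fee": 0.0015}
pickle.dumps(env_info)  # fine: plain data crosses the process boundary

try:
    pickle.dumps(LiveConnection())
except TypeError as exc:
    print(f"not picklable: {exc}")  # cannot pickle '_thread.lock' object
```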
Deactivating.") + self.seed(seed) + self.reset_env(df, prices, window_size, reward_kwargs, starting_point) def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): From 7b4abd5ef50f3c6f84c6604fc1f79ff4b92c2575 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Dec 2022 12:25:33 +0100 Subject: [PATCH 5/6] use a dictionary to make code more readable --- freqtrade/freqai/RL/BaseEnvironment.py | 8 ++-- .../RL/BaseReinforcementLearningModel.py | 40 ++++++++++--------- .../ReinforcementLearner_multiproc.py | 18 ++++----- 3 files changed, 32 insertions(+), 34 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 49361cbde..39e8609f5 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -44,8 +44,8 @@ class BaseEnvironment(gym.Env): def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1, config: dict = {}, - env_info: dict = {}): + id: str = 'baseenv-1', seed: int = 1, config: dict = {}, live: bool = False, + fee: float = 0.0015): """ Initializes the training/eval environment. :param df: dataframe of features @@ -67,12 +67,12 @@ class BaseEnvironment(gym.Env): if self.config.get('fee', None) is not None: self.fee = self.config['fee'] else: - self.fee = env_info.get('fee', 0.0015) + self.fee = fee # set here to default 5Ac, but all children envs can override this self.actions: Type[Enum] = BaseActions self.tensorboard_metrics: dict = {} - self.live = env_info.get('live', False) + self.live = live if not self.live and self.add_state_info: self.add_state_info = False logger.warning("add_state_info is not available in backtesting. 
Deactivating.") diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index a41f02cba..62963f194 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -17,7 +17,6 @@ from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel @@ -144,24 +143,14 @@ class BaseReinforcementLearningModel(IFreqaiModel): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - env_info = {"live": False} - if self.data_provider: - env_info["live"] = self.data_provider.runmode in (RunMode.DRY_RUN, RunMode.LIVE) - env_info["fee"] = self.data_provider._exchange \ - .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + env_info = self.pack_env_dict() self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, - config=self.config, - env_info=env_info) + **env_info) self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, - window_size=self.CONV_WIDTH, - reward_kwargs=self.reward_params, - config=self.config, - env_info=env_info)) + **env_info)) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) @@ -169,6 +158,20 @@ class BaseReinforcementLearningModel(IFreqaiModel): actions = self.train_env.get_actions() self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions) + def pack_env_dict(self) -> Dict[str, Any]: + """ + Create dictionary of environment arguments + """ + env_info = {"window_size": self.CONV_WIDTH, + "reward_kwargs": self.reward_params, + "config": self.config, + "live": self.live} + if self.data_provider: + env_info["fee"] = self.data_provider._exchange \ + .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + + return env_info + @abstractmethod def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): """ @@ -390,8 +393,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, seed: int, train_df: DataFrame, price: DataFrame, - reward_params: Dict[str, int], window_size: int, monitor: bool = False, - config: Dict[str, Any] = {}, env_info: Dict[str, Any] = {}) -> Callable: + monitor: bool = False, + env_info: Dict[str, Any] = {}) -> Callable: """ Utility function for multiprocessed env. 
@@ -404,9 +407,8 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, def _init() -> gym.Env: - env = MyRLEnv(df=train_df, prices=price, window_size=window_size, - reward_kwargs=reward_params, id=env_id, seed=seed + rank, - config=config, env_info=env_info) + env = MyRLEnv(df=train_df, prices=price, id=env_id, seed=seed + rank, + **env_info) if monitor: env = Monitor(env) return env diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 58735e78f..a9be87b0b 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -5,7 +5,6 @@ from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv -from freqtrade.enums import RunMode from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env @@ -35,23 +34,20 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] - env_info = {"live": False} - if self.data_provider: - env_info["live"] = self.data_provider.runmode in (RunMode.DRY_RUN, RunMode.LIVE) - env_info["fee"] = self.data_provider._exchange \ - .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + env_info = self.pack_env_dict() env_id = "train_env" - self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, - self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config, env_info=env_info) for i + self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, + train_df, prices_train, + monitor=True, + env_info=env_info) for i in range(self.max_threads)]) eval_env_id = 'eval_env' self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, test_df, prices_test, - self.reward_params, self.CONV_WIDTH, monitor=True, - config=self.config, env_info=env_info) for i + monitor=True, + env_info=env_info) for i in range(self.max_threads)]) self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), From 581a5296cc7a76ea927eec9157559e426f170daa Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 15 Dec 2022 16:50:08 +0100 Subject: [PATCH 6/6] fix docstrings to reflect new env_info changes --- freqtrade/freqai/RL/BaseEnvironment.py | 3 ++- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 39e8609f5..17d82a3ba 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -56,7 +56,8 @@ class BaseEnvironment(gym.Env): :param id: string id of the environment (used in backend for multiprocessed env) :param seed: Sets the seed of the environment higher in the gym.Env object :param config: Typical user configuration file - :param env_info: Environment info dictionary, used to pass live status, fee, etc. + :param live: Whether or not this environment is active in dry/live/backtesting + :param fee: The fee to use for environmental interactions. 
""" self.config = config self.rl_config = config['freqai']['rl_config'] diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 62963f194..d7e3a3cad 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -402,6 +402,7 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, :param num_env: (int) the number of environment you wish to have in subprocesses :param seed: (int) the inital seed for RNG :param rank: (int) index of the subprocess + :param env_info: (dict) all required arguments to instantiate the environment. :return: (Callable) """