fix multiproc callback, add continual learning to multiproc, fix total_profit bug in env, set eval_freq automatically, improve default reward
commit 94cfc8e63f (parent d1bee29b1e)
@@ -56,9 +56,9 @@
     "freqai": {
         "enabled": true,
         "model_save_type": "stable_baselines",
-        "conv_width": 10,
+        "conv_width": 4,
         "purge_old_models": true,
-        "train_period_days": 10,
+        "train_period_days": 5,
         "backtest_period_days": 2,
         "identifier": "unique-id",
         "data_kitchen_thread_count": 2,
@@ -72,7 +72,7 @@
                 "30m"
             ],
             "indicator_max_period_candles": 10,
-            "indicator_periods_candles": [5, 10]
+            "indicator_periods_candles": [5]
         },
         "data_split_parameters": {
             "test_size": 0.5,
@@ -85,13 +85,13 @@
             "verbose": 1
         },
         "rl_config": {
-            "train_cycles": 3,
-            "eval_cycles": 3,
+            "train_cycles": 6,
             "thread_count": 4,
-            "max_trade_duration_candles": 100,
+            "max_trade_duration_candles": 300,
             "model_type": "PPO",
             "policy_type": "MlpPolicy",
-            "continual_retraining": true,
+            "continual_learning": false,
+            "max_training_drawdown_pct": 0.5,
             "model_reward_parameters": {
                 "rr": 1,
                 "profit_aim": 0.02,
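
For reference, the renamed and added rl_config keys are read with safe defaults on the Python side. A minimal sketch (the `config` dict here stands in for the loaded freqtrade configuration; this snippet is not part of the commit):

# Sketch: how the renamed/added rl_config keys are consumed, with their code defaults.
rl_config = config["freqai"]["rl_config"]
continual_learning = rl_config.get("continual_learning", False)   # renamed from "continual_retraining"
max_drawdown = rl_config.get("max_training_drawdown_pct", 0.8)    # new key; 0.8 if omitted
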
@@ -1,3 +1,5 @@
+# Example of a 3 action environment.
+
 # import logging
 # from enum import Enum
 
@@ -77,8 +77,7 @@ class Base5ActionRLEnv(gym.Env):
         self._position = Positions.Neutral
         self._position_history: list = [None]
         self.total_reward: float = 0
-        self._total_profit: float = 0
-        self._first_rendering: bool = False
+        self._total_profit: float = 1
         self.history: dict = {}
         self.trade_history: list = []
 
@@ -101,7 +100,6 @@ class Base5ActionRLEnv(gym.Env):
 
         self.total_reward = 0.
         self._total_profit = 1.  # unit
-        self._first_rendering = True
         self.history = {}
         self.trade_history = []
         self.portfolio_log_returns = np.zeros(len(self.prices))
@@ -165,7 +163,7 @@ class Base5ActionRLEnv(gym.Env):
                 {'price': self.current_price(), 'index': self._current_tick,
                  'type': trade_type})
 
-        if self._total_profit < 0.5:
+        if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8):
             self._done = True
 
         self._position_history.append(self._position)
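
Because _total_profit now starts at 1 instead of 0, this early-stop condition reads as a maximum training drawdown rather than a hard-coded threshold. A worked example using only values that appear in this diff (not code from the commit):

# Worked example of the early-stop condition above.
total_profit = 0.45                    # running product of trade returns, starts at 1.0
max_training_drawdown_pct = 0.5        # value used in the example config; code default is 0.8
done = total_profit < 1 - max_training_drawdown_pct   # 0.45 < 0.5 -> True, episode ends
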
@@ -293,7 +291,6 @@ class Base5ActionRLEnv(gym.Env):
         return 0.
 
     def _update_profit(self, action):
-        # if self._is_trade(action) or self._done:
         if self._is_trade(action) or self._done:
             pnl = self.get_unrealized_profit()
 

@@ -23,8 +23,8 @@ logger = logging.getLogger(__name__)
 
 torch.multiprocessing.set_sharing_strategy('file_system')
 
-SB3_MODELS = ['PPO', 'A2C', 'DQN', 'TD3', 'SAC']
-SB3_CONTRIB_MODELS = ['TRPO', 'ARS']
+SB3_MODELS = ['PPO', 'A2C', 'DQN']
+SB3_CONTRIB_MODELS = ['TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO']
 
 
 class BaseReinforcementLearningModel(IFreqaiModel):
@@ -41,7 +41,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         self.eval_callback: EvalCallback = None
         self.model_type = self.freqai_info['rl_config']['model_type']
         self.rl_config = self.freqai_info['rl_config']
-        self.continual_retraining = self.rl_config.get('continual_retraining', False)
+        self.continual_learning = self.rl_config.get('continual_learning', False)
         if self.model_type in SB3_MODELS:
             import_str = 'stable_baselines3'
         elif self.model_type in SB3_CONTRIB_MODELS:
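
The shortened SB3_MODELS list and the extended SB3_CONTRIB_MODELS list feed the import_str branching above. A minimal sketch of the dynamic lookup this implies (the helper name is hypothetical; the package names stable_baselines3 and sb3_contrib are real):

# Sketch: resolve the agent class from the configured model_type.
import importlib

def load_model_class(model_type: str):
    if model_type in ('PPO', 'A2C', 'DQN'):
        import_str = 'stable_baselines3'
    elif model_type in ('TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO'):
        import_str = 'sb3_contrib'
    else:
        raise ValueError(f'Unsupported model_type: {model_type}')
    return getattr(importlib.import_module(import_str), model_type)
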
@@ -109,7 +109,6 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         """
         train_df = data_dictionary["train_features"]
         test_df = data_dictionary["test_features"]
-        eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df)
 
         self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH,
                                  reward_kwargs=self.reward_params, config=self.config)
@@ -117,7 +116,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
                                         window_size=self.CONV_WIDTH,
                                         reward_kwargs=self.reward_params, config=self.config))
         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
-                                          render=False, eval_freq=eval_freq,
+                                          render=False, eval_freq=len(train_df),
                                           best_model_save_path=str(dk.data_path))
 
     @abstractmethod
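
Here the evaluation frequency is no longer taken from the removed eval_cycles key; it is derived from the data itself, so the EvalCallback fires roughly once per pass over the training window. A minimal sketch of the wiring, assuming stable-baselines3 and treating eval_env, train_df and dk as the objects built above:

# Sketch: eval_freq tied to the training-set length instead of a user-set eval_cycles.
from stable_baselines3.common.callbacks import EvalCallback

eval_callback = EvalCallback(
    eval_env,
    deterministic=True,
    render=False,
    eval_freq=len(train_df),                 # evaluate after each traversal of the train window
    best_model_save_path=str(dk.data_path),
)
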
@@ -138,6 +137,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         for trade in open_trades:
             if trade.pair == pair:
                 # FIXME: mypy typing doesnt like that strategy may be "None" (it never will be)
+                # FIXME: get_rate and trade_udration shouldn't work with backtesting,
+                # we need to use candle dates and prices to compute that.
                 current_value = self.strategy.dp._exchange.get_rate(
                     pair, refresh=False, side="exit", is_short=trade.is_short)
                 openrate = trade.open_rate
@@ -256,7 +257,7 @@ def make_env(env_id: str, rank: int, seed: int, train_df: DataFrame, price: Data
         env = MyRLEnv(df=train_df, prices=price, window_size=window_size,
                       reward_kwargs=reward_params, id=env_id, seed=seed + rank, config=config)
         if monitor:
-            env = Monitor(env, ".")
+            env = Monitor(env)
         return env
     set_random_seed(seed)
     return _init
@@ -272,18 +273,19 @@ class MyRLEnv(Base5ActionRLEnv):
 
         # first, penalize if the action is not valid
         if not self._is_valid(action):
-            return -15
+            return -2
 
         pnl = self.get_unrealized_profit()
         rew = np.sign(pnl) * (pnl + 1)
         factor = 100
 
         # reward agent for entering trades
-        if action in (Actions.Long_enter.value, Actions.Short_enter.value):
+        if action in (Actions.Long_enter.value, Actions.Short_enter.value) \
+                and self._position == Positions.Neutral:
             return 25
         # discourage agent from not entering trades
         if action == Actions.Neutral.value and self._position == Positions.Neutral:
-            return -15
+            return -1
 
         max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
         trade_duration = self._current_tick - self._last_trade_tick
@@ -294,8 +296,8 @@ class MyRLEnv(Base5ActionRLEnv):
             factor *= 0.5
 
         # discourage sitting in position
-        if self._position in (Positions.Short, Positions.Long):
-            return -50 * trade_duration / max_trade_duration
+        if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value:
+            return -1 * trade_duration / max_trade_duration
 
         # close long
         if action == Actions.Long_exit.value and self._position == Positions.Long:
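
Taken together, the reward edits above flatten the penalty scale: invalid actions cost -2 instead of -15, idling while flat costs -1 instead of -15, entries are rewarded only when opened from a neutral position, and the sitting-in-position penalty is applied only while holding and doing nothing, scaled by -1 instead of -50. A condensed, self-contained sketch of that shape (the boolean arguments are stand-ins for the env checks; the close-long/close-short branches are omitted):

# Sketch of the reshaped default reward; constants taken from this diff.
def shaped_reward(is_valid: bool, entering_from_neutral: bool, idle_while_flat: bool,
                  sitting_in_position: bool, trade_duration: int,
                  max_trade_duration: int = 300) -> float:
    if not is_valid:
        return -2.0                     # was -15
    if entering_from_neutral:
        return 25.0                     # entries now rewarded only from Positions.Neutral
    if idle_while_flat:
        return -1.0                     # was -15
    if sitting_in_position:
        return -1.0 * trade_duration / max_trade_duration   # was -50 * ...
    return 0.0                          # exit branches unchanged, omitted here
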
@@ -27,7 +27,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
         policy_kwargs = dict(activation_fn=th.nn.ReLU,
                              net_arch=[512, 512, 256])
 
-        if dk.pair not in self.dd.model_dictionary or not self.continual_retraining:
+        if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
             model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
                                     tensorboard_log=Path(dk.data_path / "tensorboard"),
                                     **self.freqai_info['model_training_parameters']
@@ -61,7 +61,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
         """
         train_df = data_dictionary["train_features"]
         test_df = data_dictionary["test_features"]
-        eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df)
 
         self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH,
                                  reward_kwargs=self.reward_params, config=self.config)
@@ -69,7 +68,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
                                         window_size=self.CONV_WIDTH,
                                         reward_kwargs=self.reward_params, config=self.config))
         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
-                                          render=False, eval_freq=eval_freq,
+                                          render=False, eval_freq=len(train_df),
                                           best_model_save_path=str(dk.data_path))
 
 
@@ -83,18 +82,19 @@ class MyRLEnv(Base5ActionRLEnv):
 
         # first, penalize if the action is not valid
         if not self._is_valid(action):
-            return -15
+            return -2
 
         pnl = self.get_unrealized_profit()
         rew = np.sign(pnl) * (pnl + 1)
         factor = 100
 
         # reward agent for entering trades
-        if action in (Actions.Long_enter.value, Actions.Short_enter.value):
+        if action in (Actions.Long_enter.value, Actions.Short_enter.value) \
+                and self._position == Positions.Neutral:
             return 25
         # discourage agent from not entering trades
         if action == Actions.Neutral.value and self._position == Positions.Neutral:
-            return -15
+            return -1
 
         max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
         trade_duration = self._current_tick - self._last_trade_tick
@@ -105,8 +105,8 @@ class MyRLEnv(Base5ActionRLEnv):
             factor *= 0.5
 
         # discourage sitting in position
-        if self._position in (Positions.Short, Positions.Long):
-            return -50 * trade_duration / max_trade_duration
+        if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value:
+            return -1 * trade_duration / max_trade_duration
 
         # close long
         if action == Actions.Long_exit.value and self._position == Positions.Long:

@@ -26,12 +26,19 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel):
 
         # model arch
         policy_kwargs = dict(activation_fn=th.nn.ReLU,
-                             net_arch=[512, 512, 256])
+                             net_arch=[256, 256])
 
+        if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
             model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
                                     tensorboard_log=Path(dk.full_path / "tensorboard"),
                                     **self.freqai_info['model_training_parameters']
                                     )
+        else:
+            logger.info('Continual training activated - starting training from previously '
+                        'trained agent.')
+            model = self.dd.model_dictionary[dk.pair]
+            model.tensorboard_log = Path(dk.data_path / "tensorboard")
+            model.set_env(self.train_env)
 
         model.learn(
             total_timesteps=int(total_timesteps),
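
This new else-branch is what the commit message calls continual learning: instead of instantiating a fresh agent on every retraining, the previously trained agent for the pair is pulled from the data drawer, pointed at the new training environment, and trained further. A stand-alone sketch of the same pattern with stable-baselines3 (model_store and fit_sketch are hypothetical stand-ins for self.dd.model_dictionary and the fit method):

# Sketch of the continual-learning branch above, reduced to its essentials.
from stable_baselines3 import PPO

model_store: dict = {}          # stand-in for self.dd.model_dictionary, keyed by pair

def fit_sketch(pair: str, train_env, total_timesteps: int, continual_learning: bool = False):
    if pair not in model_store or not continual_learning:
        model = PPO("MlpPolicy", train_env)       # fresh agent, as before
    else:
        model = model_store[pair]                 # reuse the previously trained agent
        model.set_env(train_env)                  # attach the new training window
    model.learn(total_timesteps=int(total_timesteps))
    model_store[pair] = model
    return model
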
@@ -57,8 +64,6 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel):
         test_df = data_dictionary["test_features"]
         eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df)
 
-        # environments
-        if not self.train_env:
         env_id = "train_env"
         num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2)
         self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train,
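
Related to the multiproc callback fix, make_env now wraps each worker in Monitor(env) rather than Monitor(env, "."), so the parallel workers no longer all write monitor files into the working directory. A generic sketch of the factory pattern SubprocVecEnv expects, using a stock gym environment as a placeholder for MyRLEnv (names other than the stable-baselines3 imports are illustrative):

# Sketch: one picklable factory per SubprocVecEnv worker, Monitor without a log path.
import gym
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.vec_env import SubprocVecEnv

def make_env_sketch(env_name: str, rank: int, seed: int, monitor: bool = True):
    def _init():
        env = gym.make(env_name)
        env.seed(seed + rank)            # per-worker seed offset (gym < 0.26 API, as used by SB3 1.x)
        if monitor:
            env = Monitor(env)           # no log directory, unlike the old Monitor(env, ".")
        return env
    set_random_seed(seed)
    return _init

if __name__ == "__main__":
    train_env = SubprocVecEnv([make_env_sketch("CartPole-v1", i, seed=1) for i in range(2)])
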
@@ -74,13 +79,3 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel):
         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                           render=False, eval_freq=eval_freq,
                                           best_model_save_path=dk.data_path)
-        else:
-            self.train_env.env_method('reset')
-            self.eval_env.env_method('reset')
-            self.train_env.env_method('reset_env', train_df, prices_train,
-                                       self.CONV_WIDTH, self.reward_params)
-            self.eval_env.env_method('reset_env', train_df, prices_train,
-                                      self.CONV_WIDTH, self.reward_params)
-            self.eval_callback.__init__(self.eval_env, deterministic=True,
-                                        render=False, eval_freq=eval_freq,
-                                        best_model_save_path=dk.data_path)