fix multiproc callback, add continual learning to multiproc, fix totalprofit bug in env, set eval_freq automatically, improve default reward

robcaulk
2022-08-25 11:46:18 +02:00
parent d1bee29b1e
commit 94cfc8e63f
6 changed files with 58 additions and 62 deletions

ReinforcementLearner.py

@@ -27,7 +27,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
         policy_kwargs = dict(activation_fn=th.nn.ReLU,
                              net_arch=[512, 512, 256])
 
-        if dk.pair not in self.dd.model_dictionary or not self.continual_retraining:
+        if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
             model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
                                     tensorboard_log=Path(dk.data_path / "tensorboard"),
                                     **self.freqai_info['model_training_parameters']
@@ -61,7 +61,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
         """
         train_df = data_dictionary["train_features"]
         test_df = data_dictionary["test_features"]
-        eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df)
 
         self.train_env = MyRLEnv(df=train_df, prices=prices_train, window_size=self.CONV_WIDTH,
                                  reward_kwargs=self.reward_params, config=self.config)
@@ -69,7 +68,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
                                               window_size=self.CONV_WIDTH,
                                               reward_kwargs=self.reward_params, config=self.config))
         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
-                                          render=False, eval_freq=eval_freq,
+                                          render=False, eval_freq=len(train_df),
                                           best_model_save_path=str(dk.data_path))
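
A note on the change above: in stable-baselines3, EvalCallback's eval_freq is counted in calls to the callback, i.e. steps per environment, so eval_freq=len(train_df) evaluates the agent roughly once per pass over the training window instead of relying on the old eval_cycles config key. A minimal, self-contained sketch of the pattern (a generic CartPole environment and a made-up train_steps constant stand in for the FreqAI objects):

```python
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import EvalCallback

train_steps = 1000                      # stands in for len(train_df)
train_env = gym.make("CartPole-v1")     # stands in for MyRLEnv
eval_env = gym.make("CartPole-v1")

# Evaluate every `train_steps` environment steps, mirroring
# eval_freq=len(train_df) in the hunk above.
eval_callback = EvalCallback(eval_env, deterministic=True, render=False,
                             eval_freq=train_steps,
                             best_model_save_path="./best_model")

model = PPO("MlpPolicy", train_env)
model.learn(total_timesteps=5_000, callback=eval_callback)
```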
@@ -83,18 +82,19 @@ class MyRLEnv(Base5ActionRLEnv):
 
         # first, penalize if the action is not valid
         if not self._is_valid(action):
-            return -15
+            return -2
 
         pnl = self.get_unrealized_profit()
         rew = np.sign(pnl) * (pnl + 1)
         factor = 100
 
         # reward agent for entering trades
-        if action in (Actions.Long_enter.value, Actions.Short_enter.value):
+        if action in (Actions.Long_enter.value, Actions.Short_enter.value) \
+                and self._position == Positions.Neutral:
             return 25
 
         # discourage agent from not entering trades
         if action == Actions.Neutral.value and self._position == Positions.Neutral:
-            return -15
+            return -1
 
         max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
         trade_duration = self._current_tick - self._last_trade_tick
@@ -105,8 +105,8 @@ class MyRLEnv(Base5ActionRLEnv):
             factor *= 0.5
 
         # discourage sitting in position
-        if self._position in (Positions.Short, Positions.Long):
-            return -50 * trade_duration / max_trade_duration
+        if self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value:
+            return -1 * trade_duration / max_trade_duration
 
         # close long
         if action == Actions.Long_exit.value and self._position == Positions.Long:
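
Taken together, the reward hunks soften the flat penalties (invalid action -15 to -2, idling -15 to -1), grant the entry bonus only when the agent is actually flat, and scale the sitting-in-position penalty with trade duration instead of a flat -50. A self-contained sketch of the reshaped logic, with stand-in enums and arguments replacing the Base5ActionRLEnv internals (self._position, self._current_tick, and so on); the exit branches fall outside the shown hunks and are omitted:

```python
from enum import Enum

class Actions(Enum):
    Neutral = 0
    Long_enter = 1
    Long_exit = 2
    Short_enter = 3
    Short_exit = 4

class Positions(Enum):
    Short = 0
    Long = 1
    Neutral = 2

def reward_sketch(action: Actions, position: Positions, is_valid: bool,
                  current_tick: int, last_trade_tick: int,
                  max_trade_duration: int = 300) -> float:
    # softened penalty for invalid actions (was -15)
    if not is_valid:
        return -2
    # entry bonus now applies only when entering from a flat position
    if action in (Actions.Long_enter, Actions.Short_enter) and position == Positions.Neutral:
        return 25
    # gentle nudge to enter trades (was -15)
    if action == Actions.Neutral and position == Positions.Neutral:
        return -1
    # penalty for sitting in a position grows with trade duration (was flat -50)
    trade_duration = current_tick - last_trade_tick
    if position in (Positions.Short, Positions.Long) and action == Actions.Neutral:
        return -1 * trade_duration / max_trade_duration
    return 0.0  # exit rewards (Long_exit/Short_exit branches) omitted here
```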

ReinforcementLearner_multiproc.py

@@ -26,12 +26,19 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel):
 
         # model arch
         policy_kwargs = dict(activation_fn=th.nn.ReLU,
-                             net_arch=[512, 512, 256])
+                             net_arch=[256, 256])
 
-        model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
-                                tensorboard_log=Path(dk.full_path / "tensorboard"),
-                                **self.freqai_info['model_training_parameters']
-                                )
+        if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
+            model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
+                                    tensorboard_log=Path(dk.full_path / "tensorboard"),
+                                    **self.freqai_info['model_training_parameters']
+                                    )
+        else:
+            logger.info('Continual training activated - starting training from previously '
+                        'trained agent.')
+            model = self.dd.model_dictionary[dk.pair]
+            model.tensorboard_log = Path(dk.data_path / "tensorboard")
+            model.set_env(self.train_env)
 
         model.learn(
             total_timesteps=int(total_timesteps),
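
The new else branch brings the multiproc learner in line with ReinforcementLearner: rather than building a fresh model on each retrain, the previously trained agent is pulled from the model dictionary and pointed at the new environment. A minimal sketch of the underlying stable-baselines3 continuation pattern (CartPole stands in for the trading env; reset_num_timesteps is optional and not part of this commit):

```python
import gym
from stable_baselines3 import PPO

# Initial training run.
model = PPO("MlpPolicy", gym.make("CartPole-v1"))
model.learn(total_timesteps=2_000)

# Continual learning: swap in a fresh environment and keep training
# from the existing weights instead of reinitializing the network.
model.set_env(gym.make("CartPole-v1"))
model.learn(total_timesteps=2_000, reset_num_timesteps=False)
```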
@@ -57,30 +64,18 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel):
         test_df = data_dictionary["test_features"]
         eval_freq = self.freqai_info["rl_config"]["eval_cycles"] * len(test_df)
 
         # environments
-        if not self.train_env:
-            env_id = "train_env"
-            num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2)
-            self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train,
-                                                     self.reward_params, self.CONV_WIDTH,
-                                                     config=self.config) for i
-                                            in range(num_cpu)])
+        env_id = "train_env"
+        num_cpu = int(self.freqai_info["rl_config"]["thread_count"] / 2)
+        self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train,
+                                                 self.reward_params, self.CONV_WIDTH,
+                                                 config=self.config) for i
+                                        in range(num_cpu)])
 
-            eval_env_id = 'eval_env'
-            self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test,
-                                                    self.reward_params, self.CONV_WIDTH, monitor=True,
-                                                    config=self.config) for i
-                                           in range(num_cpu)])
-
-            self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
-                                              render=False, eval_freq=eval_freq,
-                                              best_model_save_path=dk.data_path)
-        else:
-            self.train_env.env_method('reset')
-            self.eval_env.env_method('reset')
-            self.train_env.env_method('reset_env', train_df, prices_train,
-                                      self.CONV_WIDTH, self.reward_params)
-            self.eval_env.env_method('reset_env', train_df, prices_train,
-                                     self.CONV_WIDTH, self.reward_params)
-            self.eval_callback.__init__(self.eval_env, deterministic=True,
-                                        render=False, eval_freq=eval_freq,
-                                        best_model_save_path=dk.data_path)
+        eval_env_id = 'eval_env'
+        self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test,
+                                                self.reward_params, self.CONV_WIDTH, monitor=True,
+                                                config=self.config) for i
+                                       in range(num_cpu)])
+
+        self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
+                                          render=False, eval_freq=eval_freq,
+                                          best_model_save_path=dk.data_path)
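
make_env above is FreqAI's own factory; the structure it follows is the standard stable-baselines3 recipe for SubprocVecEnv, where each worker process receives a closure that constructs and seeds its own environment instance. A minimal sketch with a generic gym environment (the FreqAI-specific arguments, dataframes, prices, and reward params, are omitted):

```python
import gym
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import SubprocVecEnv

def make_env(env_id: str, rank: int, seed: int, monitor: bool = False):
    """Return a closure that builds one env instance for worker `rank`."""
    def _init():
        env = gym.make(env_id)
        env.seed(seed + rank)  # distinct seed per worker (gym-era API)
        return Monitor(env) if monitor else env
    return _init

if __name__ == "__main__":  # SubprocVecEnv spawns processes; the guard is required
    num_cpu = 4
    train_env = SubprocVecEnv([make_env("CartPole-v1", i, 1) for i in range(num_cpu)])
    eval_env = SubprocVecEnv([make_env("CartPole-v1", i, 1, monitor=True)
                              for i in range(num_cpu)])
```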