expose environment reward parameters to the user config
@@ -57,26 +57,20 @@ class MyRLEnv(Base5ActionRLEnv):

         # close long
         if action == Actions.Long_exit.value and self._position == Positions.Long:
-            last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open)
-            current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open)
-            return float(np.log(current_price) - np.log(last_trade_price))
-
-        if action == Actions.Long_exit.value and self._position == Positions.Long:
-            if self.close_trade_profit[-1] > self.profit_aim * self.rr:
-                last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open)
-                current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open)
-                return float((np.log(current_price) - np.log(last_trade_price)) * 2)
+            last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open)
+            current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open)
+            factor = 1
+            if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr:
+                factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
+            return float((np.log(current_price) - np.log(last_trade_price)) * factor)

         # close short
         if action == Actions.Short_exit.value and self._position == Positions.Short:
-            last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open)
-            current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open)
-            return float(np.log(last_trade_price) - np.log(current_price))
-
-        if action == Actions.Short_exit.value and self._position == Positions.Short:
-            if self.close_trade_profit[-1] > self.profit_aim * self.rr:
-                last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open)
-                current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open)
-                return float((np.log(last_trade_price) - np.log(current_price)) * 2)
+            last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open)
+            current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open)
+            factor = 1
+            if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr:
+                factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
+            return float((np.log(last_trade_price) - np.log(current_price)) * factor)

         return 0.
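The hunk above replaces the hard-coded win multiplier of 2 with a value read from the user's rl_config. A minimal runnable sketch of how the new lookup behaves, assuming a model_reward_parameters dict shaped like the one referenced in the diff (the values of profit_aim, rr, the sample prices, and the factor of 3 are illustrative, not from the commit):

    import numpy as np

    # Hypothetical user config fragment; only 'model_reward_parameters' and
    # 'win_reward_factor' are named in the diff, the values here are made up.
    rl_config = {"model_reward_parameters": {"win_reward_factor": 3}}

    profit_aim, rr = 0.02, 1.0        # illustrative reward targets
    close_trade_profit = [0.05]       # last closed trade beat profit_aim * rr

    factor = 1
    if close_trade_profit and close_trade_profit[-1] > profit_aim * rr:
        # falls back to the old hard-coded multiplier of 2 if the key is absent
        factor = rl_config["model_reward_parameters"].get("win_reward_factor", 2)

    last_trade_price, current_price = 100.0, 105.0
    reward = float((np.log(current_price) - np.log(last_trade_price)) * factor)
    print(f"{reward:.4f}")            # ~0.1464 with factor=3 vs ~0.0488 unscaled

Because .get() defaults to 2, existing configs without win_reward_factor keep the previous behavior unchanged.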
@@ -62,12 +62,12 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel):

         env_id = "train_env"
         num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2)
         self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train,
-                                                 self.reward_params, self.CONV_WIDTH) for i
+                                                 self.reward_params, self.CONV_WIDTH, config=self.config) for i
                                         in range(num_cpu)])

         eval_env_id = 'eval_env'
         self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test,
-                                               self.reward_params, self.CONV_WIDTH, monitor=True) for i
+                                               self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i
                                        in range(num_cpu)])
         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                           render=False, eval_freq=eval_freq,
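For context, make_env returns a zero-argument callable that builds one environment instance, which is the form SubprocVecEnv expects. A sketch of a factory compatible with the new call sites, assuming MyRLEnv (the class from the first hunk) accepts these keyword arguments; the exact signature in the repository may differ:

    from typing import Callable

    import gym
    from stable_baselines3.common.monitor import Monitor
    from stable_baselines3.common.utils import set_random_seed


    def make_env(env_id: str, rank: int, seed: int, train_df, price,
                 reward_params: dict, window_size: int, monitor: bool = False,
                 config: dict = None) -> Callable:
        def _init() -> gym.Env:
            # Forwarding `config` into the env is the point of this commit:
            # it lets calculate_reward() read user-set model_reward_parameters.
            # MyRLEnv is the environment class shown in the first hunk.
            env = MyRLEnv(df=train_df, prices=price, window_size=window_size,
                          reward_kwargs=reward_params, id=env_id,
                          seed=seed + rank, config=config or {})
            if monitor:
                env = Monitor(env)
            return env
        set_random_seed(seed)
        return _init

Each subprocess then calls its _init closure once, so every worker env gets its own seed (seed + rank) while sharing the same user config.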