expose environment reward parameters to the user config

Author: robcaulk
Date: 2022-08-21 20:33:09 +02:00
parent d88a0dbf82
commit 29f0e01c4a
5 changed files with 28 additions and 32 deletions


@@ -62,12 +62,12 @@ class ReinforcementLearner_multiproc(BaseReinforcementLearningModel):
env_id = "train_env"
num_cpu = int(self.freqai_info["data_kitchen_thread_count"] / 2)
self.train_env = SubprocVecEnv([make_env(env_id, i, 1, train_df, prices_train,
-                                                 self.reward_params, self.CONV_WIDTH) for i
+                                                 self.reward_params, self.CONV_WIDTH, config=self.config) for i
in range(num_cpu)])
eval_env_id = 'eval_env'
self.eval_env = SubprocVecEnv([make_env(eval_env_id, i, 1, test_df, prices_test,
-                                                self.reward_params, self.CONV_WIDTH, monitor=True) for i
+                                                self.reward_params, self.CONV_WIDTH, monitor=True, config=self.config) for i
in range(num_cpu)])
self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=eval_freq,
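
For orientation, a minimal sketch of how the added config kwarg might thread through make_env into each vectorized environment instance, so that reward parameters defined in the user's configuration reach the environment. The make_env body, the MyRLEnv class name, and the keyword names below are illustrative assumptions, not the actual freqtrade implementation:

    from typing import Callable

    from stable_baselines3.common.monitor import Monitor


    def make_env(env_id: str, rank: int, seed: int, train_df, price,
                 reward_params, window_size, monitor=False,
                 config=None) -> Callable:
        """Illustrative sketch: return a thunk that builds one environment
        instance, now carrying the full user config so reward parameters
        can be overridden from the config file instead of being hardcoded."""
        config = config or {}

        def _init():
            # Hypothetical environment class; the real class name and
            # constructor signature in freqtrade may differ.
            env = MyRLEnv(df=train_df, prices=price, window_size=window_size,
                          reward_kwargs=reward_params, id=env_id,
                          seed=seed + rank, config=config)
            if monitor:
                # Wrap with a Monitor so the EvalCallback can read
                # episode statistics from the evaluation environments.
                env = Monitor(env)
            return env

        return _init

SubprocVecEnv expects a list of such zero-argument callables, one per worker process, which is why the diff passes config=self.config into make_env rather than into the environment constructor directly.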