add dp (data_provider) to multiproc env creation for train and eval SubprocVecEnv

This commit is contained in:
robcaulk
2022-12-14 18:22:20 +01:00
parent 350cebb0a8
commit 2285ca7d2a
2 changed files with 6 additions and 4 deletions

View File

@@ -37,14 +37,14 @@ class ReinforcementLearner_multiproc(ReinforcementLearner):
env_id = "train_env"
self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train,
self.reward_params, self.CONV_WIDTH, monitor=True,
config=self.config) for i
config=self.config, dp=self.data_provider) for i
in range(self.max_threads)])
eval_env_id = 'eval_env'
self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
test_df, prices_test,
self.reward_params, self.CONV_WIDTH, monitor=True,
config=self.config) for i
config=self.config, dp=self.data_provider) for i
in range(self.max_threads)])
self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=len(train_df),