add multiproc fix flake8
This commit is contained in: parent 469aa0d43f, commit d6f45a12ae
@@ -102,7 +102,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
             for action in Actions:
                 self.custom_info[f"{action.name}"] = 0
             return super().reset()

         def step(self, action: int):
             observation, step_reward, done, info = super().step(action)
             info = dict(
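The hunk above follows a common pattern for surfacing per-episode statistics from a gym environment: zero a counter for every action on reset(), bump the counters during the episode, and pass them out through the step info dict so a callback can pick them up. Below is a minimal, self-contained sketch of that pattern; the wrapper name ActionCountWrapper and the action_counts key are hypothetical and not part of this commit, and it assumes the classic 4-tuple gym step API that stable-baselines3 used at the time.

import gym


class ActionCountWrapper(gym.Wrapper):
    # Hypothetical illustration (not the freqtrade class): count how often each
    # discrete action is taken per episode and expose the counters via `info`.

    def reset(self, **kwargs):
        self.custom_info = {action: 0 for action in range(self.env.action_space.n)}
        return self.env.reset(**kwargs)

    def step(self, action):
        observation, reward, done, info = self.env.step(action)
        self.custom_info[action] += 1
        info = dict(info, action_counts=dict(self.custom_info))
        return observation, reward, done, info


# usage sketch: env = ActionCountWrapper(gym.make("CartPole-v0"))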
@@ -134,7 +134,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
             factor = 100.

             # reward agent for entering trades
-            if (action ==Actions.Long_enter.value
+            if (action == Actions.Long_enter.value
                     and self._position == Positions.Neutral):
                 self.custom_info[f"{Actions.Long_enter.name}"] += 1
                 return 25
@@ -174,6 +174,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
                 factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                 self.custom_info[f"{Actions.Short_exit.name}"] += 1
                 return float(pnl * factor)

             self.custom_info["Unknown"] += 1
             return 0.
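Pulled together, the reward logic touched by these hunks has roughly the shape sketched below. This is a simplified, hypothetical reconstruction for readability, not the committed code: helper names such as get_unrealized_profit, profit_aim and rr come from the surrounding freqtrade environment class, and the constants (100., 25, the default win_reward_factor of 2) are taken from the visible diff context.

def calculate_reward(self, action: int) -> float:
    # Hypothetical, condensed shape of the reward function (not the committed code).
    factor = 100.

    # reward agent for entering trades
    if action == Actions.Long_enter.value and self._position == Positions.Neutral:
        self.custom_info[Actions.Long_enter.name] += 1
        return 25.

    # reward agent for closing a profitable short, scaled by a configurable win factor
    if action == Actions.Short_exit.value and self._position == Positions.Short:
        pnl = self.get_unrealized_profit()
        if pnl > self.profit_aim * self.rr:
            factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
        self.custom_info[Actions.Short_exit.name] += 1
        return float(pnl * factor)

    # anything not handled above is counted and gets no reward
    self.custom_info["Unknown"] += 1
    return 0.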
@@ -8,7 +8,7 @@ from stable_baselines3.common.vec_env import SubprocVecEnv

 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
-from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env
+from freqtrade.freqai.RL.BaseReinforcementLearningModel import TensorboardCallback, make_env


 logger = logging.getLogger(__name__)
@@ -49,3 +49,5 @@ class ReinforcementLearner_multiproc(ReinforcementLearner):
         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                           render=False, eval_freq=len(train_df),
                                           best_model_save_path=str(dk.data_path))
+
+        self.tensorboard_callback = TensorboardCallback()