fix generic reward, add time duration to reward

robcaulk
2022-08-23 14:58:38 +02:00
parent 280a1dc3f8
commit b26ed7dea4
5 changed files with 43 additions and 45 deletions
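
In short: the reward for closing a position is no longer a log-price difference between entry and exit, but the unrealized PnL scaled by a duration factor that rewards trades closed within max_trade_duration_candles (1.5x) and penalizes trades held longer (0.5x), with an extra win_reward_factor for trades that beat profit_aim * rr. Below is a minimal sketch of that shaping as a standalone function; the signature and the hit_profit_aim flag are illustrative only, while the constants, key names, and defaults come from the diff that follows.

# Sketch only: the real logic lives in MyRLEnv.calculate_reward (see diff below).
def duration_scaled_reward(pnl: float, trade_duration: int,
                           max_trade_duration: int, hit_profit_aim: bool,
                           win_reward_factor: float = 2) -> float:
    factor = 1.
    if trade_duration <= max_trade_duration:
        factor *= 1.5   # trade closed within the duration target
    else:
        factor *= 0.5   # trade held past the duration target
    if hit_profit_aim:  # last closed profit beat profit_aim * rr
        factor *= win_reward_factor
    return float(pnl * factor)

# A 2% winner, closed in 50 of 100 allowed candles, that beat the profit aim:
# duration_scaled_reward(0.02, 50, 100, True) -> 0.02 * 1.5 * 2 = 0.06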


@@ -3,7 +3,6 @@ from typing import Any, Dict # , Tuple
 
 # import numpy.typing as npt
 import torch as th
-import numpy as np
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions
 from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel
@@ -47,30 +46,36 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
 class MyRLEnv(Base5ActionRLEnv):
     """
-    User can modify any part of the environment by overriding base
-    functions
+    User can override any function in BaseRLEnv and gym.Env. Here the user
+    sets a custom reward based on profit and trade duration.
     """
 
     def calculate_reward(self, action):
 
         if self._last_trade_tick is None:
             return 0.
 
+        pnl = self.get_unrealized_profit()
+
+        max_trade_duration = self.rl_config['max_trade_duration_candles']
+        trade_duration = self._current_tick - self._last_trade_tick
+
+        factor = 1
+
+        if trade_duration <= max_trade_duration:
+            factor *= 1.5
+        elif trade_duration > max_trade_duration:
+            factor *= 0.5
+
         # close long
         if action == Actions.Long_exit.value and self._position == Positions.Long:
-            last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open)
-            current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open)
-            factor = 1
             if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr:
-                factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
-            return float((np.log(current_price) - np.log(last_trade_price)) * factor)
+                factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
+            return float(pnl * factor)
 
         # close short
         if action == Actions.Short_exit.value and self._position == Positions.Short:
-            last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open)
-            current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open)
-            factor = 1
             if self.close_trade_profit and self.close_trade_profit[-1] > self.profit_aim * self.rr:
-                factor = self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
-            return float(np.log(last_trade_price) - np.log(current_price) * factor)
+                factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
+            return float(pnl * factor)
 
         return 0.
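
For reference, the keys read above would sit in the rl_config section of the freqai configuration. The fragment below is an assumption inferred from the attribute accesses in the diff, not a documented layout: the values are placeholders, and profit_aim / rr are assumed to reach the environment via model_reward_parameters.

# Assumed rl_config fragment -- key names come from the diff, values are
# placeholders. Only 'max_trade_duration_candles' and 'win_reward_factor'
# are read directly by calculate_reward above; 'profit_aim' and 'rr' are
# assumed to be set on the env from model_reward_parameters.
rl_config = {
    "max_trade_duration_candles": 100,   # drives the 1.5x / 0.5x duration factor
    "model_reward_parameters": {
        "win_reward_factor": 2,          # bonus when profit beats profit_aim * rr
        "profit_aim": 0.025,
        "rr": 1,
    },
}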