import logging
from enum import Enum

from gym import spaces

from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions


logger = logging.getLogger(__name__)


class Actions(Enum):
    Neutral = 0
    Exit = 1
    Long_enter = 2
    Short_enter = 3


class Base4ActionRLEnv(BaseEnvironment):
    """
    Base class for a 4 action environment: Neutral, Exit, Long_enter and
    Short_enter. Concrete environments subclass this and implement
    `calculate_reward()`; see the usage sketch at the end of this module.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.actions = Actions

    def set_action_space(self):
        self.action_space = spaces.Discrete(len(Actions))
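        # Note: Discrete(len(Actions)) == Discrete(4); the integer actions
        # the agent emits (0..3) follow the enum definition order above,
        # which step() relies on via `self.actions._member_names_[action]`.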

    def step(self, action: int):
        """
        Logic for a single step (incrementing one candle in time)
        by the agent.
        :param action: int = the action type that the agent plans
            to take for the current step.
        :returns:
            observation = current state of environment
            step_reward = the reward from `calculate_reward()`
            _done = if the agent "died" or if the candles finished
            info = dict passed back to openai gym lib
        """
        self._done = False
        self._current_tick += 1

        if self._current_tick == self._end_tick:
            self._done = True

        self._update_unrealized_total_profit()
        step_reward = self.calculate_reward(action)
        self.total_reward += step_reward
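        # Log the action taken (by its enum member name) to the tensorboard
        # metrics for this training run.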
        self.tensorboard_log(self.actions._member_names_[action])

        trade_type = None
        if self.is_tradesignal(action):
            """
            Action: Long_enter, position: Neutral -> Open Long
            Action: Short_enter, position: Neutral -> Open Short

            Action: Exit, position: Long -> Close Long
            Action: Exit, position: Short -> Close Short
            """

            if action == Actions.Neutral.value:
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            elif action == Actions.Long_enter.value:
                self._position = Positions.Long
                trade_type = "long"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Short_enter.value:
                self._position = Positions.Short
                trade_type = "short"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Exit.value:
                self._update_total_profit()
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            else:
                logger.warning("case not defined")

            if trade_type is not None:
                self.trade_history.append(
                    {'price': self.current_price(), 'index': self._current_tick,
                     'type': trade_type})
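
        # End the episode early if the running drawdown breaches the
        # configured `max_training_drawdown_pct` (default 0.8, i.e. 80%).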
        if self._total_profit < 1 - self.rl_config.get('max_training_drawdown_pct', 0.8):
            self._done = True

        self._position_history.append(self._position)

        info = dict(
            tick=self._current_tick,
            action=action,
            total_reward=self.total_reward,
            total_profit=self._total_profit,
            position=self._position.value,
            trade_duration=self.get_trade_duration(),
            current_profit_pct=self.get_unrealized_profit()
        )

        observation = self._get_observation()
        self._update_history(info)

        return observation, step_reward, self._done, info

    def is_tradesignal(self, action: int) -> bool:
        """
        Determine if the action is a trade signal, i.e. one that changes the
        position state. E.g. the agent wants Actions.Exit while it is already
        in Positions.Neutral, which is not a trade signal.
        """
        return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or
                    (action == Actions.Neutral.value and self._position == Positions.Short) or
                    (action == Actions.Neutral.value and self._position == Positions.Long) or
                    (action == Actions.Short_enter.value and self._position == Positions.Short) or
                    (action == Actions.Short_enter.value and self._position == Positions.Long) or
                    (action == Actions.Exit.value and self._position == Positions.Neutral) or
                    (action == Actions.Long_enter.value and self._position == Positions.Long) or
                    (action == Actions.Long_enter.value and self._position == Positions.Short))

    def _is_valid(self, action: int) -> bool:
        """
        Determine if the action is valid for the current position,
        e.g. the agent wants Actions.Exit while it is in Positions.Neutral,
        which is invalid.
        """
        # Agent should only try to exit if it is in position
        if action == Actions.Exit.value:
            if self._position not in (Positions.Short, Positions.Long):
                return False

        # Agent should only try to enter if it is not in position
        if action in (Actions.Short_enter.value, Actions.Long_enter.value):
            if self._position != Positions.Neutral:
                return False

        return True
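

# Usage sketch (illustrative only): a concrete environment subclasses
# Base4ActionRLEnv and implements `calculate_reward()`, the reward hook
# invoked by step() above. The class name `MyRLEnv` and the reward values
# below are hypothetical.
#
# class MyRLEnv(Base4ActionRLEnv):
#     def calculate_reward(self, action: int) -> float:
#         # Penalize actions that _is_valid() rejects ...
#         if not self._is_valid(action):
#             return -2.0
#         # ... otherwise pay out the current unrealized profit.
#         return self.get_unrealized_profit()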