add documentation for tensorboard_log, change how users interact with tensorboard_log

robcaulk 2022-12-11 15:31:29 +01:00
parent cb8fc3c8c7
commit 0fd8e214e4
5 changed files with 57 additions and 7 deletions


@@ -247,6 +247,32 @@ where `unique-id` is the `identifier` set in the `freqai` configuration file. Th
![tensorboard](assets/tensorboard.jpg)
### Custom logging
FreqAI also provides a built-in episodic summary logger called `self.tensorboard_log` for adding custom information to the Tensorboard log. By default, this function is called once per step inside the environment to record the agent's actions. All values accumulated over the steps of a single episode are reported at the conclusion of that episode, after which all metrics are reset to 0 in preparation for the subsequent episode.
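Conceptually, the logger behaves like the following standalone sketch (plain Python, independent of FreqAI; the action names are only illustrative placeholders):

```py
# Standalone sketch of the episodic accumulate/report/reset cycle.
# The action names below are illustrative, not FreqAI's actual action enums.
tensorboard_metrics: dict = {}

def tensorboard_log(metric: str, inc: float = 1):
    # increment the metric, creating it on first use
    tensorboard_metrics[metric] = tensorboard_metrics.get(metric, 0) + inc

# during an episode, the environment calls the logger once per step
for action_name in ["Neutral", "Long_enter", "Neutral", "Long_exit"]:
    tensorboard_log(action_name)

# at episode end, the accumulated totals are reported ...
print(tensorboard_metrics)  # {'Neutral': 2, 'Long_enter': 1, 'Long_exit': 1}

# ... and all metrics are reset for the next episode
tensorboard_metrics = {}
```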
`self.tensorboard_log` can also be called anywhere inside the environment; for example, it can be added to the `calculate_reward` function to collect more detailed information about how often various parts of the reward logic are triggered:
```py
class MyRLEnv(Base5ActionRLEnv):
    """
    User-made custom environment. This class inherits from BaseEnvironment and gym.Env.
    Users can override any functions from those parent classes. Here is an example
    of a user-customized `calculate_reward()` function.
    """

    def calculate_reward(self, action: int) -> float:
        if not self._is_valid(action):
            self.tensorboard_log("is_valid")
            return -2
```
!!! Note
    The `self.tensorboard_log()` function is designed for tracking incremented objects only, i.e., events and actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument, e.g., `self.tensorboard_log("float_metric1", 0.23)` would add 0.23 to `float_metric1`.
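For a sketch of the float form, the example below (the metric name `pnl_component` is illustrative, not a FreqAI convention) accumulates each step's unrealized profit so that the per-episode total appears in Tensorboard:

```py
class MyRLEnv(Base5ActionRLEnv):
    """
    Example environment that logs both an event count and a float metric.
    """

    def calculate_reward(self, action: int) -> float:
        if not self._is_valid(action):
            # event counter: adds 1 for every invalid action
            self.tensorboard_log("is_valid")
            return -2

        pnl = self.get_unrealized_profit()
        # float metric: adds this step's pnl to the episode's running total
        self.tensorboard_log("pnl_component", pnl)
        return pnl
```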
### Choosing a base environment
FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 4 or 5 actions. In the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit a position. In the `Base5ActionEnvironment`, the agent has the same actions, but instead of a single exit action, exiting long and exiting short are separate actions. The main changes stemming from the environment selection include:


@@ -48,7 +48,7 @@ class Base4ActionRLEnv(BaseEnvironment):
        self._update_unrealized_total_profit()
        step_reward = self.calculate_reward(action)
        self.total_reward += step_reward
-       self.tensorboard_metrics[self.actions._member_names_[action]] += 1
+       self.tensorboard_log(self.actions._member_names_[action])

        trade_type = None
        if self.is_tradesignal(action):


@@ -49,7 +49,7 @@ class Base5ActionRLEnv(BaseEnvironment):
        self._update_unrealized_total_profit()
        step_reward = self.calculate_reward(action)
        self.total_reward += step_reward
-       self.tensorboard_metrics[self.actions._member_names_[action]] += 1
+       self.tensorboard_log(self.actions._member_names_[action])

        trade_type = None
        if self.is_tradesignal(action):


@@ -2,7 +2,7 @@ import logging
import random
from abc import abstractmethod
from enum import Enum
-from typing import Optional, Type
+from typing import Optional, Type, Union

import gym
import numpy as np
@@ -132,14 +132,37 @@ class BaseEnvironment(gym.Env):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

+   def tensorboard_log(self, metric: str, inc: Union[int, float] = 1):
+       """
+       Function builds the tensorboard_metrics dictionary
+       to be parsed by the TensorboardCallback. This
+       function is designed for tracking incremented objects,
+       events, actions inside the training environment.
+       For example, a user can call this to track the
+       frequency of occurrence of an `is_valid` call in
+       their `calculate_reward()`:
+
+       def calculate_reward(self, action: int) -> float:
+           if not self._is_valid(action):
+               self.tensorboard_log("is_valid")
+               return -2
+
+       :param metric: metric to be tracked and incremented
+       :param inc: value to increment `metric` by
+       """
+       if metric not in self.tensorboard_metrics:
+           self.tensorboard_metrics[metric] = inc
+       else:
+           self.tensorboard_metrics[metric] += inc
+
+   def reset_tensorboard_log(self):
+       self.tensorboard_metrics = {}
+
    def reset(self):
        """
        Reset is called at the beginning of every episode
        """
-       # tensorboard_metrics is used for episodic reports and tensorboard logging
-       self.tensorboard_metrics: dict = {}
-       for action in self.actions:
-           self.tensorboard_metrics[action.name] = 0
+       self.reset_tensorboard_log()

        self._done = False


@@ -100,6 +100,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
        """
        # first, penalize if the action is not valid
        if not self._is_valid(action):
+           self.tensorboard_log("is_valid")
            return -2

        pnl = self.get_unrealized_profit()