fix skipped mac test, fix RL bug in add_state_info, fix use of __import__, revise doc

robcaulk 2022-11-17 21:50:02 +01:00
parent bf4d5b432a
commit 60fcd8dce2
3 changed files with 5 additions and 5 deletions


@@ -20,7 +20,7 @@ With the current framework, we aim to expose the training environment via the co
We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely.
-The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework (albeit with some basic assistance from core-dev). For the environment, it is inheriting from `gym.env` which means that a user would need to write an entirely new environment if they wish to switch to a different library.
+The framework is built on stable_baselines3 (torch) and openai gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library.
## Running Reinforcement Learning
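The revised paragraph centres on users customising `calculate_reward()`. As a purely illustrative, self-contained sketch of the kind of reward shaping it describes (the signature, argument names, and constants below are assumptions for illustration, not the FreqAI API):

```python
# Illustrative sketch only: one reward shape a user might encode in a
# custom calculate_reward(). All names and constants here are assumptions.
def calculate_reward(unrealized_profit: float, trade_duration: int) -> float:
    # Reward open profit, but decay it the longer the trade stays open
    # so the agent learns not to sit in positions indefinitely.
    duration_penalty = 1.0 / (1.0 + 0.01 * trade_duration)
    return unrealized_profit * duration_penalty

print(calculate_reward(0.05, 10))   # ~0.0455
print(calculate_reward(0.05, 300))  # 0.0125
```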


@@ -1,3 +1,4 @@
+import importlib
import logging
from abc import abstractmethod
from datetime import datetime, timezone
@@ -58,8 +59,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
f'sb3_contrib. please choose one of {SB3_MODELS} or '
f'{SB3_CONTRIB_MODELS}')
-mod = __import__(import_str, fromlist=[
-    self.model_type])
+mod = importlib.import_module(import_str, self.model_type)
self.MODELCLASS = getattr(mod, self.model_type)
self.policy_type = self.freqai_info['rl_config']['policy_type']
self.unset_outlier_removal()
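For readers unfamiliar with the change above: `importlib.import_module()` is the documented, higher-level way to import a module by name, whereas `__import__()` is a low-level hook that needs the `fromlist` workaround to return anything other than the top-level package. A standalone sketch of the dynamic-loading pattern used here (the example package and class names are assumptions, not FreqAI code):

```python
import importlib

def load_class(import_str: str, class_name: str):
    # Import the package named by import_str, then pull the requested
    # class off it; this mirrors the import_module + getattr pattern above.
    mod = importlib.import_module(import_str)
    return getattr(mod, class_name)

# Usage (assumes stable_baselines3 is installed):
# PPO = load_class("stable_baselines3", "PPO")
```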
@@ -236,7 +236,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
def _predict(window):
observations = dataframe.iloc[window.index]
-if self.live and self.rl_config('add_state_info', False):
+if self.live and self.rl_config.get('add_state_info', False):
market_side, current_profit, trade_duration = self.get_state_info(dk.pair)
observations['current_profit_pct'] = current_profit
observations['position'] = market_side
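The bug fixed here is that `rl_config` is a plain dict, so calling it like a function raises a `TypeError`; `dict.get()` with a default is the intended lookup. A minimal reproduction of the difference:

```python
rl_config = {"add_state_info": True}

# rl_config("add_state_info", False)            # TypeError: 'dict' object is not callable
print(rl_config.get("add_state_info", False))   # True
print(rl_config.get("missing_key", False))      # False (the supplied default)
```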


@@ -196,7 +196,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog)
if is_arm() and "Catboost" in model:
    pytest.skip("CatBoost is not supported on ARM")
-if is_mac():
+if is_mac() and 'Reinforcement' in model:
    pytest.skip("Reinforcement learning module not available on intel based Mac OS")
Trade.use_db = False
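The test fix narrows the skip so that only reinforcement-learning models are skipped on Mac, rather than every parametrised model. A hedged sketch of the resulting guard logic, factored out as a plain helper (the function name and boolean arguments are assumptions for illustration):

```python
import pytest

def maybe_skip(model: str, on_arm: bool, on_mac: bool) -> None:
    # Skip only the combinations that genuinely cannot run, instead of
    # skipping every model whenever the host is a Mac.
    if on_arm and "Catboost" in model:
        pytest.skip("CatBoost is not supported on ARM")
    if on_mac and "Reinforcement" in model:
        pytest.skip("Reinforcement learning module not available on intel based Mac OS")
```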