Merge pull request #8291 from freqtrade/allow-ohlc-removal

allow user to drop ohlc from features in RL
Matthias 2023-03-08 21:04:34 +01:00 committed by GitHub
commit 29dfb5c169
6 changed files with 31 additions and 16 deletions

View File

@@ -84,6 +84,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting. <br> **Datatype:** bool. <br> Default: `False`.
| `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[<shared layers>, dict(vf=[<non-shared value network layers>], pi=[<non-shared policy network layers>])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each.
| `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting. <br> **Datatype:** bool. <br> Default: `False`.
+| `drop_ohlc_from_features` | Do not include the normalized OHLC data in the feature set passed to the agent during training (OHLC is still used to drive the environment in all cases; see the config sketch below). <br> **Datatype:** bool. <br> Default: `False`.
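For context, a minimal sketch of how the new option might be enabled, written as a Python dict mirroring the JSON config. Only the relevant keys are shown, and the `train_cycles`/`add_state_info` values are illustrative, not recommendations:

```python
# Hypothetical excerpt of a freqtrade config, expressed as a Python dict.
config = {
    "freqai": {
        "rl_config": {
            "train_cycles": 25,       # illustrative value
            "add_state_info": True,   # illustrative value
            # New in this PR: strip %-raw_open/low/high/close from the
            # features the agent sees; the environment itself still
            # consumes the raw prices.
            "drop_ohlc_from_features": True,
        },
    },
}
```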
### Additional parameters

View File

@@ -176,9 +176,11 @@ As you begin to modify the strategy and the prediction model, you will quickly r
factor = 100
+pair = self.pair.replace(':', '')
# you can use feature values from dataframe
+# Assumes the shifted RSI indicator has been generated in the strategy.
-rsi_now = self.raw_features[f"%-rsi-period-10_shift-1_{self.pair}_"
+rsi_now = self.raw_features[f"%-rsi-period-10_shift-1_{pair}_"
f"{self.config['timeframe']}"].iloc[self._current_tick]
# reward agent for entering trades
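The hunk above comes from the documented `MyRLEnv.calculate_reward()` example. A condensed, self-contained sketch of that pattern follows; it assumes the strategy generated the shifted `%-rsi-period-10` feature, and the reward values are illustrative only:

```python
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyRLEnv(Base5ActionRLEnv):
    """User-customized environment with an example reward."""

    def calculate_reward(self, action: int) -> float:
        # Strongly penalize actions that are invalid in the current position.
        if not self._is_valid(action):
            return -2

        pair = self.pair.replace(':', '')
        # Raw (un-normalized) feature values stay available to the reward,
        # even when drop_ohlc_from_features removes OHLC from the agent's view.
        rsi_now = self.raw_features[f"%-rsi-period-10_shift-1_{pair}_"
                                    f"{self.config['timeframe']}"].iloc[self._current_tick]

        # Illustrative shaping: pay more for long entries at oversold RSI.
        if action == Actions.Long_enter.value and self._position == Positions.Neutral:
            return 25 if rsi_now < 40 else 2

        return 0.
```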

View File

@@ -588,6 +588,7 @@ CONF_SCHEMA = {
"rl_config": {
"type": "object",
"properties": {
"drop_ohlc_from_features": {"type": "boolean", "default": False},
"train_cycles": {"type": "integer"},
"max_trade_duration_candles": {"type": "integer"},
"add_state_info": {"type": "boolean", "default": False},

View File

@@ -114,6 +114,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
# normalize all data based on train_dataset only
+prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk)
data_dictionary = dk.normalize_data(data_dictionary)
# data cleaning/analysis
@@ -148,12 +149,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
env_info = self.pack_env_dict(dk.pair)
-self.train_env = self.MyRLEnv(df=train_df,
-prices=prices_train,
-**env_info)
-self.eval_env = Monitor(self.MyRLEnv(df=test_df,
-prices=prices_test,
-**env_info))
+self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, **env_info)
+self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, **env_info))
self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=len(train_df),
best_model_save_path=str(dk.data_path))
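The `Monitor`/`EvalCallback` pair here is plain `stable_baselines3` API. A minimal, self-contained sketch of the same pattern on a toy environment (assumes a recent SB3 built against gymnasium; the env, `eval_freq`, and step counts are placeholders):

```python
import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.monitor import Monitor

train_env = gym.make("CartPole-v1")
# Monitor wraps the eval env so EvalCallback can read episode rewards.
eval_env = Monitor(gym.make("CartPole-v1"))

eval_callback = EvalCallback(eval_env, deterministic=True, render=False,
                             eval_freq=500, best_model_save_path="./best_model")

model = PPO("MlpPolicy", train_env, verbose=0)
model.learn(total_timesteps=2_000, callback=eval_callback)
```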
@@ -238,6 +235,9 @@ class BaseReinforcementLearningModel(IFreqaiModel):
filtered_dataframe, _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
+filtered_dataframe = self.drop_ohlc_from_df(filtered_dataframe, dk)
filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
dk.data_dictionary["prediction_features"] = filtered_dataframe
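The ordering matters here: the stored normalization metadata only describes columns that survived training, so OHLC must be dropped before `normalize_data_from_metadata` runs. A toy sketch of that invariant, with hypothetical helper and parameter names:

```python
import pandas as pd

DROP_LIST = ['%-raw_open', '%-raw_low', '%-raw_high', '%-raw_close']


def normalize_like_training(df: pd.DataFrame, train_mean: pd.Series,
                            train_std: pd.Series) -> pd.DataFrame:
    """Hypothetical stand-in for normalize_data_from_metadata: the stored
    train statistics only cover columns that survived training, so OHLC
    has to be dropped *before* they are applied."""
    df = df.drop(columns=DROP_LIST, errors='ignore')
    return (df - train_mean) / train_std
```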
@@ -285,7 +285,6 @@ class BaseReinforcementLearningModel(IFreqaiModel):
train_df = data_dictionary["train_features"]
test_df = data_dictionary["test_features"]
-# %-raw_volume_gen_shift-2_ETH/USDT_1h
# price data for model training and evaluation
tf = self.config['timeframe']
rename_dict = {'%-raw_open': 'open', '%-raw_low': 'low',
@@ -318,8 +317,24 @@ class BaseReinforcementLearningModel(IFreqaiModel):
prices_test.rename(columns=rename_dict, inplace=True)
prices_test.reset_index(drop=True)
+train_df = self.drop_ohlc_from_df(train_df, dk)
+test_df = self.drop_ohlc_from_df(test_df, dk)
return prices_train, prices_test

+def drop_ohlc_from_df(self, df: DataFrame, dk: FreqaiDataKitchen) -> DataFrame:
+"""
+Given a dataframe, drop the ohlc data if the user has activated
+`drop_ohlc_from_features` in the rl_config, and keep
+dk.training_features_list in sync so prediction uses the same columns.
+"""
+drop_list = ['%-raw_open', '%-raw_low', '%-raw_high', '%-raw_close']
+if self.rl_config["drop_ohlc_from_features"]:
+# remove the raw price columns from both the features and the feature list
+df.drop(drop_list, axis=1, inplace=True)
+feature_list = dk.training_features_list
+dk.training_features_list = [e for e in feature_list if e not in drop_list]
+return df
def load_model_from_disk(self, dk: FreqaiDataKitchen) -> Any:
"""
Can be used by user if they are trying to limit_ram_usage *and*
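A quick standalone demonstration of what the new `drop_ohlc_from_df` helper above does to a feature frame and to the feature list (toy data, pure pandas):

```python
import pandas as pd

drop_list = ['%-raw_open', '%-raw_low', '%-raw_high', '%-raw_close']

df = pd.DataFrame({'%-raw_open': [1.0], '%-raw_high': [1.2],
                   '%-raw_low': [0.9], '%-raw_close': [1.1],
                   '%-rsi-period-10': [55.0]})
feature_list = list(df.columns)

# the same two steps drop_ohlc_from_df performs when the flag is set:
df.drop(drop_list, axis=1, inplace=True)
feature_list = [e for e in feature_list if e not in drop_list]

print(df.columns.tolist())  # ['%-rsi-period-10']
print(feature_list)         # ['%-rsi-period-10']
```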

View File

@@ -78,7 +78,9 @@ def make_rl_config(conf):
"rr": 1,
"profit_aim": 0.02,
"win_reward_factor": 2
-}}
+},
+"drop_ohlc_from_features": False
+}
return conf

View File

@@ -68,13 +68,6 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
freqai_conf['freqai']['feature_parameters'].update({"shuffle_after_split": shuffle})
freqai_conf['freqai']['feature_parameters'].update({"buffer_train_data_candles": buffer})
-if 'ReinforcementLearner' in model:
-model_save_ext = 'zip'
-freqai_conf = make_rl_config(freqai_conf)
-# test the RL guardrails
-freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True})
-freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True})
if 'ReinforcementLearner' in model:
model_save_ext = 'zip'
freqai_conf = make_rl_config(freqai_conf)
@@ -84,6 +77,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
if 'test_3ac' in model or 'test_4ac' in model:
freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models")
freqai_conf["freqai"]["rl_config"]["drop_ohlc_from_features"] = True
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
exchange = get_patched_exchange(mocker, freqai_conf)