From b0d2d13eb19a5a64a4bec8b5314d36544ec21a38 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 23 May 2022 21:05:05 +0200 Subject: [PATCH] improve data persistence/mapping for live/dry. This accommodates quick reloads after crash and handles multi-pair cleanly --- freqtrade/freqai/data_drawer.py | 59 +++++++++ freqtrade/freqai/data_kitchen.py | 123 ++++++++++-------- freqtrade/freqai/freqai_interface.py | 102 ++++++++++----- .../CatboostPredictionModel.py | 46 ------- 4 files changed, 199 insertions(+), 131 deletions(-) create mode 100644 freqtrade/freqai/data_drawer.py diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py new file mode 100644 index 000000000..a27a4b67f --- /dev/null +++ b/freqtrade/freqai/data_drawer.py @@ -0,0 +1,59 @@ + +import json +import logging +from pathlib import Path +from typing import Any, Dict, Tuple + +# import pickle as pk +import numpy as np + + +logger = logging.getLogger(__name__) + + +class FreqaiDataDrawer: + """ + Class aimed at holding all pair models/info in memory for better inferencing/retrainig/saving + /loading to/from disk. + This object remains persistent throughout live/dry, unlike FreqaiDataKitchen, which is + reinstantiated for each coin. + """ + def __init__(self, full_path: Path): + + # dictionary holding all pair metadata necessary to load in from disk + self.pair_dict: Dict[str, Any] = {} + # dictionary holding all actively inferenced models in memory given a model filename + self.model_dictionary: Dict[str, Any] = {} + self.full_path = full_path + self.load_drawer_from_disk() + + def load_drawer_from_disk(self): + exists = Path(self.full_path / str('pair_dictionary.json')).resolve().exists() + if exists: + with open(self.full_path / str('pair_dictionary.json'), "r") as fp: + self.pair_dict = json.load(fp) + else: + logger.info("Could not find existing datadrawer, starting from scratch") + return exists + + def save_drawer_to_disk(self): + with open(self.full_path / str('pair_dictionary.json'), "w") as fp: + json.dump(self.pair_dict, fp, default=self.np_encoder) + + def np_encoder(self, object): + if isinstance(object, np.generic): + return object.item() + + def get_pair_dict_info(self, metadata: dict) -> Tuple[str, int, bool]: + pair_in_dict = self.pair_dict.get(metadata['pair']) + if pair_in_dict: + model_filename = self.pair_dict[metadata['pair']]['model_filename'] + trained_timestamp = self.pair_dict[metadata['pair']]['trained_timestamp'] + coin_first = self.pair_dict[metadata['pair']]['first'] + else: + self.pair_dict[metadata['pair']] = {} + model_filename = self.pair_dict[metadata['pair']]['model_filename'] = '' + coin_first = self.pair_dict[metadata['pair']]['first'] = True + trained_timestamp = self.pair_dict[metadata['pair']]['trained_timestamp'] = 0 + + return model_filename, trained_timestamp, coin_first diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 148efd5dd..f5ddf8462 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -19,6 +19,7 @@ from sklearn.model_selection import train_test_split from freqtrade.configuration import TimeRange from freqtrade.data.history import load_pair_history from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data +from freqtrade.freqai.data_drawer import FreqaiDataDrawer from freqtrade.resolvers import ExchangeResolver from freqtrade.strategy.interface import IStrategy @@ -33,13 +34,13 @@ logger = logging.getLogger(__name__) class FreqaiDataKitchen: """ - Class designed to 
handle all the data for the IFreqaiModel class model. + Class designed to analyze data for a single pair. Employed by the IFreqaiModel class. Functionalities include holding, saving, loading, and analyzing the data. author: Robert Caulk, rob.caulk@gmail.com """ - def __init__(self, config: Dict[str, Any], dataframe: DataFrame, live: bool = False): - self.full_dataframe = dataframe + def __init__(self, config: Dict[str, Any], data_drawer: FreqaiDataDrawer, live: bool = False, + pair: str = ''): self.data: Dict[Any, Any] = {} self.data_dictionary: Dict[Any, Any] = {} self.config = config @@ -53,10 +54,10 @@ class FreqaiDataKitchen: self.full_do_predict: npt.ArrayLike = np.array([]) self.full_target_mean: npt.ArrayLike = np.array([]) self.full_target_std: npt.ArrayLike = np.array([]) - self.model_path = Path() + self.data_path = Path() self.model_filename: str = "" - self.model_dictionary: Dict[Any, Any] = {} self.live = live + self.pair = pair self.svm_model: linear_model.SGDOneClassSVM = None if not self.live: self.full_timerange = self.create_fulltimerange(self.config["timerange"], @@ -69,6 +70,8 @@ class FreqaiDataKitchen: config["freqai"]["backtest_period"], ) + self.data_drawer = data_drawer + def assert_config(self, config: Dict[str, Any], live: bool) -> None: assert config.get('freqai'), "No Freqai parameters found in config file." assert config.get('freqai', {}).get('train_period'), ("No Freqai train_period found in" @@ -88,18 +91,18 @@ class FreqaiDataKitchen: assert config.get('freqai', {}).get('feature_parameters'), ("No Freqai feature_parameters" "found in config file.") - def set_paths(self) -> None: + def set_paths(self, trained_timestamp: int = None) -> None: self.full_path = Path(self.config['user_data_dir'] / "models" / str(self.freqai_config.get('live_full_backtestrange') + self.freqai_config.get('identifier'))) - self.model_path = Path(self.full_path / str("sub-train" + "-" + - str(self.freqai_config.get('live_trained_timerange')))) + self.data_path = Path(self.full_path / str("sub-train" + "-" + self.pair.split("/")[0] + + str(trained_timestamp))) return - def save_data(self, model: Any) -> None: + def save_data(self, model: Any, coin: str = '') -> None: """ Saves all data associated with a model for a single sub-train time range :params: @@ -107,10 +110,10 @@ class FreqaiDataKitchen: predictions """ - if not self.model_path.is_dir(): - self.model_path.mkdir(parents=True, exist_ok=True) + if not self.data_path.is_dir(): + self.data_path.mkdir(parents=True, exist_ok=True) - save_path = Path(self.model_path) + save_path = Path(self.data_path) # Save the trained model dump(model, save_path / str(self.model_filename + "_model.joblib")) @@ -118,7 +121,7 @@ class FreqaiDataKitchen: if self.svm_model is not None: dump(self.svm_model, save_path / str(self.model_filename + "_svm_model.joblib")) - self.data["model_path"] = str(self.model_path) + self.data["data_path"] = str(self.data_path) self.data["model_filename"] = str(self.model_filename) self.data["training_features_list"] = list(self.data_dictionary["train_features"].columns) # store the metadata @@ -131,7 +134,10 @@ class FreqaiDataKitchen: ) if self.live: - self.model_dictionary[self.model_filename] = model + self.data_drawer.model_dictionary[self.model_filename] = model + self.data_drawer.pair_dict[coin]['model_filename'] = self.model_filename + self.data_drawer.pair_dict[coin]['data_path'] = str(self.data_path) + self.data_drawer.save_drawer_to_disk() # TODO add a helper function to let user save/load any data they are 
custom adding. We # do not want them having to edit the default save/load methods here. Below is an example @@ -148,19 +154,23 @@ class FreqaiDataKitchen: return - def load_data(self) -> Any: + def load_data(self, coin: str = '') -> Any: """ loads all data required to make a prediction on a sub-train time range :returns: :model: User trained model which can be inferenced for new predictions """ - with open(self.model_path / str(self.model_filename + "_metadata.json"), "r") as fp: + if self.live: + self.model_filename = self.data_drawer.pair_dict[coin]['model_filename'] + self.data_path = Path(self.data_drawer.pair_dict[coin]['data_path']) + + with open(self.data_path / str(self.model_filename + "_metadata.json"), "r") as fp: self.data = json.load(fp) self.training_features_list = self.data["training_features_list"] self.data_dictionary["train_features"] = pd.read_pickle( - self.model_path / str(self.model_filename + "_trained_df.pkl") + self.data_path / str(self.model_filename + "_trained_df.pkl") ) # TODO add a helper function to let user save/load any data they are custom adding. We @@ -169,34 +179,34 @@ class FreqaiDataKitchen: # if self.freqai_config.get('feature_parameters','determine_statistical_distributions'): # self.data_dictionary["upper_quantiles"] = pd.read_pickle( - # self.model_path / str(self.model_filename + "_upper_quantiles.pkl") + # self.data_path / str(self.model_filename + "_upper_quantiles.pkl") # ) # self.data_dictionary["lower_quantiles"] = pd.read_pickle( - # self.model_path / str(self.model_filename + "_lower_quantiles.pkl") + # self.data_path / str(self.model_filename + "_lower_quantiles.pkl") # ) - self.model_path = Path(self.data["model_path"]) - self.model_filename = self.data["model_filename"] + # self.data_path = Path(self.data["data_path"]) + # self.model_filename = self.data["model_filename"] # try to access model in memory instead of loading object from disk to save time - if self.live and self.model_filename in self.model_dictionary: - model = self.model_dictionary[self.model_filename] + if self.live and self.model_filename in self.data_drawer.model_dictionary: + model = self.data_drawer.model_dictionary[self.model_filename] else: - model = load(self.model_path / str(self.model_filename + "_model.joblib")) + model = load(self.data_path / str(self.model_filename + "_model.joblib")) - if Path(self.model_path / str(self.model_filename + + if Path(self.data_path / str(self.model_filename + "_svm_model.joblib")).resolve().exists(): - self.svm_model = load(self.model_path / str(self.model_filename + "_svm_model.joblib")) + self.svm_model = load(self.data_path / str(self.model_filename + "_svm_model.joblib")) assert model, ( f"Unable to load model, ensure model exists at " - f"{self.model_path} " + f"{self.data_path} " ) if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]: self.pca = pk.load( - open(self.model_path / str(self.model_filename + "_pca_object.pkl"), "rb") + open(self.data_path / str(self.model_filename + "_pca_object.pkl"), "rb") ) return model @@ -539,9 +549,9 @@ class FreqaiDataKitchen: logger.info(f'PCA reduced total features from {n_components} to {n_keep_components}') - if not self.model_path.is_dir(): - self.model_path.mkdir(parents=True, exist_ok=True) - pk.dump(pca2, open(self.model_path / str(self.model_filename + "_pca_object.pkl"), "wb")) + if not self.data_path.is_dir(): + self.data_path.mkdir(parents=True, exist_ok=True) + pk.dump(pca2, open(self.data_path / str(self.model_filename + "_pca_object.pkl"), 
"wb")) return None @@ -717,40 +727,51 @@ class FreqaiDataKitchen: return full_timerange - def check_if_new_training_required(self, trained_timerange: TimeRange, - metadata: dict) -> Tuple[bool, TimeRange]: + def check_if_new_training_required(self, trained_timestamp: int) -> Tuple[bool, TimeRange]: time = datetime.datetime.now(tz=datetime.timezone.utc).timestamp() - - if trained_timerange.startts != 0: - elapsed_time = (time - trained_timerange.stopts) / SECONDS_IN_DAY + trained_timerange = TimeRange() + if trained_timestamp != 0: + elapsed_time = (time - trained_timestamp) / SECONDS_IN_DAY retrain = elapsed_time > self.freqai_config.get('backtest_period') if retrain: - trained_timerange.startts += self.freqai_config.get( - 'backtest_period', 0) * SECONDS_IN_DAY - trained_timerange.stopts += self.freqai_config.get( - 'backtest_period', 0) * SECONDS_IN_DAY + trained_timerange.startts = int(time - self.freqai_config.get( + 'backtest_period', 0) * SECONDS_IN_DAY) + trained_timerange.stopts = int(time) else: # user passed no live_trained_timerange in config - trained_timerange = TimeRange() trained_timerange.startts = int(time - self.freqai_config.get('train_period') * SECONDS_IN_DAY) trained_timerange.stopts = int(time) retrain = True - if retrain: - coin, _ = metadata['pair'].split("/") - # set the new model_path - self.model_path = Path(self.full_path / str("sub-train" + "-" + - str(int(trained_timerange.stopts)))) + # if retrain: + # coin, _ = metadata['pair'].split("/") + # # set the new data_path + # self.data_path = Path(self.full_path / str("sub-train" + "-" + + # str(int(trained_timerange.stopts)))) - self.model_filename = "cb_" + coin.lower() + "_" + str(int(trained_timerange.stopts)) - # this is not persistent at the moment TODO - self.freqai_config['live_trained_timerange'] = str(int(trained_timerange.stopts)) - # enables persistence, but not fully implemented into save/load data yer - self.data['live_trained_timerange'] = str(int(trained_timerange.stopts)) + # self.model_filename = "cb_" + coin.lower() + "_" + str(int(trained_timerange.stopts)) + # # this is not persistent at the moment TODO + # self.freqai_config['live_trained_timerange'] = str(int(trained_timerange.stopts)) + # # enables persistence, but not fully implemented into save/load data yer + # self.data['live_trained_timerange'] = str(int(trained_timerange.stopts)) return retrain, trained_timerange + def set_new_model_names(self, metadata: dict, trained_timerange: TimeRange): + + coin, _ = metadata['pair'].split("/") + # set the new data_path + self.data_path = Path(self.full_path / str("sub-train" + "-" + + metadata['pair'].split("/")[0] + + str(int(trained_timerange.stopts)))) + + self.model_filename = "cb_" + coin.lower() + "_" + str(int(trained_timerange.stopts)) + # this is not persistent at the moment TODO + self.freqai_config['live_trained_timerange'] = str(int(trained_timerange.stopts)) + # enables persistence, but not fully implemented into save/load data yer + self.data['live_trained_timerange'] = str(int(trained_timerange.stopts)) + def download_new_data_for_retraining(self, timerange: TimeRange, metadata: dict) -> None: exchange = ExchangeResolver.load_exchange(self.config['exchange']['name'], diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 2b3addab3..0b1fb3b86 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -13,6 +13,7 @@ from pandas import DataFrame from freqtrade.configuration import TimeRange from 
freqtrade.enums import RunMode +from freqtrade.freqai.data_drawer import FreqaiDataDrawer from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.strategy.interface import IStrategy @@ -65,11 +66,14 @@ class IFreqaiModel(ABC): self.training_on_separate_thread = False self.retrain = False self.first = True - if self.freqai_info.get('live_trained_timerange'): - self.new_trained_timerange = TimeRange.parse_timerange( - self.freqai_info['live_trained_timerange']) - else: - self.new_trained_timerange = TimeRange() + # if self.freqai_info.get('live_trained_timerange'): + # self.new_trained_timerange = TimeRange.parse_timerange( + # self.freqai_info['live_trained_timerange']) + # else: + # self.new_trained_timerange = TimeRange() + + self.set_full_path() + self.data_drawer = FreqaiDataDrawer(Path(self.full_path)) def assert_config(self, config: Dict[str, Any]) -> None: @@ -86,7 +90,7 @@ class IFreqaiModel(ABC): def start(self, dataframe: DataFrame, metadata: dict, strategy: IStrategy) -> DataFrame: """ - Entry point to the FreqaiModel, it will train a new model if + Entry point to the FreqaiModel from a specific pair, it will train a new model if necessary before making the prediction. The backtesting and training paradigm is a sliding training window with a following backtest window. Both windows slide according to the @@ -103,8 +107,8 @@ class IFreqaiModel(ABC): self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE) - self.pair = metadata["pair"] - self.dh = FreqaiDataKitchen(self.config, dataframe, self.live) + # FreqaiDataKitchen is reinstantiated for each coin + self.dh = FreqaiDataKitchen(self.config, self.data_drawer, self.live, metadata["pair"]) if self.live: # logger.info('testing live') @@ -113,7 +117,7 @@ class IFreqaiModel(ABC): return (self.dh.full_predictions, self.dh.full_do_predict, self.dh.full_target_mean, self.dh.full_target_std) - logger.info("going to train %s timeranges", len(self.dh.training_timeranges)) + logger.info(f'Training {len(self.dh.training_timeranges)} timeranges') # Loop enforcing the sliding window training/backtesting paradigm # tr_train is the training time range e.g. 
1 historical month @@ -129,9 +133,12 @@ class IFreqaiModel(ABC): self.training_timerange = tr_train dataframe_train = self.dh.slice_dataframe(tr_train, dataframe) dataframe_backtest = self.dh.slice_dataframe(tr_backtest, dataframe) - logger.info("training %s for %s", self.pair, tr_train) - self.dh.model_path = Path(self.dh.full_path / str("sub-train" + "-" + str(tr_train))) - if not self.model_exists(self.pair, training_timerange=tr_train): + logger.info("training %s for %s", metadata["pair"], tr_train) + trained_timestamp = TimeRange.parse_timerange(tr_train) + self.dh.data_path = Path(self.dh.full_path / + str("sub-train" + "-" + metadata['pair'].split("/")[0] + + str(int(trained_timestamp.stopts)))) + if not self.model_exists(metadata["pair"], trained_timestamp=trained_timestamp.stopts): self.model = self.train(dataframe_train, metadata) self.dh.save_data(self.model) else: @@ -161,36 +168,40 @@ class IFreqaiModel(ABC): """ - self.dh.set_paths() + (model_filename, + trained_timestamp, + coin_first) = self.data_drawer.get_pair_dict_info(metadata) - file_exists = self.model_exists(metadata['pair'], - training_timerange=self.freqai_info[ - 'live_trained_timerange']) + if trained_timestamp != 0: + self.dh.set_paths(trained_timestamp) + # data_drawer thinks the file eixts, verify here + file_exists = self.model_exists(metadata['pair'], + trained_timestamp=trained_timestamp, + model_filename=model_filename) if not self.training_on_separate_thread: # this will also prevent other pairs from trying to train simultaneously. (self.retrain, - self.new_trained_timerange) = self.dh.check_if_new_training_required( - self.new_trained_timerange, - metadata) + new_trained_timerange) = self.dh.check_if_new_training_required( + trained_timestamp) + self.dh.set_paths(new_trained_timerange.stopts) else: logger.info("FreqAI training a new model on background thread.") self.retrain = False if self.retrain or not file_exists: - if self.first: - self.train_model_in_series(self.new_trained_timerange, metadata, strategy) - self.first = False + if coin_first: + self.train_model_in_series(new_trained_timerange, metadata, strategy) else: self.training_on_separate_thread = True # acts like a lock - self.retrain_model_on_separate_thread(self.new_trained_timerange, + self.retrain_model_on_separate_thread(new_trained_timerange, metadata, strategy) - self.model = self.dh.load_data() + self.model = self.dh.load_data(coin=metadata['pair']) strategy_provided_features = self.dh.find_features(dataframe) if strategy_provided_features != self.dh.training_features_list: - self.train_model_in_series(self.new_trained_timerange, metadata, strategy) + self.train_model_in_series(new_trained_timerange, metadata, strategy) preds, do_preds = self.predict(dataframe, metadata) self.dh.append_predictions(preds, do_preds, len(dataframe)) @@ -252,24 +263,34 @@ class IFreqaiModel(ABC): if self.freqai_info.get('feature_parameters', {}).get('DI_threshold'): self.dh.check_if_pred_in_training_spaces() # sets do_predict - def model_exists(self, pair: str, training_timerange: str) -> bool: + def model_exists(self, pair: str, trained_timestamp: int = None, + model_filename: str = '') -> bool: """ Given a pair and path, check if a model already exists :param pair: pair e.g. 
BTC/USD :param path: path to model """ - if self.live and training_timerange == "": - return False coin, _ = pair.split("/") - self.dh.model_filename = "cb_" + coin.lower() + "_" + training_timerange - path_to_modelfile = Path(self.dh.model_path / str(self.dh.model_filename + "_model.joblib")) + + if self.live and trained_timestamp is None: + self.dh.model_filename = model_filename + else: + self.dh.model_filename = "cb_" + coin.lower() + "_" + str(trained_timestamp) + + path_to_modelfile = Path(self.dh.data_path / str(self.dh.model_filename + "_model.joblib")) file_exists = path_to_modelfile.is_file() if file_exists: - logger.info("Found model at %s", self.dh.model_path / self.dh.model_filename) + logger.info("Found model at %s", self.dh.data_path / self.dh.model_filename) else: - logger.info("Could not find model at %s", self.dh.model_path / self.dh.model_filename) + logger.info("Could not find model at %s", self.dh.data_path / self.dh.model_filename) return file_exists + def set_full_path(self) -> None: + self.full_path = Path(self.config['user_data_dir'] / + "models" / + str(self.freqai_info.get('live_full_backtestrange') + + self.freqai_info.get('identifier'))) + @threaded def retrain_model_on_separate_thread(self, new_trained_timerange: TimeRange, metadata: dict, strategy: IStrategy): @@ -285,7 +306,13 @@ class IFreqaiModel(ABC): metadata) self.model = self.train(unfiltered_dataframe, metadata) - self.dh.save_data(self.model) + + self.data_drawer.pair_dict[metadata['pair']][ + 'trained_timestamp'] = new_trained_timerange.stopts + + self.dh.set_new_model_names(metadata, new_trained_timerange) + + self.dh.save_data(self.model, coin=metadata['pair']) self.training_on_separate_thread = False self.retrain = False @@ -303,7 +330,14 @@ class IFreqaiModel(ABC): metadata) self.model = self.train(unfiltered_dataframe, metadata) - self.dh.save_data(self.model) + + self.data_drawer.pair_dict[metadata['pair']][ + 'trained_timestamp'] = new_trained_timerange.stopts + + self.dh.set_new_model_names(metadata, new_trained_timerange) + + self.data_drawer.pair_dict[metadata['pair']]['first'] = False + self.dh.save_data(self.model, coin=metadata['pair']) self.retrain = False # Methods which are overridden by user made prediction models. diff --git a/freqtrade/freqai/prediction_models/CatboostPredictionModel.py b/freqtrade/freqai/prediction_models/CatboostPredictionModel.py index d09554e3e..6349174ad 100644 --- a/freqtrade/freqai/prediction_models/CatboostPredictionModel.py +++ b/freqtrade/freqai/prediction_models/CatboostPredictionModel.py @@ -140,49 +140,3 @@ class CatboostPredictionModel(IFreqaiModel): # logger.info("--------------------Finished prediction--------------------") return (self.dh.predictions, self.dh.do_predict) - - def data_cleaning_train(self) -> None: - """ - User can add data analysis and cleaning here. - Any function inside this method should drop training data points from the filtered_dataframe - based on user decided logic. See FreqaiDataKitchen::remove_outliers() for an example - of how outlier data points are dropped from the dataframe used for training. 
- """ - if self.freqai_info.get('feature_parameters', {}).get('principal_component_analysis'): - self.dh.principal_component_analysis() - - # if self.feature_parameters["determine_statistical_distributions"]: - # self.dh.determine_statistical_distributions() - # if self.feature_parameters["remove_outliers"]: - # self.dh.remove_outliers(predict=False) - - if self.freqai_info.get('feature_parameters', {}).get('use_SVM_to_remove_outliers'): - self.dh.use_SVM_to_remove_outliers(predict=False) - - if self.freqai_info.get('feature_parameters', {}).get('DI_threshold'): - self.dh.data["avg_mean_dist"] = self.dh.compute_distances() - - def data_cleaning_predict(self, filtered_dataframe: DataFrame) -> None: - """ - User can add data analysis and cleaning here. - These functions each modify self.dh.do_predict, which is a dataframe with equal length - to the number of candles coming from and returning to the strategy. Inside do_predict, - 1 allows prediction and < 0 signals to the strategy that the model is not confident in - the prediction. - See FreqaiDataKitchen::remove_outliers() for an example - of how the do_predict vector is modified. do_predict is ultimately passed back to strategy - for buy signals. - """ - if self.freqai_info.get('feature_parameters', {}).get('principal_component_analysis'): - self.dh.pca_transform() - - # if self.feature_parameters["determine_statistical_distributions"]: - # self.dh.determine_statistical_distributions() - # if self.feature_parameters["remove_outliers"]: - # self.dh.remove_outliers(predict=True) # creates dropped index - - if self.freqai_info.get('feature_parameters', {}).get('use_SVM_to_remove_outliers'): - self.dh.use_SVM_to_remove_outliers(predict=True) - - if self.freqai_info.get('feature_parameters', {}).get('DI_threshold'): - self.dh.check_if_pred_in_training_spaces() # sets do_predict
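
Illustrative note (not part of the patch): the sketch below shows how the new
FreqaiDataDrawer is expected to round-trip per-pair metadata through
pair_dictionary.json, based on the class added in data_drawer.py above. The
directory path, pair, and timestamp values are hypothetical examples, and the
manual pair_dict updates mimic what save_data() and the training hooks in
freqai_interface.py do after a training cycle.

    from pathlib import Path

    from freqtrade.freqai.data_drawer import FreqaiDataDrawer

    # Hypothetical model directory; in practice this is built from
    # user_data_dir / "models" / (live_full_backtestrange + identifier).
    full_path = Path("user_data/models/20220101-20220601_example")
    full_path.mkdir(parents=True, exist_ok=True)

    # Loads pair_dictionary.json if it exists, otherwise starts from scratch.
    drawer = FreqaiDataDrawer(full_path)

    # The first query for an unseen pair seeds an empty entry and flags it as
    # "first", so the interface trains it in series rather than on a thread.
    model_filename, trained_timestamp, coin_first = drawer.get_pair_dict_info(
        {"pair": "BTC/USD"})
    assert (model_filename, trained_timestamp, coin_first) == ("", 0, True)

    # After a training cycle the interface records the new model metadata
    # (filenames follow the "cb_<coin>_<timestamp>" pattern produced by
    # set_new_model_names) and persists it for crash recovery.
    drawer.pair_dict["BTC/USD"]["model_filename"] = "cb_btc_1653332705"
    drawer.pair_dict["BTC/USD"]["trained_timestamp"] = 1653332705
    drawer.pair_dict["BTC/USD"]["first"] = False
    drawer.save_drawer_to_disk()

    # pair_dictionary.json now contains, roughly:
    # {"BTC/USD": {"model_filename": "cb_btc_1653332705",
    #              "first": false, "trained_timestamp": 1653332705}}

On the next dry/live start, FreqaiDataDrawer reads this file back in
load_drawer_from_disk(), so get_pair_dict_info() returns the stored filename
and timestamp and the interface can reload the existing model instead of
retraining from scratch.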