From d3cb211283ced68d082cfdbdac12f3d2ab90d63b Mon Sep 17 00:00:00 2001 From: th0rntwig Date: Thu, 18 Aug 2022 14:44:49 +0200 Subject: [PATCH 01/31] Add inlier metric computation --- freqtrade/freqai/data_kitchen.py | 74 ++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 763a07375..62e353949 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -723,6 +723,80 @@ class FreqaiDataKitchen: ) return + + def compute_inlier_metric(self) -> None: + """ + + Compute inlier metric from backwards distance distributions. + This metric defines how well features from a timepoint fit + into previous timepoints. + """ + + import scipy.stats as ss + + nmb_previous_points = self.data['InlierMetric_nmb_points'] + weibull_percentile = self.data['InlierMetric_weib_perc'] + + train_ft_df = self.data_dictionary['train_features'] + train_ft_df_reindexed = train_ft_df.reindex( + index=np.flip(train_ft_df.index) + ) + + pairwise = pd.DataFrame( + np.triu( + pairwise_distances(train_ft_df_reindexed, n_jobs=self.thread_count) + ), + columns=train_ft_df_reindexed.index, + index=train_ft_df_reindexed.index + ) + pairwise = pairwise.round(5) + + column_labels = [ + '{}{}'.format('d', i) for i in range(1, nmb_previous_points+1) + ] + distances = pd.DataFrame( + columns=column_labels, index=train_ft_df.index + ) + for index in train_ft_df.index[nmb_previous_points]: + current_row = pairwise.loc[[index]] + current_row_no_zeros = current_row.loc[ + :, (current_row!=0).any(axis=0) + ] + distances.loc[[index]] = current_row_no_zeros.iloc[ + :, :nmb_previous_points + ] + distances = distances.replace([np.inf, -np.inf], np.nan) + drop_index = pd.isnull(distances).any(1) + distances = distances[drop_index==0] + + inliers = pd.DataFrame(index=distances.index) + for key in distances.keys(): + current_distances = distances[key].dropna() + fit_params = ss.weibull_min.fit(current_distances) + cutoff = ss.weibull_min.ppf(weibull_percentile, *fit_params) + is_inlier = np.where( + current_distances<=cutoff, 1, 0 + ) + df_inlier = pd.DataFrame( + {key+'_IsInlier':is_inlier}, index=distances.index + ) + inliers = pd.concat( + [inliers, df_inlier], axis=1 + ) + + self.data_dictionary['train_features'] = pd.DataFrame( + data=inliers.sum(axis=1)/nmb_previous_points, + columns=['inlier_metric'], + index = train_ft_df.index + ) + + percent_outliers = np.round( + 100*(1-self.data_dictionary['iniler_metric'].sum()/ + len(train_ft_df.index)), 2 + ) + logger.info('{percent_outliers}%% of data points were identified as outliers') + + return None def find_features(self, dataframe: DataFrame) -> None: """ From b11742a4c5a7130d7aabc0d20c343372f7911379 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 18 Aug 2022 19:15:29 +0200 Subject: [PATCH 02/31] integrate inlier metric function --- freqtrade/freqai/data_kitchen.py | 85 ++++++++++++++++++---------- freqtrade/freqai/freqai_interface.py | 36 ++++++++---- 2 files changed, 79 insertions(+), 42 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 62e353949..80919626c 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -723,81 +723,104 @@ class FreqaiDataKitchen: ) return - - def compute_inlier_metric(self) -> None: + + def compute_inlier_metric(self, set_='train') -> None: """ - - Compute inlier metric from backwards distance distributions. 
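As a rough orientation for this first pair of commits, here is a hedged, self-contained sketch of the idea (illustration only, not part of the patch; `features`, `window` and `weibull_pct` are invented stand-ins for the training features and the `InlierMetric_nmb_points` / `InlierMetric_weib_perc` settings): fit a Weibull distribution to the distances between each timepoint and its i-th predecessor, and count how many of those distances stay below a percentile cutoff.

```python
# Standalone sketch of the inlier-metric idea (illustration only).
import numpy as np
from scipy import stats
from sklearn.metrics.pairwise import pairwise_distances

rng = np.random.default_rng(0)
features = rng.normal(size=(200, 5))   # toy feature matrix, one row per timepoint
window, weibull_pct = 10, 0.95         # stand-ins for the configured lookback and percentile

dists = pairwise_distances(features)   # full pairwise distance matrix

inlier_votes = np.zeros(len(features) - window)
for i in range(1, window + 1):
    # distance of every timepoint to its i-th predecessor
    backward = np.array([dists[t, t - i] for t in range(window, len(features))])
    params = stats.weibull_min.fit(backward)
    cutoff = stats.weibull_min.ppf(weibull_pct, *params)
    inlier_votes += (backward <= cutoff).astype(int)

inlier_metric = inlier_votes / window  # 1.0 means a point fits all of its recent history
print(inlier_metric[-5:])
```

Later commits in this series replace the hard 0/1 cutoff with Weibull CDF quantiles and append the result to the feature set as an `inlier_metric` column.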
- This metric defines how well features from a timepoint fit + + Compute inlier metric from backwards distance distributions. + This metric defines how well features from a timepoint fit into previous timepoints. """ import scipy.stats as ss - - nmb_previous_points = self.data['InlierMetric_nmb_points'] - weibull_percentile = self.data['InlierMetric_weib_perc'] - train_ft_df = self.data_dictionary['train_features'] - train_ft_df_reindexed = train_ft_df.reindex( - index=np.flip(train_ft_df.index) + no_prev_pts = self.freqai_config["feature_parameters"]["inlier_metric_window"] + weib_pct = self.freqai_config["feature_parameters"]["inlier_metric_weibull_cutoff"] + + if set_ == 'train': + compute_df = copy.deepcopy(self.data_dictionary['train_features']) + elif set_ == 'test': + compute_df = copy.deepcopy(self.data_dictionary['test_features']) + else: + compute_df = copy.deepcopy(self.data_dictionary['prediction_features']) + + compute_df_reindexed = compute_df.reindex( + index=np.flip(compute_df.index) ) pairwise = pd.DataFrame( np.triu( - pairwise_distances(train_ft_df_reindexed, n_jobs=self.thread_count) + pairwise_distances(compute_df_reindexed, n_jobs=self.thread_count) ), - columns=train_ft_df_reindexed.index, - index=train_ft_df_reindexed.index + columns=compute_df_reindexed.index, + index=compute_df_reindexed.index ) pairwise = pairwise.round(5) column_labels = [ - '{}{}'.format('d', i) for i in range(1, nmb_previous_points+1) + '{}{}'.format('d', i) for i in range(1, no_prev_pts + 1) ] distances = pd.DataFrame( - columns=column_labels, index=train_ft_df.index + columns=column_labels, index=compute_df.index ) - for index in train_ft_df.index[nmb_previous_points]: + + for index in compute_df.index[no_prev_pts:]: current_row = pairwise.loc[[index]] current_row_no_zeros = current_row.loc[ - :, (current_row!=0).any(axis=0) + :, (current_row != 0).any(axis=0) ] distances.loc[[index]] = current_row_no_zeros.iloc[ - :, :nmb_previous_points + :, :no_prev_pts ] distances = distances.replace([np.inf, -np.inf], np.nan) drop_index = pd.isnull(distances).any(1) - distances = distances[drop_index==0] + distances = distances[drop_index == 0] inliers = pd.DataFrame(index=distances.index) for key in distances.keys(): current_distances = distances[key].dropna() fit_params = ss.weibull_min.fit(current_distances) - cutoff = ss.weibull_min.ppf(weibull_percentile, *fit_params) + cutoff = ss.weibull_min.ppf(weib_pct, *fit_params) is_inlier = np.where( - current_distances<=cutoff, 1, 0 + current_distances <= cutoff, 1, 0 ) df_inlier = pd.DataFrame( - {key+'_IsInlier':is_inlier}, index=distances.index + {key + '_IsInlier': is_inlier}, index=distances.index ) inliers = pd.concat( [inliers, df_inlier], axis=1 ) - self.data_dictionary['train_features'] = pd.DataFrame( - data=inliers.sum(axis=1)/nmb_previous_points, + inlier_metric = pd.DataFrame( + data=inliers.sum(axis=1) / no_prev_pts, columns=['inlier_metric'], - index = train_ft_df.index + index=compute_df.index ) - percent_outliers = np.round( - 100*(1-self.data_dictionary['iniler_metric'].sum()/ - len(train_ft_df.index)), 2 - ) - logger.info('{percent_outliers}%% of data points were identified as outliers') + inlier_metric = 2 * (inlier_metric - inlier_metric.min()) / \ + (inlier_metric.max() - inlier_metric.min()) - 1 + + if set_ in ('train', 'test'): + inlier_metric = inlier_metric.iloc[no_prev_pts:] + compute_df = compute_df.iloc[no_prev_pts:] + self.remove_beginning_points_from_data_dict(set_, no_prev_pts) + self.data_dictionary[f'{set_}_features'] = 
pd.concat( + [compute_df, inlier_metric], axis=1) + else: + self.data_dictionary['prediction_features'] = pd.concat( + [compute_df, inlier_metric], axis=1) + self.data_dictionary['prediction_features'].fillna(0, inplace=True) return None + def remove_beginning_points_from_data_dict(self, set_='train', no_prev_pts: int = 10): + features = self.data_dictionary[f'{set_}_features'] + weights = self.data_dictionary[f'{set_}_weights'] + labels = self.data_dictionary[f'{set_}_labels'] + self.data_dictionary[f'{set_}_weights'] = weights[no_prev_pts:] + self.data_dictionary[f'{set_}_features'] = features.iloc[no_prev_pts:] + self.data_dictionary[f'{set_}_labels'] = labels.iloc[no_prev_pts:] + def find_features(self, dataframe: DataFrame) -> None: """ Find features in the strategy provided dataframe diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 4106f24e0..e6e019b66 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -66,7 +66,6 @@ class IFreqaiModel(ABC): "data_split_parameters", {}) self.model_training_parameters: Dict[str, Any] = config.get("freqai", {}).get( "model_training_parameters", {}) - self.feature_parameters = config.get("freqai", {}).get("feature_parameters") self.retrain = False self.first = True self.set_full_path() @@ -74,11 +73,14 @@ class IFreqaiModel(ABC): self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode) self.identifier: str = self.freqai_info.get("identifier", "no_id_provided") self.scanning = False + self.ft_params = self.freqai_info["feature_parameters"] self.keras: bool = self.freqai_info.get("keras", False) - if self.keras and self.freqai_info.get("feature_parameters", {}).get("DI_threshold", 0): - self.freqai_info["feature_parameters"]["DI_threshold"] = 0 + if self.keras and self.ft_params.get("DI_threshold", 0): + self.ft_params["DI_threshold"] = 0 logger.warning("DI threshold is not configured for Keras models yet. Deactivating.") self.CONV_WIDTH = self.freqai_info.get("conv_width", 2) + if self.ft_params.get("inlier_metric_window", 0): + self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2 self.pair_it = 0 self.pair_it_train = 0 self.total_pairs = len(self.config.get("exchange", {}).get("pair_whitelist")) @@ -403,18 +405,20 @@ class IFreqaiModel(ABC): example of how outlier data points are dropped from the dataframe used for training. 
""" - if self.freqai_info["feature_parameters"].get( + ft_params = self.freqai_info["feature_parameters"] + + if ft_params.get( "principal_component_analysis", False ): dk.principal_component_analysis() - if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False): + if ft_params.get("use_SVM_to_remove_outliers", False): dk.use_SVM_to_remove_outliers(predict=False) - if self.freqai_info["feature_parameters"].get("DI_threshold", 0): + if ft_params.get("DI_threshold", 0): dk.data["avg_mean_dist"] = dk.compute_distances() - if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False): + if ft_params.get("use_DBSCAN_to_remove_outliers", False): if dk.pair in self.dd.old_DBSCAN_eps: eps = self.dd.old_DBSCAN_eps[dk.pair] else: @@ -422,6 +426,11 @@ class IFreqaiModel(ABC): dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps) self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps'] + if ft_params.get('inlier_metric_window', 0): + dk.compute_inlier_metric(set_='train') + if self.freqai_info["data_split_parameters"]["test_size"] > 0: + dk.compute_inlier_metric(set_='test') + def data_cleaning_predict(self, dk: FreqaiDataKitchen, dataframe: DataFrame) -> None: """ Base data cleaning method for predict. @@ -433,18 +442,23 @@ class IFreqaiModel(ABC): of how the do_predict vector is modified. do_predict is ultimately passed back to strategy for buy signals. """ - if self.freqai_info["feature_parameters"].get( + ft_params = self.freqai_info["feature_parameters"] + + if ft_params.get('inlier_metric_window', 0): + dk.compute_inlier_metric(set_='predict') + + if ft_params.get( "principal_component_analysis", False ): dk.pca_transform(dataframe) - if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False): + if ft_params.get("use_SVM_to_remove_outliers", False): dk.use_SVM_to_remove_outliers(predict=True) - if self.freqai_info["feature_parameters"].get("DI_threshold", 0): + if ft_params.get("DI_threshold", 0): dk.check_if_pred_in_training_spaces() - if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False): + if ft_params.get("use_DBSCAN_to_remove_outliers", False): dk.use_DBSCAN_to_remove_outliers(predict=True) def model_exists( From a58dd0bbf9347a69fecde43e5735bc1851de3b9b Mon Sep 17 00:00:00 2001 From: robcaulk Date: Fri, 19 Aug 2022 18:35:24 +0200 Subject: [PATCH 03/31] add noise feature, improve docstrings --- freqtrade/freqai/data_kitchen.py | 11 +++++++++++ freqtrade/freqai/freqai_interface.py | 18 +++++++----------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 80919626c..0158996c7 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -821,6 +821,17 @@ class FreqaiDataKitchen: self.data_dictionary[f'{set_}_features'] = features.iloc[no_prev_pts:] self.data_dictionary[f'{set_}_labels'] = labels.iloc[no_prev_pts:] + def add_noise_to_training_features(self) -> None: + """ + Add noise to train features to reduce the risk of overfitting. 
+ """ + mu = 0 # no shift + sigma = self.freqai_config["feature_parameters"]["noise_standard_deviation"] + compute_df = self.data_dictionary['train_features'] + noise = np.random.normal(mu, sigma, [compute_df.shape[0], compute_df.shape[1]]) + self.data_dictionary['train_features'] += noise + return + def find_features(self, dataframe: DataFrame) -> None: """ Find features in the strategy provided dataframe diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index e6e019b66..239cb1869 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -399,10 +399,9 @@ class IFreqaiModel(ABC): def data_cleaning_train(self, dk: FreqaiDataKitchen) -> None: """ - Base data cleaning method for train - Any function inside this method should drop training data points from the filtered_dataframe - based on user decided logic. See FreqaiDataKitchen::use_SVM_to_remove_outliers() for an - example of how outlier data points are dropped from the dataframe used for training. + Base data cleaning method for train. + Functions here improve/modify the input data by identifying outliers, + computing additional metrics, adding noise, reducing dimensionality etc. """ ft_params = self.freqai_info["feature_parameters"] @@ -431,16 +430,13 @@ class IFreqaiModel(ABC): if self.freqai_info["data_split_parameters"]["test_size"] > 0: dk.compute_inlier_metric(set_='test') + if self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0): + dk.add_noise_to_training_features() + def data_cleaning_predict(self, dk: FreqaiDataKitchen, dataframe: DataFrame) -> None: """ Base data cleaning method for predict. - These functions each modify dk.do_predict, which is a dataframe with equal length - to the number of candles coming from and returning to the strategy. Inside do_predict, - 1 allows prediction and < 0 signals to the strategy that the model is not confident in - the prediction. - See FreqaiDataKitchen::remove_outliers() for an example - of how the do_predict vector is modified. do_predict is ultimately passed back to strategy - for buy signals. + Functions here are complementary to the functions of data_cleaning_train. 
""" ft_params = self.freqai_info["feature_parameters"] From 7f52908e87a61ed60c7fe3f2b9b933b6211c60da Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 30 Aug 2022 18:55:58 +0200 Subject: [PATCH 04/31] ensure the lost points are prepended for FreqUI --- freqtrade/freqai/freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 239cb1869..893f960ea 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -587,7 +587,7 @@ class IFreqaiModel(ABC): # # for keras type models, the conv_window needs to be prepended so # # viewing is correct in frequi - if self.freqai_info.get('keras', False): + if self.freqai_info.get('keras', False) or self.ft_params.get('inlier_metric_window', 0): n_lost_points = self.freqai_info.get('conv_width', 2) zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))), columns=hist_preds_df.columns) From 0b8482360f0ec1aea6e61ac08b3746584c03800a Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 30 Aug 2022 20:32:49 +0200 Subject: [PATCH 05/31] add documentation for inlier metric --- docs/freqai.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/freqai.md b/docs/freqai.md index e4a451324..3fc76e9f0 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -115,6 +115,8 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `use_DBSCAN_to_remove_outliers` | Cluster data using DBSCAN to identify and remove outliers from training and prediction data. See details about how it works [here](#removing-outliers-with-dbscan).
**Datatype:** Boolean. | `outlier_protection_percentage` | If more than `outlier_protection_percentage` fraction of points are removed as outliers, FreqAI will log a warning message and ignore outlier detection while keeping the original dataset intact.
**Datatype:** float. Default: `30` | `reverse_train_test_order` | If true, FreqAI will train on the latest data split and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, users should be careful to understand unorthodox nature of this parameter before employing it.
**Datatype:** bool. Default: False +| `inlier_metric_window` | If set, FreqAI will add the `inlier_metric` to the training feature set and set the lookback to be the `inlier_metric_window`. Details of how the `inlier_metric` is computed can be found [here](#using-the-inliermetric)
**Datatype:** int. Default: 0 +| `inlier_metric_weibull_cutoff` | If the `inlier_metric_window` is set, this value is used to determine the tail cutoff in the weibull distribution fit. Details of how the `inlier_metric` is computed can be found [here](#using-the-inliermetric)
**Datatype:** float. Default: 0.95 | | **Data split parameters** | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website).
**Datatype:** Dictionary. | `test_size` | Fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1. @@ -636,6 +638,20 @@ testing; the other points are used for training. The test data is used to evaluate the performance of the model after training. If the test score is high, the model is able to capture the behavior of the data well. If the test score is low, either the model either does not capture the complexity of the data, the test data is significantly different from the train data, or a different model should be used. +### Using the `inlier_metric` + +The `inlier_metric` is a metric aimed at quantifying how different a prediction data point is from the most recent historic data points. + +User can set `inlier_metric_window` to set the look back window. FreqAI will compute the distance between the present prediction point and each of the previous data points (total of `inlier_metric_window` points). + +This function goes one step further - during training, it computes the `inlier_metric` for all training data points and builds weibull distributions for each each lookback point. If one of the distances falls in the tail of the respective weibull distribution, it is considered an "outlier." If the distance to the lookback point is not in the tail, it is considered an "inlier." Inliers receive a value of 1, and outliers receive a value of 0. + +FreqAI adds this `inlier_metric` score to the training features! Thus, your model is trained to recognize how this temporal inlier metric is evolving. + +Users can control the weibull threshold using the `inlier_metric_weibull_cutoff` + +This function does not currently remove outliers from the data set. + ### Controlling the model learning process Model training parameters are unique to the machine learning library selected by the user. FreqAI allows the user to set any parameter for any library using the `model_training_parameters` dictionary in the user configuration file. The example configuration file (found in `config_examples/config_freqai.example.json`) show some of the example parameters associated with `Catboost` and `LightGBM`, but the user can add any parameters available in those libraries. From 7e8e29e42d4c2b0eb058d9408c27ebd220eb9f68 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 30 Aug 2022 20:41:37 +0200 Subject: [PATCH 06/31] use continuous value for inlier_metric --- docs/freqai.md | 9 +++------ freqtrade/freqai/data_kitchen.py | 9 +++------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/docs/freqai.md b/docs/freqai.md index 3fc76e9f0..2c6efa3b9 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -116,7 +116,6 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `outlier_protection_percentage` | If more than `outlier_protection_percentage` fraction of points are removed as outliers, FreqAI will log a warning message and ignore outlier detection while keeping the original dataset intact.
**Datatype:** float. Default: `30` | `reverse_train_test_order` | If true, FreqAI will train on the latest data split and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, users should be careful to understand unorthodox nature of this parameter before employing it.
**Datatype:** bool. Default: False | `inlier_metric_window` | If set, FreqAI will add the `inlier_metric` to the training feature set and set the lookback to be the `inlier_metric_window`. Details of how the `inlier_metric` is computed can be found [here](#using-the-inliermetric)
**Datatype:** int. Default: 0 -| `inlier_metric_weibull_cutoff` | If the `inlier_metric_window` is set, this value is used to determine the tail cutoff in the weibull distribution fit. Details of how the `inlier_metric` is computed can be found [here](#using-the-inliermetric)
**Datatype:** float. Default: 0.95 | | **Data split parameters** | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website).
**Datatype:** Dictionary. | `test_size` | Fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1. @@ -644,13 +643,11 @@ The `inlier_metric` is a metric aimed at quantifying how different a prediction User can set `inlier_metric_window` to set the look back window. FreqAI will compute the distance between the present prediction point and each of the previous data points (total of `inlier_metric_window` points). -This function goes one step further - during training, it computes the `inlier_metric` for all training data points and builds weibull distributions for each each lookback point. If one of the distances falls in the tail of the respective weibull distribution, it is considered an "outlier." If the distance to the lookback point is not in the tail, it is considered an "inlier." Inliers receive a value of 1, and outliers receive a value of 0. +This function goes one step further - during training, it computes the `inlier_metric` for all training data points and builds weibull distributions for each each lookback point. The cumulative distribution function for the weibull distribution is used to produce a quantile for each of the data points. The quantiles for each lookback point are averaged to create the `inlier_metric`. -FreqAI adds this `inlier_metric` score to the training features! Thus, your model is trained to recognize how this temporal inlier metric is evolving. +FreqAI adds this `inlier_metric` score to the training features! In other words, your model is trained to recognize how this temporal inlier metric is related to the user set labels. -Users can control the weibull threshold using the `inlier_metric_weibull_cutoff` - -This function does not currently remove outliers from the data set. +This function does **not** remove outliers from the data set. ### Controlling the model learning process diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 0158996c7..9d4a69287 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -735,7 +735,6 @@ class FreqaiDataKitchen: import scipy.stats as ss no_prev_pts = self.freqai_config["feature_parameters"]["inlier_metric_window"] - weib_pct = self.freqai_config["feature_parameters"]["inlier_metric_weibull_cutoff"] if set_ == 'train': compute_df = copy.deepcopy(self.data_dictionary['train_features']) @@ -780,12 +779,10 @@ class FreqaiDataKitchen: for key in distances.keys(): current_distances = distances[key].dropna() fit_params = ss.weibull_min.fit(current_distances) - cutoff = ss.weibull_min.ppf(weib_pct, *fit_params) - is_inlier = np.where( - current_distances <= cutoff, 1, 0 - ) + quantiles = ss.weibull_min.cdf(current_distances, *fit_params) + df_inlier = pd.DataFrame( - {key + '_IsInlier': is_inlier}, index=distances.index + {key: quantiles}, index=distances.index ) inliers = pd.concat( [inliers, df_inlier], axis=1 From c9be66b5b639f151abb1d9a5e76267d752eddb3b Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 3 Sep 2022 15:52:29 +0200 Subject: [PATCH 07/31] increase test coverage for dk, improve function naming, extra cleaning --- freqtrade/freqai/data_kitchen.py | 30 ++++++----- freqtrade/freqai/freqai_interface.py | 10 ++-- tests/freqai/conftest.py | 32 +++++++++++ tests/freqai/test_freqai_datakitchen.py | 72 ++++++++++++++++++++++++- tests/freqai/test_freqai_interface.py | 34 ++++++------ 5 files changed, 143 insertions(+), 35 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 9d4a69287..fce9e8480 100644 --- a/freqtrade/freqai/data_kitchen.py +++ 
b/freqtrade/freqai/data_kitchen.py @@ -1,7 +1,7 @@ import copy -import datetime import logging import shutil +from datetime import datetime, timezone from pathlib import Path from typing import Any, Dict, List, Tuple @@ -345,7 +345,7 @@ class FreqaiDataKitchen: def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame: """ - Normalize a set of data using the mean and standard deviation from + Denormalize a set of data using the mean and standard deviation from the associated training data. :param df: Dataframe of predictions to be denormalized """ @@ -384,7 +384,7 @@ class FreqaiDataKitchen: config_timerange = TimeRange.parse_timerange(self.config["timerange"]) if config_timerange.stopts == 0: config_timerange.stopts = int( - datetime.datetime.now(tz=datetime.timezone.utc).timestamp() + datetime.now(tz=timezone.utc).timestamp() ) timerange_train = copy.deepcopy(full_timerange) timerange_backtest = copy.deepcopy(full_timerange) @@ -401,8 +401,8 @@ class FreqaiDataKitchen: timerange_train.stopts = timerange_train.startts + train_period_days first = False - start = datetime.datetime.utcfromtimestamp(timerange_train.startts) - stop = datetime.datetime.utcfromtimestamp(timerange_train.stopts) + start = datetime.utcfromtimestamp(timerange_train.startts) + stop = datetime.utcfromtimestamp(timerange_train.stopts) tr_training_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d")) tr_training_list_timerange.append(copy.deepcopy(timerange_train)) @@ -415,8 +415,8 @@ class FreqaiDataKitchen: if timerange_backtest.stopts > config_timerange.stopts: timerange_backtest.stopts = config_timerange.stopts - start = datetime.datetime.utcfromtimestamp(timerange_backtest.startts) - stop = datetime.datetime.utcfromtimestamp(timerange_backtest.stopts) + start = datetime.utcfromtimestamp(timerange_backtest.startts) + stop = datetime.utcfromtimestamp(timerange_backtest.stopts) tr_backtesting_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d")) tr_backtesting_list_timerange.append(copy.deepcopy(timerange_backtest)) @@ -436,8 +436,8 @@ class FreqaiDataKitchen: it is sliced down to just the present training period. """ - start = datetime.datetime.fromtimestamp(timerange.startts, tz=datetime.timezone.utc) - stop = datetime.datetime.fromtimestamp(timerange.stopts, tz=datetime.timezone.utc) + start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc) + stop = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc) df = df.loc[df["date"] >= start, :] df = df.loc[df["date"] <= stop, :] @@ -808,6 +808,8 @@ class FreqaiDataKitchen: [compute_df, inlier_metric], axis=1) self.data_dictionary['prediction_features'].fillna(0, inplace=True) + logger.info('Inlier metric computed and added to features.') + return None def remove_beginning_points_from_data_dict(self, set_='train', no_prev_pts: int = 10): @@ -948,14 +950,14 @@ class FreqaiDataKitchen: "Please indicate the end date of your desired backtesting. 
" "timerange.") # backtest_timerange.stopts = int( - # datetime.datetime.now(tz=datetime.timezone.utc).timestamp() + # datetime.now(tz=timezone.utc).timestamp() # ) backtest_timerange.startts = ( backtest_timerange.startts - backtest_period_days * SECONDS_IN_DAY ) - start = datetime.datetime.utcfromtimestamp(backtest_timerange.startts) - stop = datetime.datetime.utcfromtimestamp(backtest_timerange.stopts) + start = datetime.utcfromtimestamp(backtest_timerange.startts) + stop = datetime.utcfromtimestamp(backtest_timerange.stopts) full_timerange = start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d") self.full_path = Path( @@ -981,7 +983,7 @@ class FreqaiDataKitchen: :return: bool = If the model is expired or not. """ - time = datetime.datetime.now(tz=datetime.timezone.utc).timestamp() + time = datetime.now(tz=timezone.utc).timestamp() elapsed_time = (time - trained_timestamp) / 3600 # hours max_time = self.freqai_config.get("expiration_hours", 0) if max_time > 0: @@ -993,7 +995,7 @@ class FreqaiDataKitchen: self, trained_timestamp: int ) -> Tuple[bool, TimeRange, TimeRange]: - time = datetime.datetime.now(tz=datetime.timezone.utc).timestamp() + time = datetime.now(tz=timezone.utc).timestamp() trained_timerange = TimeRange() data_load_timerange = TimeRange() diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 893f960ea..fd0554248 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -1,10 +1,10 @@ # import contextlib -import datetime import logging import shutil import threading import time from abc import ABC, abstractmethod +from datetime import datetime from pathlib import Path from threading import Lock from typing import Any, Dict, Tuple @@ -174,7 +174,7 @@ class IFreqaiModel(ABC): if retrain: self.train_timer('start') - self.train_model_in_series( + self.extract_data_and_train_model( new_trained_timerange, pair, strategy, dk, data_load_timerange ) self.train_timer('stop') @@ -214,10 +214,10 @@ class IFreqaiModel(ABC): dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe) trained_timestamp = tr_train - tr_train_startts_str = datetime.datetime.utcfromtimestamp(tr_train.startts).strftime( + tr_train_startts_str = datetime.utcfromtimestamp(tr_train.startts).strftime( "%Y-%m-%d %H:%M:%S" ) - tr_train_stopts_str = datetime.datetime.utcfromtimestamp(tr_train.stopts).strftime( + tr_train_stopts_str = datetime.utcfromtimestamp(tr_train.stopts).strftime( "%Y-%m-%d %H:%M:%S" ) logger.info( @@ -495,7 +495,7 @@ class IFreqaiModel(ABC): Path(self.full_path, Path(self.config["config_files"][0]).name), ) - def train_model_in_series( + def extract_data_and_train_model( self, new_trained_timerange: TimeRange, pair: str, diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index dd148da77..6528347e8 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -82,6 +82,38 @@ def get_patched_freqaimodel(mocker, freqaiconf): return freqaimodel +def make_unfiltered_dataframe(mocker, freqai_conf): + freqai_conf.update({"timerange": "20180110-20180130"}) + + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange) + strategy.freqai_info = freqai_conf.get("freqai", {}) + freqai = strategy.freqai + freqai.live = True + freqai.dk = FreqaiDataKitchen(freqai_conf) + freqai.dk.pair = "ADA/BTC" + timerange = TimeRange.parse_timerange("20180110-20180130") + 
freqai.dd.load_all_pair_histories(timerange, freqai.dk) + + freqai.dd.pair_dict = MagicMock() + + data_load_timerange = TimeRange.parse_timerange("20180110-20180130") + new_timerange = TimeRange.parse_timerange("20180120-20180130") + + corr_dataframes, base_dataframes = freqai.dd.get_base_and_corr_dataframes( + data_load_timerange, freqai.dk.pair, freqai.dk + ) + + unfiltered_dataframe = freqai.dk.use_strategy_to_populate_indicators( + strategy, corr_dataframes, base_dataframes, freqai.dk.pair + ) + + unfiltered_dataframe = freqai.dk.slice_dataframe(new_timerange, unfiltered_dataframe) + + return freqai, unfiltered_dataframe + + def make_data_dictionary(mocker, freqai_conf): freqai_conf.update({"timerange": "20180110-20180130"}) diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index 9ef955695..2204e94c6 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -6,7 +6,8 @@ import pytest from freqtrade.exceptions import OperationalException from tests.conftest import log_has_re -from tests.freqai.conftest import get_patched_data_kitchen, make_data_dictionary +from tests.freqai.conftest import (get_patched_data_kitchen, make_data_dictionary, + make_unfiltered_dataframe) @pytest.mark.parametrize( @@ -94,3 +95,72 @@ def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, "SVM detected 8.46%", caplog, ) + + +def test_compute_inlier_metric(mocker, freqai_conf, caplog): + freqai = make_data_dictionary(mocker, freqai_conf) + freqai_conf['freqai']['feature_parameters'].update({"inlier_metric_window": 10}) + freqai.dk.compute_inlier_metric(set_='train') + assert log_has_re( + "Inlier metric computed and added to features.", + caplog, + ) + + +def test_add_noise_to_training_features(mocker, freqai_conf): + freqai = make_data_dictionary(mocker, freqai_conf) + freqai_conf['freqai']['feature_parameters'].update({"noise_standard_deviation": 0.1}) + freqai.dk.add_noise_to_training_features() + + +def test_remove_beginning_points_from_data_dict(mocker, freqai_conf): + freqai = make_data_dictionary(mocker, freqai_conf) + freqai.dk.remove_beginning_points_from_data_dict(set_='train') + + +def test_principal_component_analysis(mocker, freqai_conf, caplog): + freqai = make_data_dictionary(mocker, freqai_conf) + freqai.dk.principal_component_analysis() + assert log_has_re( + "reduced feature dimension by", + caplog, + ) + + +def test_normalize_data(mocker, freqai_conf): + freqai = make_data_dictionary(mocker, freqai_conf) + data_dict = freqai.dk.data_dictionary + freqai.dk.normalize_data(data_dict) + assert len(freqai.dk.data) == 56 + + +def test_filter_features(mocker, freqai_conf): + freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf) + freqai.dk.find_features(unfiltered_dataframe) + + filtered_df, labels = freqai.dk.filter_features( + unfiltered_dataframe, + freqai.dk.training_features_list, + freqai.dk.label_list, + training_filter=True, + ) + + assert len(filtered_df.columns) == 26 + + +def test_make_train_test_datasets(mocker, freqai_conf): + freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf) + freqai.dk.find_features(unfiltered_dataframe) + + features_filtered, labels_filtered = freqai.dk.filter_features( + unfiltered_dataframe, + freqai.dk.training_features_list, + freqai.dk.label_list, + training_filter=True, + ) + + data_dictionary = freqai.dk.make_train_test_datasets(features_filtered, labels_filtered) + + assert data_dictionary + 
assert len(data_dictionary) == 7 + assert len(data_dictionary['train_features'].index) == 1916 diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 792ffc467..927af2a02 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -17,7 +17,7 @@ def is_arm() -> bool: return "arm" in machine or "aarch64" in machine -def test_train_model_in_series_LightGBM(mocker, freqai_conf): +def test_extract_data_and_train_model_LightGBM(mocker, freqai_conf): freqai_conf.update({"timerange": "20180110-20180130"}) strategy = get_patched_freqai_strategy(mocker, freqai_conf) @@ -35,7 +35,8 @@ def test_train_model_in_series_LightGBM(mocker, freqai_conf): data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.train_model_in_series(new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) + freqai.extract_data_and_train_model( + new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() @@ -45,7 +46,7 @@ def test_train_model_in_series_LightGBM(mocker, freqai_conf): shutil.rmtree(Path(freqai.dk.full_path)) -def test_train_model_in_series_LightGBMMultiModel(mocker, freqai_conf): +def test_extract_data_and_train_model_LightGBMMultiModel(mocker, freqai_conf): freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"strategy": "freqai_test_multimodel_strat"}) freqai_conf.update({"freqaimodel": "LightGBMRegressorMultiTarget"}) @@ -64,7 +65,8 @@ def test_train_model_in_series_LightGBMMultiModel(mocker, freqai_conf): data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.train_model_in_series(new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) + freqai.extract_data_and_train_model( + new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) assert len(freqai.dk.label_list) == 2 assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file() @@ -77,7 +79,7 @@ def test_train_model_in_series_LightGBMMultiModel(mocker, freqai_conf): @pytest.mark.skipif(is_arm(), reason="no ARM for Catboost ...") -def test_train_model_in_series_Catboost(mocker, freqai_conf): +def test_extract_data_and_train_model_Catboost(mocker, freqai_conf): freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"freqaimodel": "CatboostRegressor"}) # freqai_conf.get('freqai', {}).update( @@ -98,8 +100,8 @@ def test_train_model_in_series_Catboost(mocker, freqai_conf): data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.train_model_in_series(new_timerange, "ADA/BTC", - strategy, freqai.dk, data_load_timerange) + freqai.extract_data_and_train_model(new_timerange, "ADA/BTC", + strategy, freqai.dk, data_load_timerange) assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").exists() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists() @@ -110,7 +112,7 @@ def test_train_model_in_series_Catboost(mocker, freqai_conf): @pytest.mark.skipif(is_arm(), reason="no ARM for Catboost ...") -def test_train_model_in_series_CatboostClassifier(mocker, freqai_conf): +def 
test_extract_data_and_train_model_CatboostClassifier(mocker, freqai_conf): freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"freqaimodel": "CatboostClassifier"}) freqai_conf.update({"strategy": "freqai_test_classifier"}) @@ -130,8 +132,8 @@ def test_train_model_in_series_CatboostClassifier(mocker, freqai_conf): data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.train_model_in_series(new_timerange, "ADA/BTC", - strategy, freqai.dk, data_load_timerange) + freqai.extract_data_and_train_model(new_timerange, "ADA/BTC", + strategy, freqai.dk, data_load_timerange) assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").exists() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists() @@ -141,7 +143,7 @@ def test_train_model_in_series_CatboostClassifier(mocker, freqai_conf): shutil.rmtree(Path(freqai.dk.full_path)) -def test_train_model_in_series_LightGBMClassifier(mocker, freqai_conf): +def test_extract_data_and_train_model_LightGBMClassifier(mocker, freqai_conf): freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"freqaimodel": "LightGBMClassifier"}) freqai_conf.update({"strategy": "freqai_test_classifier"}) @@ -161,8 +163,8 @@ def test_train_model_in_series_LightGBMClassifier(mocker, freqai_conf): data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.train_model_in_series(new_timerange, "ADA/BTC", - strategy, freqai.dk, data_load_timerange) + freqai.extract_data_and_train_model(new_timerange, "ADA/BTC", + strategy, freqai.dk, data_load_timerange) assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").exists() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists() @@ -289,7 +291,8 @@ def test_follow_mode(mocker, freqai_conf): data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.train_model_in_series(new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) + freqai.extract_data_and_train_model( + new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() @@ -338,7 +341,8 @@ def test_principal_component_analysis(mocker, freqai_conf): data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.train_model_in_series(new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) + freqai.extract_data_and_train_model( + new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_pca_object.pkl") From fa8d5b9834ab1ba872e5ca2397e24ed3df9968a7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sat, 3 Sep 2022 16:05:18 +0200 Subject: [PATCH 08/31] add documentation for noise_standard_deviation` --- docs/freqai.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/freqai.md b/docs/freqai.md index 2c6efa3b9..d504c93d6 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -116,6 +116,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `outlier_protection_percentage` | If more than `outlier_protection_percentage` 
fraction of points are removed as outliers, FreqAI will log a warning message and ignore outlier detection while keeping the original dataset intact.
**Datatype:** float. Default: `30` | `reverse_train_test_order` | If true, FreqAI will train on the latest data split and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, users should be careful to understand unorthodox nature of this parameter before employing it.
**Datatype:** bool. Default: False | `inlier_metric_window` | If set, FreqAI will add the `inlier_metric` to the training feature set and set the lookback to be the `inlier_metric_window`. Details of how the `inlier_metric` is computed can be found [here](#using-the-inliermetric)
**Datatype:** int. Default: 0
+| `noise_standard_deviation` | If > 0, FreqAI adds noise to the training features. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. Value should be kept relative to the normalized space between -1 and 1. In other words, since data is always normalized between -1 and 1 in FreqAI, the user can expect a `noise_standard_deviation: 0.05` to see 32% of data randomly increased/decreased by more than 2.5%. Good for preventing overfitting.
**Datatype:** int. Default: 0 | | **Data split parameters** | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website).
**Datatype:** Dictionary. | `test_size` | Fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1. From 4b28d0495f46337700686f1b430827336285511a Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 6 Sep 2022 19:46:58 +0200 Subject: [PATCH 09/31] fix timestamping, move imports, add words to doc --- docs/freqai.md | 2 +- freqtrade/freqai/data_kitchen.py | 26 ++++++++++++-------------- freqtrade/freqai/freqai_interface.py | 15 +++++++-------- tests/freqai/conftest.py | 12 +++++------- 4 files changed, 25 insertions(+), 30 deletions(-) diff --git a/docs/freqai.md b/docs/freqai.md index d504c93d6..a8379106a 100644 --- a/docs/freqai.md +++ b/docs/freqai.md @@ -116,7 +116,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `outlier_protection_percentage` | If more than `outlier_protection_percentage` fraction of points are removed as outliers, FreqAI will log a warning message and ignore outlier detection while keeping the original dataset intact.
**Datatype:** float. Default: `30` | `reverse_train_test_order` | If true, FreqAI will train on the latest data split and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, users should be careful to understand unorthodox nature of this parameter before employing it.
**Datatype:** bool. Default: False | `inlier_metric_window` | If set, FreqAI will add the `inlier_metric` to the training feature set and set the lookback to be the `inlier_metric_window`. Details of how the `inlier_metric` is computed can be found [here](#using-the-inliermetric)
**Datatype:** int. Default: 0
-| `noise_standard_deviation` | If > 0, FreqAI adds noise to the training features. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. Value should be kept relative to the normalized space between -1 and 1. In other words, since data is always normalized between -1 and 1 in FreqAI, the user can expect a `noise_standard_deviation: 0.05` to see 32% of data randomly increased/decreased by more than 2.5%. Good for preventing overfitting.
**Datatype:** int. Default: 0
+| `noise_standard_deviation` | If > 0, FreqAI adds noise to the training features. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. Value should be kept relative to the normalized space between -1 and 1. In other words, since data is always normalized between -1 and 1 in FreqAI, the user can expect a `noise_standard_deviation: 0.05` to see 32% of data randomly increased/decreased by more than 2.5% (i.e. the percent of data falling outside the first standard deviation). Good for preventing overfitting.
**Datatype:** int. Default: 0 | | **Data split parameters** | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website).
**Datatype:** Dictionary. | `test_size` | Fraction of data that should be used for testing instead of training.
**Datatype:** Positive float < 1. diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index fce9e8480..8ef2d6aea 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -2,12 +2,14 @@ import copy import logging import shutil from datetime import datetime, timezone +from math import cos, sin from pathlib import Path from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd +import scipy.stats as stats from pandas import DataFrame from sklearn import linear_model from sklearn.cluster import DBSCAN @@ -401,8 +403,8 @@ class FreqaiDataKitchen: timerange_train.stopts = timerange_train.startts + train_period_days first = False - start = datetime.utcfromtimestamp(timerange_train.startts) - stop = datetime.utcfromtimestamp(timerange_train.stopts) + start = datetime.fromtimestamp(timerange_train.startts, tz=timezone.utc) + stop = datetime.fromtimestamp(timerange_train.stopts, tz=timezone.utc) tr_training_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d")) tr_training_list_timerange.append(copy.deepcopy(timerange_train)) @@ -415,8 +417,8 @@ class FreqaiDataKitchen: if timerange_backtest.stopts > config_timerange.stopts: timerange_backtest.stopts = config_timerange.stopts - start = datetime.utcfromtimestamp(timerange_backtest.startts) - stop = datetime.utcfromtimestamp(timerange_backtest.stopts) + start = datetime.fromtimestamp(timerange_backtest.startts, tz=timezone.utc) + stop = datetime.fromtimestamp(timerange_backtest.stopts, tz=timezone.utc) tr_backtesting_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d")) tr_backtesting_list_timerange.append(copy.deepcopy(timerange_backtest)) @@ -630,8 +632,6 @@ class FreqaiDataKitchen: is an outlier. """ - from math import cos, sin - if predict: if not self.data['DBSCAN_eps']: return @@ -732,8 +732,6 @@ class FreqaiDataKitchen: into previous timepoints. 
""" - import scipy.stats as ss - no_prev_pts = self.freqai_config["feature_parameters"]["inlier_metric_window"] if set_ == 'train': @@ -778,8 +776,8 @@ class FreqaiDataKitchen: inliers = pd.DataFrame(index=distances.index) for key in distances.keys(): current_distances = distances[key].dropna() - fit_params = ss.weibull_min.fit(current_distances) - quantiles = ss.weibull_min.cdf(current_distances, *fit_params) + fit_params = stats.weibull_min.fit(current_distances) + quantiles = stats.weibull_min.cdf(current_distances, *fit_params) df_inlier = pd.DataFrame( {key: quantiles}, index=distances.index @@ -794,8 +792,8 @@ class FreqaiDataKitchen: index=compute_df.index ) - inlier_metric = 2 * (inlier_metric - inlier_metric.min()) / \ - (inlier_metric.max() - inlier_metric.min()) - 1 + inlier_metric = (2 * (inlier_metric - inlier_metric.min()) / + (inlier_metric.max() - inlier_metric.min()) - 1) if set_ in ('train', 'test'): inlier_metric = inlier_metric.iloc[no_prev_pts:] @@ -956,8 +954,8 @@ class FreqaiDataKitchen: backtest_timerange.startts = ( backtest_timerange.startts - backtest_period_days * SECONDS_IN_DAY ) - start = datetime.utcfromtimestamp(backtest_timerange.startts) - stop = datetime.utcfromtimestamp(backtest_timerange.stopts) + start = datetime.fromtimestamp(backtest_timerange.startts, tz=timezone.utc) + stop = datetime.fromtimestamp(backtest_timerange.stopts, tz=timezone.utc) full_timerange = start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d") self.full_path = Path( diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index fd0554248..9b3e853ef 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -1,10 +1,9 @@ -# import contextlib import logging import shutil import threading import time from abc import ABC, abstractmethod -from datetime import datetime +from datetime import datetime, timezone from pathlib import Path from threading import Lock from typing import Any, Dict, Tuple @@ -214,12 +213,12 @@ class IFreqaiModel(ABC): dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe) trained_timestamp = tr_train - tr_train_startts_str = datetime.utcfromtimestamp(tr_train.startts).strftime( - "%Y-%m-%d %H:%M:%S" - ) - tr_train_stopts_str = datetime.utcfromtimestamp(tr_train.stopts).strftime( - "%Y-%m-%d %H:%M:%S" - ) + tr_train_startts_str = datetime.fromtimestamp( + tr_train.startts, + tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S") + tr_train_stopts_str = datetime.fromtimestamp( + tr_train.stopts, + tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S") logger.info( f"Training {metadata['pair']}, {self.pair_it}/{self.total_pairs} pairs" f" from {tr_train_startts_str} to {tr_train_stopts_str}, {train_it}/{total_trains} " diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index 6528347e8..ffdc52ebc 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -93,12 +93,11 @@ def make_unfiltered_dataframe(mocker, freqai_conf): freqai.live = True freqai.dk = FreqaiDataKitchen(freqai_conf) freqai.dk.pair = "ADA/BTC" - timerange = TimeRange.parse_timerange("20180110-20180130") - freqai.dd.load_all_pair_histories(timerange, freqai.dk) + data_load_timerange = TimeRange.parse_timerange("20180110-20180130") + freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk) freqai.dd.pair_dict = MagicMock() - - data_load_timerange = TimeRange.parse_timerange("20180110-20180130") + new_timerange = TimeRange.parse_timerange("20180120-20180130") corr_dataframes, base_dataframes = 
freqai.dd.get_base_and_corr_dataframes( @@ -125,12 +124,11 @@ def make_data_dictionary(mocker, freqai_conf): freqai.live = True freqai.dk = FreqaiDataKitchen(freqai_conf) freqai.dk.pair = "ADA/BTC" - timerange = TimeRange.parse_timerange("20180110-20180130") - freqai.dd.load_all_pair_histories(timerange, freqai.dk) + data_load_timerange = TimeRange.parse_timerange("20180110-20180130") + freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk) freqai.dd.pair_dict = MagicMock() - data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") corr_dataframes, base_dataframes = freqai.dd.get_base_and_corr_dataframes( From e83c9b276dab1659da3ea245792fe858b43ed054 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 6 Sep 2022 19:56:52 +0200 Subject: [PATCH 10/31] fix whitespace --- freqtrade/freqai/data_kitchen.py | 2 +- tests/freqai/conftest.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 8ef2d6aea..471634c85 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,7 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd -import scipy.stats as stats +from scipy import stats from pandas import DataFrame from sklearn import linear_model from sklearn.cluster import DBSCAN diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index ffdc52ebc..2bd744455 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -97,7 +97,7 @@ def make_unfiltered_dataframe(mocker, freqai_conf): freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk) freqai.dd.pair_dict = MagicMock() - + new_timerange = TimeRange.parse_timerange("20180120-20180130") corr_dataframes, base_dataframes = freqai.dd.get_base_and_corr_dataframes( From d44296783e6264e25a3c20cfc7f5e104883f8ce2 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Tue, 6 Sep 2022 20:10:12 +0200 Subject: [PATCH 11/31] isort datakitchen --- freqtrade/freqai/data_kitchen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 471634c85..2ed0d73af 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,8 +9,8 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd -from scipy import stats from pandas import DataFrame +from scipy import stats from sklearn import linear_model from sklearn.cluster import DBSCAN from sklearn.metrics.pairwise import pairwise_distances From 95a33ab2e639d8510c45e731946c5ed2a9189493 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 7 Sep 2022 06:34:03 +0200 Subject: [PATCH 12/31] Add amount_to_contract helper in the exchange --- freqtrade/exchange/exchange.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/freqtrade/exchange/exchange.py b/freqtrade/exchange/exchange.py index c3dca43a8..af072d555 100644 --- a/freqtrade/exchange/exchange.py +++ b/freqtrade/exchange/exchange.py @@ -446,6 +446,15 @@ class Exchange: contract_size = self.get_contract_size(pair) return contracts_to_amount(num_contracts, contract_size) + def amount_to_contract_precision(self, pair: str, amount: float) -> float: + """ + Helper wrapper around amount_to_contract_precision + """ + contract_size = self.get_contract_size(pair) + + return amount_to_contract_precision(amount, 
self.get_precision_amount(pair), + self.precisionMode, contract_size) + def set_sandbox(self, api: ccxt.Exchange, exchange_config: dict, name: str) -> None: if exchange_config.get('sandbox'): if api.urls.get('test'): From 83d9f3aeba221bb7a1934a193de66bb4c3887019 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 7 Sep 2022 06:35:58 +0200 Subject: [PATCH 13/31] Add test showing #7365 --- tests/test_integration.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tests/test_integration.py b/tests/test_integration.py index dd3488f81..77ed822d1 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -485,7 +485,7 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog) -> Non assert len(trade.orders) == 1 assert pytest.approx(trade.stake_amount) == 60 assert pytest.approx(trade.amount) == 30.0 - assert log_has_re("Remaining amount of 1.6.* would be too small.", caplog) + assert log_has_re("Remaining amount of 1.6.* would be smaller than the minimum of 10.", caplog) freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-20) @@ -504,9 +504,21 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog) -> Non freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-50) freqtrade.process() assert log_has_re("Adjusting amount to trade.amount as it is higher.*", caplog) - assert log_has_re("Remaining amount of 0.0 would be too small.", caplog) + assert log_has_re("Remaining amount of 0.0 would be smaller than the minimum of 10.", caplog) trade = Trade.get_trades().first() assert len(trade.orders) == 2 assert trade.orders[-1].ft_order_side == 'sell' assert pytest.approx(trade.stake_amount) == 40.198 assert trade.is_open + + # use amount that would trunc to 0.0 once selling + mocker.patch("freqtrade.exchange.Exchange.amount_to_contract_precision", + lambda s, p, v: round(v, 1)) + freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-0.01) + freqtrade.process() + trade = Trade.get_trades().first() + assert len(trade.orders) == 2 + assert trade.orders[-1].ft_order_side == 'sell' + assert pytest.approx(trade.stake_amount) == 40.198 + assert trade.is_open + assert log_has_re('Amount to sell is 0.0 due to exchange limits - not selling.', caplog) From 4d69df08dde96d067d9daebbf2db3a61a24dd44a Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 7 Sep 2022 06:43:08 +0200 Subject: [PATCH 14/31] trunc to amount precision before checking valid partial exits closes #7368 --- freqtrade/freqtradebot.py | 11 +++++++++-- freqtrade/optimize/backtesting.py | 6 +++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index 1d171ae89..a2f39afd6 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -583,7 +583,9 @@ class FreqtradeBot(LoggingMixin): if stake_amount is not None and stake_amount < 0.0: # We should decrease our position - amount = abs(float(FtPrecise(stake_amount) / FtPrecise(current_exit_rate))) + amount = self.exchange.amount_to_contract_precision( + trade.pair, + abs(float(FtPrecise(stake_amount) / FtPrecise(current_exit_rate)))) if amount > trade.amount: # This is currently ineffective as remaining would become < min tradable # Fixing this would require checking for 0.0 there - @@ -592,9 +594,14 @@ class FreqtradeBot(LoggingMixin): f"Adjusting amount to trade.amount as it is higher. 
{amount} > {trade.amount}") amount = trade.amount + if amount == 0.0: + logger.info("Amount to sell is 0.0 due to exchange limits - not selling.") + return + remaining = (trade.amount - amount) * current_exit_rate if remaining < min_exit_stake: - logger.info(f'Remaining amount of {remaining} would be too small.') + logger.info(f"Remaining amount of {remaining} would be smaller " + f"than the minimum of {min_exit_stake}.") return self.execute_trade_exit(trade, current_exit_rate, exit_check=ExitCheckTuple( diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index 8f6b6b332..97418b72c 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -537,7 +537,11 @@ class Backtesting: return pos_trade if stake_amount is not None and stake_amount < 0.0: - amount = abs(stake_amount) / current_rate + amount = amount_to_contract_precision( + abs(stake_amount) / current_rate, trade.amount_precision, + self.precision_mode, trade.contract_size) + if amount == 0.0: + return trade if amount > trade.amount: # This is currently ineffective as remaining would become < min tradable amount = trade.amount From 3454a52b9523ed567f0c009cc7b3bf8c6100b75f Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 7 Sep 2022 06:55:22 +0200 Subject: [PATCH 15/31] Explicitly test amount_to_contract_precision --- tests/exchange/test_exchange.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tests/exchange/test_exchange.py b/tests/exchange/test_exchange.py index 3352019a9..3b903f8ee 100644 --- a/tests/exchange/test_exchange.py +++ b/tests/exchange/test_exchange.py @@ -4456,6 +4456,39 @@ def test__amount_to_contracts( assert result_amount == param_amount +@pytest.mark.parametrize('pair,amount,expected_spot,expected_fut', [ + # Contract size of 0.01 + ('ADA/USDT:USDT', 40, 40, 40), + ('ADA/USDT:USDT', 10.4445555, 10.4, 10.444), + ('LTC/ETH', 30, 30, 30), + ('LTC/USD', 30, 30, 30), + # contract size of 10 + ('ETH/USDT:USDT', 10.111, 10.1, 10), + ('ETH/USDT:USDT', 10.188, 10.1, 10), + ('ETH/USDT:USDT', 10.988, 10.9, 10), +]) +def test_amount_to_contract_precision( + mocker, + default_conf, + pair, + amount, + expected_spot, + expected_fut, +): + api_mock = MagicMock() + default_conf['trading_mode'] = 'spot' + default_conf['margin_mode'] = 'isolated' + exchange = get_patched_exchange(mocker, default_conf, api_mock) + + result_size = exchange.amount_to_contract_precision(pair, amount) + assert result_size == expected_spot + + default_conf['trading_mode'] = 'futures' + exchange = get_patched_exchange(mocker, default_conf, api_mock) + result_size = exchange.amount_to_contract_precision(pair, amount) + assert result_size == expected_fut + + @pytest.mark.parametrize('exchange_name,open_rate,is_short,trading_mode,margin_mode', [ # Bittrex ('bittrex', 2.0, False, 'spot', None), From 2c9b7659539ddbf9063f69f4a06429c3790552fc Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Wed, 7 Sep 2022 09:35:37 -0600 Subject: [PATCH 16/31] add suffix parameter --- freqtrade/strategy/strategy_helper.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py index 43728dc1f..55afbf7a8 100644 --- a/freqtrade/strategy/strategy_helper.py +++ b/freqtrade/strategy/strategy_helper.py @@ -1,3 +1,5 @@ +from typing import Optional + import pandas as pd from freqtrade.exchange import timeframe_to_minutes @@ -6,7 +8,8 @@ from freqtrade.exchange import 
timeframe_to_minutes def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame, timeframe: str, timeframe_inf: str, ffill: bool = True, append_timeframe: bool = True, - date_column: str = 'date') -> pd.DataFrame: + date_column: str = 'date', + suffix: Optional[str] = None) -> pd.DataFrame: """ Correctly merge informative samples to the original dataframe, avoiding lookahead bias. @@ -50,10 +53,17 @@ def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame, # Rename columns to be unique date_merge = 'date_merge' - if append_timeframe: + if append_timeframe and not suffix: date_merge = f'date_merge_{timeframe_inf}' informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns] + elif suffix: + date_merge = f'date_merge_{suffix}' + informative.columns = [f"{col}_{suffix}" for col in informative.columns] + + elif suffix and append_timeframe: + raise ValueError("You can not specify `append_timeframe` as True and a `suffix`.") + # Combine the 2 dataframes # all indicators on the informative sample MUST be calculated before this point if ffill: From 322f00e3e8c7c183e350e84e4c740e0d34130c68 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 7 Sep 2022 18:19:25 +0200 Subject: [PATCH 17/31] Fix empty entry message if order doesn't fill immediately closes #7368 --- freqtrade/freqtradebot.py | 2 +- tests/test_freqtradebot.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index a2f39afd6..ec32cae0e 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -919,7 +919,7 @@ class FreqtradeBot(LoggingMixin): 'stake_amount': trade.stake_amount, 'stake_currency': self.config['stake_currency'], 'fiat_currency': self.config.get('fiat_display_currency', None), - 'amount': order.safe_amount_after_fee, + 'amount': order.safe_amount_after_fee if fill else order.amount, 'open_date': trade.open_date or datetime.utcnow(), 'current_rate': current_rate, 'sub_trade': sub_trade, diff --git a/tests/test_freqtradebot.py b/tests/test_freqtradebot.py index aff0504b3..30b0b75b6 100644 --- a/tests/test_freqtradebot.py +++ b/tests/test_freqtradebot.py @@ -3655,6 +3655,7 @@ def test_may_execute_trade_exit_after_stoploss_on_exchange_hit( assert trade.exit_reason == ExitType.STOPLOSS_ON_EXCHANGE.value assert rpc_mock.call_count == 3 assert rpc_mock.call_args_list[0][0][0]['type'] == RPCMessageType.ENTRY + assert rpc_mock.call_args_list[0][0][0]['amount'] > 20 assert rpc_mock.call_args_list[1][0][0]['type'] == RPCMessageType.ENTRY_FILL assert rpc_mock.call_args_list[2][0][0]['type'] == RPCMessageType.EXIT_FILL From ea7bdac9edc7b3f5c5e039e509b8d368f7045a3e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 7 Sep 2022 18:45:16 +0200 Subject: [PATCH 18/31] ensure inlier metric can be combined with other cleaning methods --- freqtrade/freqai/freqai_interface.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 5ccc9d1b2..32e42e115 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -427,6 +427,11 @@ class IFreqaiModel(ABC): ft_params = self.freqai_info["feature_parameters"] + if ft_params.get('inlier_metric_window', 0): + dk.compute_inlier_metric(set_='train') + if self.freqai_info["data_split_parameters"]["test_size"] > 0: + dk.compute_inlier_metric(set_='test') + if ft_params.get( "principal_component_analysis", False ): @@ -446,11 
+451,6 @@ class IFreqaiModel(ABC): dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps) self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps'] - if ft_params.get('inlier_metric_window', 0): - dk.compute_inlier_metric(set_='train') - if self.freqai_info["data_split_parameters"]["test_size"] > 0: - dk.compute_inlier_metric(set_='test') - if self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0): dk.add_noise_to_training_features() From 48cadbf933e829c8335a3ef593aa324b7d08a659 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Wed, 7 Sep 2022 09:52:22 -0600 Subject: [PATCH 19/31] remove duplicate line, change window to timeperiod --- freqtrade/templates/FreqaiExampleStrategy.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/freqtrade/templates/FreqaiExampleStrategy.py b/freqtrade/templates/FreqaiExampleStrategy.py index 0e822a028..b172ab805 100644 --- a/freqtrade/templates/FreqaiExampleStrategy.py +++ b/freqtrade/templates/FreqaiExampleStrategy.py @@ -92,12 +92,10 @@ class FreqaiExampleStrategy(IStrategy): t = int(t) informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, timeperiod=t) informative[f"%-{coin}sma-period_{t}"] = ta.SMA(informative, timeperiod=t) informative[f"%-{coin}ema-period_{t}"] = ta.EMA(informative, timeperiod=t) - informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - bollinger = qtpylib.bollinger_bands( qtpylib.typical_price(informative), window=t, stds=2.2 ) From 5d338e697c6df2d7d3d5109fb7ce29309f6dcdf0 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Wed, 7 Sep 2022 09:59:46 -0600 Subject: [PATCH 20/31] change window to timeperiod in hybrid --- freqtrade/templates/FreqaiHybridExampleStrategy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/templates/FreqaiHybridExampleStrategy.py b/freqtrade/templates/FreqaiHybridExampleStrategy.py index 5d1e149dd..286ff012f 100644 --- a/freqtrade/templates/FreqaiHybridExampleStrategy.py +++ b/freqtrade/templates/FreqaiHybridExampleStrategy.py @@ -135,7 +135,7 @@ class FreqaiExampleHybridStrategy(IStrategy): t = int(t) informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t) informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t) - informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t) + informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, timeperiod=t) informative[f"%-{coin}sma-period_{t}"] = ta.SMA(informative, timeperiod=t) informative[f"%-{coin}ema-period_{t}"] = ta.EMA(informative, timeperiod=t) informative[f"%-{coin}roc-period_{t}"] = ta.ROC(informative, timeperiod=t) From e51d352777d9e06518da0f28c974dceabf4afd0c Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 7 Sep 2022 19:11:54 +0200 Subject: [PATCH 21/31] ensure pca is handling same DF as inlier --- freqtrade/freqai/freqai_interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 32e42e115..c5ac17a3a 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -467,7 +467,7 @@ class IFreqaiModel(ABC): if ft_params.get( "principal_component_analysis", False ): - dk.pca_transform(dataframe) + dk.pca_transform(self.dk.data_dictionary['prediction_features']) 
if ft_params.get("use_SVM_to_remove_outliers", False): dk.use_SVM_to_remove_outliers(predict=True) From a9fd12b816d5a36a8233f508dec92d3f4bb4ab40 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 7 Sep 2022 18:28:04 +0200 Subject: [PATCH 22/31] Allow more dynamic timeframes by disabling "choice" part of #7366 --- freqtrade/commands/cli_options.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index 3d094da36..4240a8014 100644 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -455,8 +455,6 @@ AVAILABLE_CLI_OPTIONS = { '-t', '--timeframes', help='Specify which tickers to download. Space-separated list. ' 'Default: `1m 5m`.', - choices=['1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h', - '6h', '8h', '12h', '1d', '3d', '1w', '2w', '1M', '1y'], default=['1m', '5m'], nargs='+', ), From f8e7ed5d7d8f05cd470517a630431725cff48280 Mon Sep 17 00:00:00 2001 From: Matthias Date: Wed, 7 Sep 2022 18:28:14 +0200 Subject: [PATCH 23/31] Reduce shutdown verbosity of exchange --- freqtrade/exchange/exchange.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/exchange/exchange.py b/freqtrade/exchange/exchange.py index af072d555..569dcad9b 100644 --- a/freqtrade/exchange/exchange.py +++ b/freqtrade/exchange/exchange.py @@ -205,7 +205,7 @@ class Exchange: logger.debug("Exchange object destroyed, closing async loop") if (self._api_async and inspect.iscoroutinefunction(self._api_async.close) and self._api_async.session): - logger.info("Closing async ccxt session.") + logger.debug("Closing async ccxt session.") self.loop.run_until_complete(self._api_async.close()) def validate_config(self, config): From 1ef1fc269e2b682a2d8053340e8349436a132269 Mon Sep 17 00:00:00 2001 From: Timothy Pogue Date: Wed, 7 Sep 2022 15:26:38 -0600 Subject: [PATCH 24/31] docstring and tests --- freqtrade/strategy/strategy_helper.py | 4 +++- tests/strategy/test_strategy_helpers.py | 23 +++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py index 55afbf7a8..53f625001 100644 --- a/freqtrade/strategy/strategy_helper.py +++ b/freqtrade/strategy/strategy_helper.py @@ -31,6 +31,8 @@ def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame, :param ffill: Forwardfill missing values - optional but usually required :param append_timeframe: Rename columns by appending timeframe. :param date_column: A custom date column name. + :param suffix: A string suffix to add at the end of the informative columns. If specified, + append_timeframe must be false. 
:return: Merged dataframe :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe """ @@ -57,7 +59,7 @@ def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame, date_merge = f'date_merge_{timeframe_inf}' informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns] - elif suffix: + elif suffix and not append_timeframe: date_merge = f'date_merge_{suffix}' informative.columns = [f"{col}_{suffix}" for col in informative.columns] diff --git a/tests/strategy/test_strategy_helpers.py b/tests/strategy/test_strategy_helpers.py index a7c2da26a..8cb990e87 100644 --- a/tests/strategy/test_strategy_helpers.py +++ b/tests/strategy/test_strategy_helpers.py @@ -117,6 +117,29 @@ def test_merge_informative_pair_lower(): merge_informative_pair(data, informative, '1h', '15m', ffill=True) +def test_merge_informative_pair_suffix(): + data = generate_test_data('15m', 20) + informative = generate_test_data('1h', 20) + + result = merge_informative_pair(data, informative, '15m', '1h', + append_timeframe=False, suffix="suf") + + assert 'date' in result.columns + assert result['date'].equals(data['date']) + assert 'date_suf' in result.columns + + assert 'open_suf' in result.columns + assert 'open_1h' not in result.columns + + +def test_merge_informative_pair_suffix_append_timeframe(): + data = generate_test_data('15m', 20) + informative = generate_test_data('1h', 20) + + with pytest.raises(ValueError, match=r"You can not specify `append_timeframe` .*"): + merge_informative_pair(data, informative, '15m', '1h', suffix="suf") + + def test_stoploss_from_open(): open_price_ranges = [ [0.01, 1.00, 30], From f3417a869069bdb01da844017b00ebf30ee6f208 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Sep 2022 06:59:14 +0200 Subject: [PATCH 25/31] Revert condition sequence to simplify conditions --- freqtrade/strategy/strategy_helper.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py index 53f625001..aa753a829 100644 --- a/freqtrade/strategy/strategy_helper.py +++ b/freqtrade/strategy/strategy_helper.py @@ -55,17 +55,16 @@ def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame, # Rename columns to be unique date_merge = 'date_merge' - if append_timeframe and not suffix: + if suffix and append_timeframe: + raise ValueError("You can not specify `append_timeframe` as True and a `suffix`.") + elif append_timeframe: date_merge = f'date_merge_{timeframe_inf}' informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns] - elif suffix and not append_timeframe: + elif suffix: date_merge = f'date_merge_{suffix}' informative.columns = [f"{col}_{suffix}" for col in informative.columns] - elif suffix and append_timeframe: - raise ValueError("You can not specify `append_timeframe` as True and a `suffix`.") - # Combine the 2 dataframes # all indicators on the informative sample MUST be calculated before this point if ffill: From 4e15611b055a7afb5dfc00fd687967fa52bca1bf Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Sep 2022 07:18:38 +0200 Subject: [PATCH 26/31] Don't crash in case of funding fee fetch error --- freqtrade/exchange/exchange.py | 11 +++++-- freqtrade/freqtradebot.py | 57 ++++++++++++++++++++-------------- 2 files changed, 42 insertions(+), 26 deletions(-) diff --git a/freqtrade/exchange/exchange.py b/freqtrade/exchange/exchange.py index 569dcad9b..be5af91db 100644 --- a/freqtrade/exchange/exchange.py +++ 
b/freqtrade/exchange/exchange.py @@ -2509,8 +2509,13 @@ class Exchange: cache=False, drop_incomplete=False, ) - funding_rates = candle_histories[funding_comb] - mark_rates = candle_histories[mark_comb] + try: + # we can't assume we always get histories - for example during exchange downtimes + funding_rates = candle_histories[funding_comb] + mark_rates = candle_histories[mark_comb] + except KeyError: + raise ExchangeError("Could not find funding rates") from None + funding_mark_rates = self.combine_funding_and_mark( funding_rates=funding_rates, mark_rates=mark_rates) @@ -2590,6 +2595,8 @@ class Exchange: :param is_short: trade direction :param amount: Trade amount :param open_date: Open date of the trade + :return: funding fee since open_date + :raies: ExchangeError if something goes wrong. """ if self.trading_mode == TradingMode.FUTURES: if self._config['dry_run']: diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index ec32cae0e..61c323ed3 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -281,14 +281,17 @@ class FreqtradeBot(LoggingMixin): def update_funding_fees(self): if self.trading_mode == TradingMode.FUTURES: trades = Trade.get_open_trades() - for trade in trades: - funding_fees = self.exchange.get_funding_fees( - pair=trade.pair, - amount=trade.amount, - is_short=trade.is_short, - open_date=trade.date_last_filled_utc - ) - trade.funding_fees = funding_fees + try: + for trade in trades: + funding_fees = self.exchange.get_funding_fees( + pair=trade.pair, + amount=trade.amount, + is_short=trade.is_short, + open_date=trade.date_last_filled_utc + ) + trade.funding_fees = funding_fees + except ExchangeError: + logger.warning("Could not update funding fees for open trades.") else: return 0.0 @@ -671,14 +674,12 @@ class FreqtradeBot(LoggingMixin): if not stake_amount: return False - if pos_adjust: - logger.info(f"Position adjust: about to create a new order for {pair} with stake: " - f"{stake_amount} for {trade}") - else: - logger.info( - f"{name} signal found: about create a new trade for {pair} with stake_amount: " - f"{stake_amount} ...") - + msg = (f"Position adjust: about to create a new order for {pair} with stake: " + f"{stake_amount} for {trade}" if pos_adjust + else + f"{name} signal found: about create a new trade for {pair} with stake_amount: " + f"{stake_amount} ...") + logger.info(msg) amount = (stake_amount / enter_limit_requested) * leverage order_type = ordertype or self.strategy.order_types['entry'] @@ -741,8 +742,12 @@ class FreqtradeBot(LoggingMixin): # This is a new trade if trade is None: - funding_fees = self.exchange.get_funding_fees( - pair=pair, amount=amount, is_short=is_short, open_date=open_date) + try: + funding_fees = self.exchange.get_funding_fees( + pair=pair, amount=amount, is_short=is_short, open_date=open_date) + except ExchangeError: + logger.warning("Could not update funding fee.") + trade = Trade( pair=pair, base_currency=base_currency, @@ -1493,12 +1498,16 @@ class FreqtradeBot(LoggingMixin): :param exit_check: CheckTuple with signal and reason :return: True if it succeeds False """ - trade.funding_fees = self.exchange.get_funding_fees( - pair=trade.pair, - amount=trade.amount, - is_short=trade.is_short, - open_date=trade.date_last_filled_utc, - ) + try: + trade.funding_fees = self.exchange.get_funding_fees( + pair=trade.pair, + amount=trade.amount, + is_short=trade.is_short, + open_date=trade.date_last_filled_utc, + ) + except ExchangeError: + logger.warning("Could not update funding fee.") + exit_type = 
'exit' exit_reason = exit_tag or exit_check.exit_reason if exit_check.exit_type in ( From 39b6cadd14e8253541c2836daed435059d20670d Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Sep 2022 07:24:57 +0200 Subject: [PATCH 27/31] Test keyerror case for funding_Fee calculation --- freqtrade/exchange/exchange.py | 2 +- tests/exchange/test_exchange.py | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/freqtrade/exchange/exchange.py b/freqtrade/exchange/exchange.py index be5af91db..33a56c530 100644 --- a/freqtrade/exchange/exchange.py +++ b/freqtrade/exchange/exchange.py @@ -2514,7 +2514,7 @@ class Exchange: funding_rates = candle_histories[funding_comb] mark_rates = candle_histories[mark_comb] except KeyError: - raise ExchangeError("Could not find funding rates") from None + raise ExchangeError("Could not find funding rates.") from None funding_mark_rates = self.combine_funding_and_mark( funding_rates=funding_rates, mark_rates=mark_rates) diff --git a/tests/exchange/test_exchange.py b/tests/exchange/test_exchange.py index 3b903f8ee..71690ecdf 100644 --- a/tests/exchange/test_exchange.py +++ b/tests/exchange/test_exchange.py @@ -11,8 +11,9 @@ import pytest from pandas import DataFrame from freqtrade.enums import CandleType, MarginMode, TradingMode -from freqtrade.exceptions import (DDosProtection, DependencyException, InvalidOrderException, - OperationalException, PricingError, TemporaryError) +from freqtrade.exceptions import (DDosProtection, DependencyException, ExchangeError, + InvalidOrderException, OperationalException, PricingError, + TemporaryError) from freqtrade.exchange import (Binance, Bittrex, Exchange, Kraken, amount_to_precision, date_minus_candles, market_is_active, price_to_precision, timeframe_to_minutes, timeframe_to_msecs, timeframe_to_next_date, @@ -4179,17 +4180,24 @@ def test__fetch_and_calculate_funding_fees( type(api_mock).has = PropertyMock(return_value={'fetchOHLCV': True}) type(api_mock).has = PropertyMock(return_value={'fetchFundingRateHistory': True}) - exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange) + ex = get_patched_exchange(mocker, default_conf, api_mock, id=exchange) mocker.patch('freqtrade.exchange.Exchange.timeframes', PropertyMock( return_value=['1h', '4h', '8h'])) - funding_fees = exchange._fetch_and_calculate_funding_fees( + funding_fees = ex._fetch_and_calculate_funding_fees( pair='ADA/USDT', amount=amount, is_short=True, open_date=d1, close_date=d2) assert pytest.approx(funding_fees) == expected_fees # Fees for Longs are inverted - funding_fees = exchange._fetch_and_calculate_funding_fees( + funding_fees = ex._fetch_and_calculate_funding_fees( pair='ADA/USDT', amount=amount, is_short=False, open_date=d1, close_date=d2) assert pytest.approx(funding_fees) == -expected_fees + # Return empty "refresh_latest" + mocker.patch("freqtrade.exchange.Exchange.refresh_latest_ohlcv", return_value={}) + ex = get_patched_exchange(mocker, default_conf, api_mock, id=exchange) + with pytest.raises(ExchangeError, match="Could not find funding rates."): + ex._fetch_and_calculate_funding_fees( + pair='ADA/USDT', amount=amount, is_short=False, open_date=d1, close_date=d2) + @pytest.mark.parametrize('exchange,expected_fees', [ ('binance', -0.0009140999999999999), From 791f61c0899647f3c04ac072305e73187e266268 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Sep 2022 07:13:15 +0000 Subject: [PATCH 28/31] Add test case for funding fee update failure --- freqtrade/freqtradebot.py | 2 -- 
tests/test_freqtradebot.py | 10 ++++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index 61c323ed3..269c562d7 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -292,8 +292,6 @@ class FreqtradeBot(LoggingMixin): trade.funding_fees = funding_fees except ExchangeError: logger.warning("Could not update funding fees for open trades.") - else: - return 0.0 def startup_backpopulate_precision(self): diff --git a/tests/test_freqtradebot.py b/tests/test_freqtradebot.py index 30b0b75b6..5f943504c 100644 --- a/tests/test_freqtradebot.py +++ b/tests/test_freqtradebot.py @@ -5430,6 +5430,16 @@ def test_update_funding_fees( )) +def test_update_funding_fees_error(mocker, default_conf, caplog): + mocker.patch('freqtrade.exchange.Exchange.get_funding_fees', side_effect=ExchangeError()) + default_conf['trading_mode'] = 'futures' + default_conf['margin_mode'] = 'isolated' + freqtrade = get_patched_freqtradebot(mocker, default_conf) + freqtrade.update_funding_fees() + + log_has("Could not update funding fees for open trades.", caplog) + + def test_position_adjust(mocker, default_conf_usdt, fee) -> None: patch_RPCManager(mocker) patch_exchange(mocker) From 9ef0ffe277e40159c207bb19782504d4bff492ff Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Sep 2022 07:19:17 +0000 Subject: [PATCH 29/31] Update tests for funding-Fee exceptions --- freqtrade/freqtradebot.py | 3 ++- tests/test_freqtradebot.py | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index 269c562d7..6c001a8d6 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -740,11 +740,12 @@ class FreqtradeBot(LoggingMixin): # This is a new trade if trade is None: + funding_fees = 0.0 try: funding_fees = self.exchange.get_funding_fees( pair=pair, amount=amount, is_short=is_short, open_date=open_date) except ExchangeError: - logger.warning("Could not update funding fee.") + logger.warning("Could not find funding fee.") trade = Trade( pair=pair, diff --git a/tests/test_freqtradebot.py b/tests/test_freqtradebot.py index 5f943504c..565797d81 100644 --- a/tests/test_freqtradebot.py +++ b/tests/test_freqtradebot.py @@ -506,7 +506,7 @@ def test_create_trades_multiple_trades( def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker, - limit_buy_order_usdt_open) -> None: + limit_buy_order_usdt_open, caplog) -> None: patch_RPCManager(mocker) patch_exchange(mocker) default_conf_usdt['max_open_trades'] = 4 @@ -515,6 +515,7 @@ def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker, fetch_ticker=ticker_usdt, create_order=MagicMock(return_value=limit_buy_order_usdt_open), get_fee=fee, + get_funding_fees=MagicMock(side_effect=ExchangeError()), ) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade) @@ -522,6 +523,7 @@ def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker, # Create 2 existing trades freqtrade.execute_entry('ETH/USDT', default_conf_usdt['stake_amount']) freqtrade.execute_entry('NEO/BTC', default_conf_usdt['stake_amount']) + assert log_has("Could not find funding fee.", caplog) assert len(Trade.get_open_trades()) == 2 # Change order_id for new orders @@ -3666,7 +3668,7 @@ def test_may_execute_trade_exit_after_stoploss_on_exchange_hit( (True, 29.70297029, 2.2, 2.3, -8.63762376, -0.1443212, 'loss'), ]) def test_execute_trade_exit_market_order( - default_conf_usdt, ticker_usdt, fee, 
is_short, current_rate, amount, + default_conf_usdt, ticker_usdt, fee, is_short, current_rate, amount, caplog, limit, profit_amount, profit_ratio, profit_or_loss, ticker_usdt_sell_up, mocker ) -> None: """ @@ -3694,6 +3696,7 @@ def test_execute_trade_exit_market_order( fetch_ticker=ticker_usdt, get_fee=fee, _is_dry_limit_order_filled=MagicMock(return_value=True), + get_funding_fees=MagicMock(side_effect=ExchangeError()), ) patch_whitelist(mocker, default_conf_usdt) freqtrade = FreqtradeBot(default_conf_usdt) @@ -3719,6 +3722,7 @@ def test_execute_trade_exit_market_order( limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'], exit_check=ExitCheckTuple(exit_type=ExitType.ROI) ) + assert log_has("Could not update funding fee.", caplog) assert not trade.is_open assert pytest.approx(trade.close_profit) == profit_ratio From 5e42defafc005de4d475776ae86ce6e242e8c8ed Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Sep 2022 07:36:17 +0000 Subject: [PATCH 30/31] Update telegram docs to disable partial exit notifications --- docs/telegram-usage.md | 91 +++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 45 deletions(-) diff --git a/docs/telegram-usage.md b/docs/telegram-usage.md index ece8700de..b9324def4 100644 --- a/docs/telegram-usage.md +++ b/docs/telegram-usage.md @@ -90,7 +90,8 @@ Example configuration showing the different settings: "trailing_stop_loss": "on", "stop_loss": "on", "stoploss_on_exchange": "on", - "custom_exit": "silent" + "custom_exit": "silent", + "partial_exit": "on" }, "entry_cancel": "silent", "exit_cancel": "on", @@ -138,7 +139,7 @@ You can create your own keyboard in `config.json`: "enabled": true, "token": "your_telegram_token", "chat_id": "your_telegram_chat_id", - "keyboard": [ + "keyboard": [ ["/daily", "/stats", "/balance", "/profit"], ["/status table", "/performance"], ["/reload_config", "/count", "/logs"] @@ -225,16 +226,16 @@ Once all positions are sold, run `/stop` to completely stop the bot. For each open trade, the bot will send you the following message. Enter Tag is configurable via Strategy. -> **Trade ID:** `123` `(since 1 days ago)` -> **Current Pair:** CVC/BTC +> **Trade ID:** `123` `(since 1 days ago)` +> **Current Pair:** CVC/BTC > **Direction:** Long > **Leverage:** 1.0 -> **Amount:** `26.64180098` +> **Amount:** `26.64180098` > **Enter Tag:** Awesome Long Signal -> **Open Rate:** `0.00007489` -> **Current Rate:** `0.00007489` -> **Current Profit:** `12.95%` -> **Stoploss:** `0.00007389 (-0.02%)` +> **Open Rate:** `0.00007489` +> **Current Rate:** `0.00007489` +> **Current Profit:** `12.95%` +> **Stoploss:** `0.00007389 (-0.02%)` ### /status table @@ -261,26 +262,26 @@ current max Return a summary of your profit/loss and performance. -> **ROI:** Close trades -> ∙ `0.00485701 BTC (2.2%) (15.2 Σ%)` -> ∙ `62.968 USD` -> **ROI:** All trades -> ∙ `0.00255280 BTC (1.5%) (6.43 Σ%)` -> ∙ `33.095 EUR` -> -> **Total Trade Count:** `138` -> **First Trade opened:** `3 days ago` -> **Latest Trade opened:** `2 minutes ago` -> **Avg. Duration:** `2:33:45` -> **Best Performing:** `PAY/BTC: 50.23%` -> **Trading volume:** `0.5 BTC` -> **Profit factor:** `1.04` -> **Max Drawdown:** `9.23% (0.01255 BTC)` +> **ROI:** Close trades +> ∙ `0.00485701 BTC (2.2%) (15.2 Σ%)` +> ∙ `62.968 USD` +> **ROI:** All trades +> ∙ `0.00255280 BTC (1.5%) (6.43 Σ%)` +> ∙ `33.095 EUR` +> +> **Total Trade Count:** `138` +> **First Trade opened:** `3 days ago` +> **Latest Trade opened:** `2 minutes ago` +> **Avg. 
Duration:** `2:33:45` +> **Best Performing:** `PAY/BTC: 50.23%` +> **Trading volume:** `0.5 BTC` +> **Profit factor:** `1.04` +> **Max Drawdown:** `9.23% (0.01255 BTC)` -The relative profit of `1.2%` is the average profit per trade. -The relative profit of `15.2 Σ%` is be based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`. -Starting capital is either taken from the `available_capital` setting, or calculated by using current wallet size - profits. -Profit Factor is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy. +The relative profit of `1.2%` is the average profit per trade. +The relative profit of `15.2 Σ%` is be based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`. +Starting capital is either taken from the `available_capital` setting, or calculated by using current wallet size - profits. +Profit Factor is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy. Max drawdown corresponds to the backtesting metric `Absolute Drawdown (Account)` - calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)`. ### /forceexit @@ -309,27 +310,27 @@ Note that for this to work, `force_entry_enable` needs to be set to true. ### /performance Return the performance of each crypto-currency the bot has sold. -> Performance: -> 1. `RCN/BTC 0.003 BTC (57.77%) (1)` -> 2. `PAY/BTC 0.0012 BTC (56.91%) (1)` -> 3. `VIB/BTC 0.0011 BTC (47.07%) (1)` -> 4. `SALT/BTC 0.0010 BTC (30.24%) (1)` -> 5. `STORJ/BTC 0.0009 BTC (27.24%) (1)` -> ... +> Performance: +> 1. `RCN/BTC 0.003 BTC (57.77%) (1)` +> 2. `PAY/BTC 0.0012 BTC (56.91%) (1)` +> 3. `VIB/BTC 0.0011 BTC (47.07%) (1)` +> 4. `SALT/BTC 0.0010 BTC (30.24%) (1)` +> 5. `STORJ/BTC 0.0009 BTC (27.24%) (1)` +> ... ### /balance Return the balance of all crypto-currency your have on the exchange. -> **Currency:** BTC -> **Available:** 3.05890234 -> **Balance:** 3.05890234 -> **Pending:** 0.0 +> **Currency:** BTC +> **Available:** 3.05890234 +> **Balance:** 3.05890234 +> **Pending:** 0.0 -> **Currency:** CVC -> **Available:** 86.64180098 -> **Balance:** 86.64180098 -> **Pending:** 0.0 +> **Currency:** CVC +> **Available:** 86.64180098 +> **Balance:** 86.64180098 +> **Pending:** 0.0 ### /daily @@ -376,7 +377,7 @@ Month (count) Profit BTC Profit USD Profit % Shows the current whitelist -> Using whitelist `StaticPairList` with 22 pairs +> Using whitelist `StaticPairList` with 22 pairs > `IOTA/BTC, NEO/BTC, TRX/BTC, VET/BTC, ADA/BTC, ETC/BTC, NCASH/BTC, DASH/BTC, XRP/BTC, XVG/BTC, EOS/BTC, LTC/BTC, OMG/BTC, BTG/BTC, LSK/BTC, ZEC/BTC, HOT/BTC, IOTX/BTC, XMR/BTC, AST/BTC, XLM/BTC, NANO/BTC` ### /blacklist [pair] @@ -386,7 +387,7 @@ If Pair is set, then this pair will be added to the pairlist. Also supports multiple pairs, separated by a space. Use `/reload_config` to reset the blacklist. -> Using blacklist `StaticPairList` with 2 pairs +> Using blacklist `StaticPairList` with 2 pairs >`DODGE/BTC`, `HOT/BTC`. 
### /edge From 08726a264be3abdb69569170915c23cd1459c0b2 Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 8 Sep 2022 07:48:36 +0000 Subject: [PATCH 31/31] Update FAQ to point out multiple parallel positions per trade --- docs/faq.md | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/docs/faq.md b/docs/faq.md index 381bbceb5..a72268ef9 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -4,7 +4,7 @@ Freqtrade supports spot trading only. -### Can I open short positions? +### Can my bot open short positions? Freqtrade can open short positions in futures markets. This requires the strategy to be made for this - and `"trading_mode": "futures"` in the configuration. @@ -12,9 +12,9 @@ Please make sure to read the [relevant documentation page](leverage.md) first. In spot markets, you can in some cases use leveraged spot tokens, which reflect an inverted pair (eg. BTCUP/USD, BTCDOWN/USD, ETHBULL/USD, ETHBEAR/USD,...) which can be traded with Freqtrade. -### Can I trade options or futures? +### Can my bot trade options or futures? -Futures trading is supported for selected exchanges. +Futures trading is supported for selected exchanges. Please refer to the [documentation start page](index.md#supported-futures-exchanges-experimental) for an uptodate list of supported exchanges. ## Beginner Tips & Tricks @@ -22,6 +22,13 @@ Futures trading is supported for selected exchanges. ## Freqtrade common issues +### Can freqtrade open multiple positions on the same pair in parallel? + +No. Freqtrade will only open one position per pair at a time. +You can however use the [`adjust_trade_position()` callback](strategy-callbacks.md#adjust-trade-position) to adjust an open position. + +Backtesting provides an option for this in `--eps` - however this is only there to highlight "hidden" signals, and will not work in live. + ### The bot does not start Running the bot with `freqtrade trade --config config.json` shows the output `freqtrade: command not found`. @@ -30,7 +37,7 @@ This could be caused by the following reasons: * The virtual environment is not active. * Run `source .env/bin/activate` to activate the virtual environment. -* The installation did not work correctly. +* The installation did not complete successfully. * Please check the [Installation documentation](installation.md). ### I have waited 5 minutes, why hasn't the bot made any trades yet? @@ -67,7 +74,7 @@ This is not a bot-problem, but will also happen while manual trading. While freqtrade can handle this (it'll sell 99 COIN), fees are often below the minimum tradable lot-size (you can only trade full COIN, not 0.9 COIN). Leaving the dust (0.9 COIN) on the exchange makes usually sense, as the next time freqtrade buys COIN, it'll eat into the remaining small balance, this time selling everything it bought, and therefore slowly declining the dust balance (although it most likely will never reach exactly 0). -Where possible (e.g. on binance), the use of the exchange's dedicated fee currency will fix this. +Where possible (e.g. on binance), the use of the exchange's dedicated fee currency will fix this. On binance, it's sufficient to have BNB in your account, and have "Pay fees in BNB" enabled in your profile. Your BNB balance will slowly decline (as it's used to pay fees) - but you'll no longer encounter dust (Freqtrade will include the fees in the profit calculations). Other exchanges don't offer such possibilities, where it's simply something you'll have to accept or move to a different exchange. 
@@ -109,7 +116,7 @@ This warning can point to one of the below problems: ### I'm getting the "RESTRICTED_MARKET" message in the log -Currently known to happen for US Bittrex users. +Currently known to happen for US Bittrex users. Read [the Bittrex section about restricted markets](exchanges.md#restricted-markets) for more information. @@ -177,8 +184,8 @@ The GPU improvements would only apply to pandas-native calculations - or ones wr For hyperopt, freqtrade is using scikit-optimize, which is built on top of scikit-learn. Their statement about GPU support is [pretty clear](https://scikit-learn.org/stable/faq.html#will-you-add-gpu-support). -GPU's also are only good at crunching numbers (floating point operations). -For hyperopt, we need both number-crunching (find next parameters) and running python code (running backtesting). +GPU's also are only good at crunching numbers (floating point operations). +For hyperopt, we need both number-crunching (find next parameters) and running python code (running backtesting). As such, GPU's are not too well suited for most parts of hyperopt. The benefit of using GPU would therefore be pretty slim - and will not justify the complexity introduced by trying to add GPU support. @@ -219,9 +226,9 @@ already 8\*10^9\*10 evaluations. A roughly total of 80 billion evaluations. Did you run 100 000 evaluations? Congrats, you've done roughly 1 / 100 000 th of the search space, assuming that the bot never tests the same parameters more than once. -* The time it takes to run 1000 hyperopt epochs depends on things like: The available cpu, hard-disk, ram, timeframe, timerange, indicator settings, indicator count, amount of coins that hyperopt test strategies on and the resulting trade count - which can be 650 trades in a year or 100000 trades depending if the strategy aims for big profits by trading rarely or for many low profit trades. +* The time it takes to run 1000 hyperopt epochs depends on things like: The available cpu, hard-disk, ram, timeframe, timerange, indicator settings, indicator count, amount of coins that hyperopt test strategies on and the resulting trade count - which can be 650 trades in a year or 100000 trades depending if the strategy aims for big profits by trading rarely or for many low profit trades. -Example: 4% profit 650 times vs 0,3% profit a trade 10000 times in a year. If we assume you set the --timerange to 365 days. +Example: 4% profit 650 times vs 0,3% profit a trade 10000 times in a year. If we assume you set the --timerange to 365 days. Example: `freqtrade --config config.json --strategy SampleStrategy --hyperopt SampleHyperopt -e 1000 --timerange 20190601-20200601`