Merge branch 'develop' into add-inlier-metric

commit d7585161b2
@@ -53,7 +53,6 @@
     ],
     "freqai": {
         "enabled": true,
-        "startup_candles": 10000,
         "purge_old_models": true,
         "train_period_days": 15,
         "backtest_period_days": 7,
@@ -75,8 +74,10 @@
             "weight_factor": 0.9,
             "principal_component_analysis": false,
             "use_SVM_to_remove_outliers": true,
-            "indicator_max_period_candles": 20,
-            "indicator_periods_candles": [10, 20]
+            "indicator_periods_candles": [
+                10,
+                20
+            ]
         },
         "data_split_parameters": {
             "test_size": 0.33,
|
Binary file not shown. (Before: 191 KiB, After: 185 KiB)
@@ -61,8 +61,8 @@ Binance supports [time_in_force](configuration.md#understand-order_time_in_force)
 
 ### Binance Blacklist
 
-For Binance, please add `"BNB/<STAKE>"` to your blacklist to avoid issues.
-Accounts having BNB accounts use this to pay for fees - if your first trade happens to be on `BNB`, further trades will consume this position and make the initial BNB trade unsellable as the expected amount is not there anymore.
+For Binance, it is suggested to add `"BNB/<STAKE>"` to your blacklist to avoid issues, unless you are willing to maintain enough extra `BNB` on the account or unless you're willing to disable using `BNB` for fees.
+Binance accounts may use `BNB` for fees, and if a trade happens to be on `BNB`, further trades may consume this position and make the initial BNB trade unsellable as the expected amount is not there anymore.
 
 ### Binance Futures
 
@@ -205,8 +205,8 @@ Kucoin supports [time_in_force](configuration.md#understand-order_time_in_force)
 
 ### Kucoin Blacklists
 
-For Kucoin, please add `"KCS/<STAKE>"` to your blacklist to avoid issues.
-Accounts having KCS accounts use this to pay for fees - if your first trade happens to be on `KCS`, further trades will consume this position and make the initial KCS trade unsellable as the expected amount is not there anymore.
+For Kucoin, it is suggested to add `"KCS/<STAKE>"` to your blacklist to avoid issues, unless you are willing to maintain enough extra `KCS` on the account or unless you're willing to disable using `KCS` for fees.
+Kucoin accounts may use `KCS` for fees, and if a trade happens to be on `KCS`, further trades may consume this position and make the initial `KCS` trade unsellable as the expected amount is not there anymore.
 
 ## Huobi
 
@@ -89,10 +89,10 @@ Mandatory parameters are marked as **Required**, which means that they are requi
 |------------|-------------|
 |  | **General configuration parameters**
 | `freqai` | **Required.** <br> The parent dictionary containing all the parameters for controlling FreqAI. <br> **Datatype:** Dictionary.
-| `startup_candles` | Number of candles needed for *backtesting only* to ensure all indicators are non NaNs at the start of the first train period. <br> **Datatype:** Positive integer.
 | `purge_old_models` | Delete obsolete models (otherwise, all historic models will remain on disk). <br> **Datatype:** Boolean. Default: `False`.
 | `train_period_days` | **Required.** <br> Number of days to use for the training data (width of the sliding window). <br> **Datatype:** Positive integer.
 | `backtest_period_days` | **Required.** <br> Number of days to inference from the trained model before sliding the window defined above, and retraining the model. This can be fractional days, but beware that the user-provided `timerange` will be divided by this number to yield the number of trainings necessary to complete the backtest. <br> **Datatype:** Float.
+| `save_backtest_models` | Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when users wish to tune entry/exit parameters). If a user wishes to save models to disk when running backtesting, they should activate `save_backtest_models`. A user may wish to do this if they plan to use the same model files for starting a dry/live instance with the same `identifier`. <br> **Datatype:** Boolean. Default: `False`.
 | `identifier` | **Required.** <br> A unique name for the current model. This can be reused to reload pre-trained models/data. <br> **Datatype:** String.
 | `live_retrain_hours` | Frequency of retraining during dry/live runs. <br> Default set to 0, which means the model will retrain as often as possible. <br> **Datatype:** Float > 0.
 | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old. <br> Defaults set to 0, which means models never expire. <br> **Datatype:** Positive integer.
@@ -105,7 +105,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
 | `label_period_candles` | Number of candles into the future that the labels are created for. This is used in `populate_any_indicators` (see `templates/FreqaiExampleStrategy.py` for detailed usage). The user can create custom labels, making use of this parameter or not. <br> **Datatype:** Positive integer.
 | `include_shifted_candles` | Add features from previous candles to subsequent candles to add historical information. FreqAI takes all features from the `include_shifted_candles` previous candles, duplicates and shifts them so that the information is available for the subsequent candle. <br> **Datatype:** Positive integer.
 | `weight_factor` | Used to set weights for training data points according to their recency. See details about how it works [here](#controlling-the-model-learning-process). <br> **Datatype:** Positive float (typically < 1).
-| `indicator_max_period_candles` | The maximum period used in `populate_any_indicators()` for indicator creation. FreqAI uses this information in combination with the maximum timeframe to calculate how many data points that should be downloaded so that the first data point does not have a NaN. <br> **Datatype:** Positive integer.
+| `indicator_max_period_candles` | **No longer used.** Users must instead set `startup_candle_count` in their strategy, which defines the maximum *period* used in `populate_any_indicators()` for indicator creation (timeframe independent). FreqAI uses this information in combination with the maximum timeframe to calculate how many data points it should download so that the first data point does not have a NaN. <br> **Datatype:** Positive integer.
 | `indicator_periods_candles` | Calculate indicators for `indicator_periods_candles` time periods and add them to the feature set. <br> **Datatype:** List of positive integers.
 | `stratify_training_data` | This value is used to indicate the grouping of the data. For example, 2 would set every 2nd data point into a separate dataset to be pulled from during training/testing. See details about how it works [here](#stratifying-the-data-for-training-and-testing-the-model) <br> **Datatype:** Positive integer.
 | `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis) <br> **Datatype:** Boolean.
@@ -113,16 +113,16 @@ Mandatory parameters are marked as **Required**, which means that they are requi
 | `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training data set, as well as from incoming data points. See details about how it works [here](#removing-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
 | `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](#removing-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.
 | `use_DBSCAN_to_remove_outliers` | Cluster data using DBSCAN to identify and remove outliers from training and prediction data. See details about how it works [here](#removing-outliers-with-dbscan). <br> **Datatype:** Boolean.
-| `outlier_protection_percentage` | If more than `outlier_protection_percentage` fraction of points are removed as outliers, FreqAI will log a warning message and ignore outlier detection while keeping the original dataset intact. <br> **Datatype:** float. Default: `30`
-| `reverse_train_test_order` | If true, FreqAI will train on the latest data split and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, users should be careful to understand unorthodox nature of this parameter before employing it. <br> **Datatype:** bool. Default: False
 | `inlier_metric_window` | If set, FreqAI will add the `inlier_metric` to the training feature set and set the lookback to be the `inlier_metric_window`. Details of how the `inlier_metric` is computed can be found [here](#using-the-inliermetric) <br> **Datatype:** Integer. Default: 0
 | `noise_standard_deviation` | If > 0, FreqAI adds noise to the training features. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. The value should be kept relative to the normalized space between -1 and 1. In other words, since data is always normalized between -1 and 1 in FreqAI, the user can expect a `noise_standard_deviation: 0.05` to see 32% of data randomly increased/decreased by more than 2.5% (i.e. the percent of data falling within the first standard deviation). Good for preventing overfitting. <br> **Datatype:** Integer. Default: 0
+| `outlier_protection_percentage` | If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection while keeping the original dataset intact. If the outlier protection is triggered, no predictions will be made based on the training data. <br> **Datatype:** Float. Default: `30`
+| `reverse_train_test_order` | If true, FreqAI will train on the latest data split and test on the historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, users should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. Default: `False`
 |  | **Data split parameters**
 | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
 | `test_size` | Fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
-| `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`. <br>
+| `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`. <br> **Datatype:** Boolean.
 |  | **Model training parameters**
-| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, this dictionary can contain any parameter from that model. <br> **Datatype:** Dictionary.**Datatype:** Boolean.
+| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, this dictionary can contain any parameter from that model. <br> **Datatype:** Dictionary.
 | `n_estimators` | The number of boosted trees to fit in regression. <br> **Datatype:** Integer.
 | `learning_rate` | Boosting learning rate during regression. <br> **Datatype:** Float.
 | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float.
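As a rough illustration of the `noise_standard_deviation` row above — a minimal sketch, not FreqAI internals; the data and variable names are made up, and it assumes features already normalized to [-1, 1] as FreqAI does:

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(42)

# Pretend training features, already normalized to [-1, 1] as FreqAI does.
train_features = pd.DataFrame(rng.uniform(-1, 1, size=(1000, 5)))

noise_standard_deviation = 0.05
noise = rng.normal(0.0, noise_standard_deviation, size=train_features.shape)
noisy_features = train_features + noise

# ~32% of gaussian deviates exceed one standard deviation in magnitude, i.e.
# those points move by more than 2.5% of the 2-unit-wide [-1, 1] space.
moved = (np.abs(noise) > noise_standard_deviation).mean()
print(f"fraction perturbed by more than one std: {moved:.2f}")  # ~0.32
```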
@@ -169,7 +169,6 @@ The user interface is isolated to the typical Freqtrade config file. A FreqAI co
             ],
             "label_period_candles": 24,
             "include_shifted_candles": 2,
-            "indicator_max_period_candles": 20,
             "indicator_periods_candles": [10, 20]
         },
         "data_split_parameters" : {
@@ -186,6 +185,9 @@ The user interface is isolated to the typical Freqtrade config file. A FreqAI co
 The FreqAI strategy requires the user to include the following lines of code in the standard Freqtrade strategy:
 
 ```python
+    # user should define the maximum startup candle count (the largest number of candles
+    # passed to any single indicator)
+    startup_candle_count: int = 20
 
     def informative_pairs(self):
         whitelist_pairs = self.dp.current_whitelist()
@@ -279,6 +281,17 @@ The FreqAI strategy requires the user to include the following lines of code in
 
 Notice how the `populate_any_indicators()` is where the user adds their own features ([more information](#feature-engineering)) and labels ([more information](#setting-classifier-targets)). See a full example at `templates/FreqaiExampleStrategy.py`.
 
+### Setting the `startup_candle_count`
+
+Users need to take care to set the `startup_candle_count` in their strategy the same way they would for any normal Freqtrade strategy (see details [here](strategy-customization.md#strategy-startup-period)). This value is used by Freqtrade to ensure that a sufficient amount of data is provided when calling on the `dataprovider` to avoid any NaNs at the beginning of the first training. Users can easily set this value by identifying the longest period (in candle units) that they pass to their indicator creation functions (e.g. talib functions). In the present example, the user would pass 20 as this value (since it is the maximum value in their `indicator_periods_candles`); see the sketch below.
+
+!!! Note
+    Typically it is best for users to be safe and multiply their expected `startup_candle_count` by 2. There are instances where the talib functions actually require more data than just the passed `period`. Anecdotally, multiplying the `startup_candle_count` by 2 always leads to a fully NaN-free training dataset. Look out for this log message to confirm that your data is clean:
+
+    ```
+    2022-08-31 15:14:04 - freqtrade.freqai.data_kitchen - INFO - dropped 0 training points due to NaNs in populated dataset 4319.
+    ```
+
 ## Creating a dynamic target
 
 The `&*_std/mean` return values describe the statistical fit of the user defined label *during the most recent training*. This value allows the user to know the rarity of a given prediction. For example, `templates/FreqaiExampleStrategy.py`, creates a `target_roi` which is based on filtering out predictions that are below a given z-score of 1.25.
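The `startup_candle_count` guidance added above boils down to a couple of lines. A minimal sketch assuming the example config's `indicator_periods_candles: [10, 20]`; the variable names are illustrative, not part of any FreqAI API:

```python
# Periods passed to the indicator creation functions in the example strategy.
indicator_periods_candles = [10, 20]

# Longest period any single indicator sees, in candle units.
longest_period = max(indicator_periods_candles)   # 20

# The note above recommends a 2x safety factor for talib-style indicators.
startup_candle_count = longest_period * 2
print(startup_candle_count)                       # 40
```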
@@ -504,10 +517,10 @@ and if a full `live_retrain_hours` has elapsed since the end of the loaded model
 The FreqAI backtesting module can be executed with the following command:
 
 ```bash
-freqtrade backtesting --strategy FreqaiExampleStrategy --config config_freqai.example.json --freqaimodel LightGBMRegressor --timerange 20210501-20210701
+freqtrade backtesting --strategy FreqaiExampleStrategy --strategy-path freqtrade/templates --config config_examples/config_freqai.example.json --freqaimodel LightGBMRegressor --timerange 20210501-20210701
 ```
 
-Backtesting mode requires the user to have the data pre-downloaded (unlike in dry/live mode where FreqAI automatically downloads the necessary data). The user should be careful to consider that the time range of the downloaded data is more than the backtesting time range. This is because FreqAI needs data prior to the desired backtesting time range in order to train a model to be ready to make predictions on the first candle of the user-set backtesting time range. More details on how to calculate the data to download can be found [here](#deciding-the-sliding-training-window-and-backtesting-duration).
+Backtesting mode requires the user to have the data [pre-downloaded](#downloading-data-for-backtesting) (unlike in dry/live mode where FreqAI automatically downloads the necessary data). The user should be careful to consider that the time range of the downloaded data is more than the backtesting time range. This is because FreqAI needs data prior to the desired backtesting time range in order to train a model to be ready to make predictions on the first candle of the user-set backtesting time range. More details on how to calculate the data to download can be found [here](#deciding-the-sliding-training-window-and-backtesting-duration).
 
 If this command has never been executed with the existing config file, it will train a new model
 for each pair, for each backtesting window within the expanded `--timerange`.
@@ -531,20 +544,14 @@ the user is asking FreqAI to use a training period of 30 days and backtest on th
 This means that if the user sets `--timerange 20210501-20210701`,
 FreqAI will have trained 8 separate models by the end of `--timerange` (because the full range comprises 8 weeks). After the training of the model, FreqAI will backtest the subsequent 7 days. The "sliding window" then moves one week forward (emulating FreqAI retraining once per week in live mode) and the new model uses the previous 30 days (including the 7 days used for backtesting by the previous model) to train. This is repeated until the end of `--timerange`.
 
-In live mode, the required training data is automatically computed and downloaded. However, in backtesting mode,
-the user must manually enter the required number of `startup_candles` in the config. This value
-is used to increase the data to FreqAI, which should be sufficient to enable all indicators
-to be NaN free at the beginning of the first training. This is done by identifying the
-longest timeframe (`4h` in presented example config) and the longest indicator period (`20` days in presented example config)
-and adding this to the `train_period_days`. The units need to be in the base candle time frame:
-`startup_candles` = ( 4 hours * 20 max period * 60 minutes/hour + 30 day train_period_days * 1440 minutes per day ) / 5 min (base time frame) = 9360.
-
-!!! Note
-    In dry/live mode, this is all precomputed and handled automatically. Thus, `startup_candle` has no influence on dry/live mode.
-
 !!! Note
     Although fractional `backtest_period_days` is allowed, the user should be aware that the `--timerange` is divided by this value to determine the number of models that FreqAI will need to train in order to backtest the full range. For example, if the user wants to set a `--timerange` of 10 days, and asks for a `backtest_period_days` of 0.1, FreqAI will need to train 100 models per pair to complete the full backtest. Because of this, a true backtest of FreqAI adaptive training would take a *very* long time. The best way to fully test a model is to run it dry and let it constantly train. In this case, backtesting would take the exact same amount of time as a dry run.
 
+### Downloading data for backtesting
+
+Live/dry instances will download the data automatically for the user, but users who wish to use backtesting functionality still need to download the necessary data using `download-data` (details [here](data-download.md#data-downloading)). FreqAI users need to pay careful attention to understanding how much *additional* data needs to be downloaded to ensure that they have a sufficient amount of training data *before* the start of their backtesting timerange. The amount of additional data can be roughly estimated by moving the start date of the timerange backwards by `train_period_days` and the `startup_candle_count` ([details](#setting-the-startupcandlecount)) from the beginning of the desired backtesting timerange.
+
+As an example, say we wish to backtest the `--timerange` above of `20210501-20210701` using the example config, which sets `train_period_days` to 15, with a startup candle count of 40 on a maximum `include_timeframes` of 1h. We would need 20210501 - 15 days - 40 * 1h / 24 hours = 20210414 (16.7 days earlier than the start of the desired training timerange); see the date arithmetic sketch below.
+
 ### Defining model expirations
 
 During dry/live mode, FreqAI trains each coin pair sequentially (on separate threads/GPU from the main Freqtrade bot). This means that there is always an age discrepancy between models. If a user is training on 50 pairs, and each pair requires 5 minutes to train, the oldest model will be over 4 hours old. This may be undesirable if the characteristic time scale (the trade duration target) for a strategy is less than 4 hours. The user can decide to only make trade entries if the model is less than
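The worked download estimate in the hunk above can be checked with plain date arithmetic. A sketch using the example values (`train_period_days: 15`, startup candle count 40, maximum `include_timeframes` of 1h); nothing here is a freqtrade API:

```python
from datetime import datetime, timedelta

backtest_start = datetime(2021, 5, 1)    # --timerange 20210501-20210701
train_period_days = 15
startup_candle_count = 40
informative_tf_hours = 1                 # maximum include_timeframes (1h)

startup = timedelta(hours=startup_candle_count * informative_tf_hours)
download_start = backtest_start - timedelta(days=train_period_days) - startup

# 2021-04-14 08:00 -> download from 20210414, ~16.7 days before backtest start
print(download_start)
```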
@@ -745,7 +752,7 @@ Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters
 
 ![dbscan](assets/freqai_dbscan.jpg)
 
-FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html)) with `min_samples` ($N$) taken as double the no. of user-defined features, and `eps` ($\varepsilon$) taken as the longest distance in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.
+FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html)) with `min_samples` ($N$) taken as 1/4 of the no. of time points in the feature set, and `eps` ($\varepsilon$) taken as the elbow point in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.
 
 ## Additional information
 
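The new `min_samples`/`eps` heuristic described above can be sketched with scikit-learn directly. This is an illustration of the idea, not FreqAI's exact implementation — in particular the elbow pick below is a crude approximation:

```python
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors

# Illustrative feature matrix: 400 time points x 10 features.
features = np.random.default_rng(0).normal(size=(400, 10))

# min_samples taken as 1/4 of the number of time points, per the text above.
min_samples = features.shape[0] // 4

# k-distance graph: sorted distance of every point to its k-th nearest neighbor.
neighbors = NearestNeighbors(n_neighbors=min_samples).fit(features)
distances, _ = neighbors.kneighbors(features)
k_distances = np.sort(distances[:, -1])

# Crude elbow pick: the largest jump between consecutive sorted distances.
elbow_idx = int(np.argmax(np.diff(k_distances)))
eps = float(k_distances[elbow_idx])

labels = DBSCAN(eps=eps, min_samples=min_samples).fit(features).labels_
print(f"eps={eps:.3f}, flagged {(labels == -1).sum()} outliers (label -1)")
```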
@@ -770,5 +777,5 @@ Code review, software architecture brainstorming:
 @xmatthias
 
 Beta testing and bug reporting:
-@bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm
+@bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm,
 Juha Nykänen @suikula, Wagner Costa @wagnercosta
@@ -13,7 +13,7 @@
 Please only use advanced trading modes when you know how freqtrade (and your strategy) works.
 Also, never risk more than what you can afford to lose.
 
-Please read the [strategy migration guide](strategy_migration.md#strategy-migration-between-v2-and-v3) to migrate your strategy from a freqtrade v2 strategy, to v3 strategy that can short and trade futures.
+If you already have an existing strategy, please read the [strategy migration guide](strategy_migration.md#strategy-migration-between-v2-and-v3) to migrate your strategy from a freqtrade v2 strategy to a v3 strategy that can short and trade futures.
 
 ## Shorting
 
@@ -62,6 +62,13 @@ You will also have to pick a "margin mode" (explanation below) - with freqtrade
     "margin_mode": "isolated"
 ```
 
+##### Pair namings
+
+Freqtrade follows the [ccxt naming conventions for futures](https://docs.ccxt.com/en/latest/manual.html?#perpetual-swap-perpetual-future).
+A futures pair will therefore have the naming of `base/quote:settle` (e.g. `ETH/USDT:USDT`).
+
+Binance is currently still an exception to this naming scheme, where pairs are named `ETH/USDT` also for futures markets, but will be aligned as soon as CCXT is ready.
+
 ### Margin mode
 
 On top of `trading_mode` - you will also have to configure your `margin_mode`.
@@ -166,7 +166,7 @@ Additional technical libraries can be installed as necessary, or custom indicato
 
 Most indicators have an unstable startup period, in which they are either not available (NaN), or the calculation is incorrect. This can lead to inconsistencies, since Freqtrade does not know how long this unstable period should be.
 To account for this, the strategy can be assigned the `startup_candle_count` attribute.
-This should be set to the maximum number of candles that the strategy requires to calculate stable indicators.
+This should be set to the maximum number of candles that the strategy requires to calculate stable indicators. In the case where a user includes higher timeframes with informative pairs, the `startup_candle_count` does not necessarily change. The value is the maximum period (in candles) that any of the informative timeframes need to compute stable indicators.
 
 In this example strategy, this should be set to 100 (`startup_candle_count = 100`), since the longest needed history is 100 candles.
 
@@ -824,6 +824,8 @@ Options:
 - Merge the dataframe without lookahead bias
 - Forward-fill (optional)
 
+For a full sample, please refer to the [complete data provider example](#complete-data-provider-sample) below.
+
 All columns of the informative dataframe will be available on the returning dataframe in a renamed fashion:
 
 !!! Example "Column renaming"
@@ -91,9 +91,9 @@ class DataProvider:
             timerange = TimeRange.parse_timerange(None if self._config.get(
                 'timerange') is None else str(self._config.get('timerange')))
             # Move informative start time respecting startup_candle_count
-            timerange.subtract_start(
-                timeframe_to_seconds(str(timeframe)) * self._config.get('startup_candle_count', 0)
-            )
+            startup_candles = self.get_required_startup(str(timeframe))
+            tf_seconds = timeframe_to_seconds(str(timeframe))
+            timerange.subtract_start(tf_seconds * startup_candles)
             self.__cached_pairs_backtesting[saved_pair] = load_pair_history(
                 pair=pair,
                 timeframe=timeframe or self._config['timeframe'],
@@ -105,6 +105,21 @@ class DataProvider:
             )
         return self.__cached_pairs_backtesting[saved_pair].copy()
 
+    def get_required_startup(self, timeframe: str) -> int:
+        freqai_config = self._config.get('freqai', {})
+        if not freqai_config.get('enabled', False):
+            return self._config.get('startup_candle_count', 0)
+        else:
+            startup_candles = self._config.get('startup_candle_count', 0)
+            indicator_periods = freqai_config['feature_parameters']['indicator_periods_candles']
+            # make sure the startup candles are at least the set maximum indicator periods
+            self._config['startup_candle_count'] = max(startup_candles, max(indicator_periods))
+            tf_seconds = timeframe_to_seconds(timeframe)
+            train_candles = freqai_config['train_period_days'] * 86400 / tf_seconds
+            total_candles = int(self._config['startup_candle_count'] + train_candles)
+            logger.info(f'Increasing startup_candle_count for freqai to {total_candles}')
+            return total_candles
+
     def get_pair_dataframe(
         self,
         pair: str,
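The arithmetic in the new `get_required_startup()` above, traced by hand under assumed example values (`train_period_days: 15`, `indicator_periods_candles: [10, 20]`, a 5m base timeframe, a strategy `startup_candle_count` of 40); illustration only:

```python
startup_candle_count = 40
indicator_periods_candles = [10, 20]

# raised to at least the largest indicator period (no change here: 40 > 20)
startup_candle_count = max(startup_candle_count, max(indicator_periods_candles))

tf_seconds = 5 * 60                      # 5m candles
train_candles = 15 * 86400 / tf_seconds  # 15 train days = 4320 candles
total_candles = int(startup_candle_count + train_candles)
print(total_candles)                     # 4360 candles before the first prediction
```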
@@ -2600,7 +2600,7 @@ class Exchange:
         is_short: bool,
         amount: float,  # Absolute value of position size
         stake_amount: float,
-        wallet_balance: float = 0.0,
+        wallet_balance: float,
         mm_ex_1: float = 0.0,  # (Binance) Cross only
         upnl_ex_1: float = 0.0,  # (Binance) Cross only
     ) -> Optional[float]:
@@ -76,6 +76,8 @@ class FreqaiDataDrawer:
             self.full_path / f"follower_dictionary-{self.follower_name}.json"
         )
         self.historic_predictions_path = Path(self.full_path / "historic_predictions.pkl")
+        self.historic_predictions_bkp_path = Path(
+            self.full_path / "historic_predictions.backup.pkl")
         self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json")
         self.follow_mode = follow_mode
         if follow_mode:
|
|||||||
"""
|
"""
|
||||||
exists = self.historic_predictions_path.is_file()
|
exists = self.historic_predictions_path.is_file()
|
||||||
if exists:
|
if exists:
|
||||||
with open(self.historic_predictions_path, "rb") as fp:
|
try:
|
||||||
self.historic_predictions = cloudpickle.load(fp)
|
with open(self.historic_predictions_path, "rb") as fp:
|
||||||
logger.info(
|
self.historic_predictions = cloudpickle.load(fp)
|
||||||
f"Found existing historic predictions at {self.full_path}, but beware "
|
logger.info(
|
||||||
"that statistics may be inaccurate if the bot has been offline for "
|
f"Found existing historic predictions at {self.full_path}, but beware "
|
||||||
"an extended period of time."
|
"that statistics may be inaccurate if the bot has been offline for "
|
||||||
)
|
"an extended period of time."
|
||||||
|
)
|
||||||
|
except EOFError:
|
||||||
|
logger.warning(
|
||||||
|
'Historical prediction file was corrupted. Trying to load backup file.')
|
||||||
|
with open(self.historic_predictions_bkp_path, "rb") as fp:
|
||||||
|
self.historic_predictions = cloudpickle.load(fp)
|
||||||
|
logger.warning('FreqAI successfully loaded the backup historical predictions file.')
|
||||||
|
|
||||||
elif not self.follow_mode:
|
elif not self.follow_mode:
|
||||||
logger.info("Could not find existing historic_predictions, starting from scratch")
|
logger.info("Could not find existing historic_predictions, starting from scratch")
|
||||||
else:
|
else:
|
||||||
@ -142,6 +152,9 @@ class FreqaiDataDrawer:
|
|||||||
with open(self.historic_predictions_path, "wb") as fp:
|
with open(self.historic_predictions_path, "wb") as fp:
|
||||||
cloudpickle.dump(self.historic_predictions, fp, protocol=cloudpickle.DEFAULT_PROTOCOL)
|
cloudpickle.dump(self.historic_predictions, fp, protocol=cloudpickle.DEFAULT_PROTOCOL)
|
||||||
|
|
||||||
|
# create a backup
|
||||||
|
shutil.copy(self.historic_predictions_path, self.historic_predictions_bkp_path)
|
||||||
|
|
||||||
def save_drawer_to_disk(self):
|
def save_drawer_to_disk(self):
|
||||||
"""
|
"""
|
||||||
Save data drawer full of all pair model metadata in present model folder.
|
Save data drawer full of all pair model metadata in present model folder.
|
||||||
|
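The two `FreqaiDataDrawer` hunks above add a save-with-backup pattern around the historic-predictions pickle. A self-contained sketch of the same idea, with illustrative paths and names (not the FreqAI API):

```python
import shutil
from pathlib import Path

import cloudpickle

# Illustrative paths; FreqAI stores these under its model folder.
path = Path("historic_predictions.pkl")
bkp_path = Path("historic_predictions.backup.pkl")

def save(obj) -> None:
    with open(path, "wb") as fp:
        cloudpickle.dump(obj, fp, protocol=cloudpickle.DEFAULT_PROTOCOL)
    # refresh the backup only after a complete, successful write
    shutil.copy(path, bkp_path)

def load():
    try:
        with open(path, "rb") as fp:
            return cloudpickle.load(fp)
    except EOFError:
        # primary pickle truncated (e.g. bot killed mid-write): fall back
        with open(bkp_path, "rb") as fp:
            return cloudpickle.load(fp)
```

Because the backup is copied only after a successful dump, it always holds the last complete snapshot; a crash mid-write can therefore only corrupt the primary file.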
@@ -18,8 +18,6 @@ from sklearn.model_selection import train_test_split
 from sklearn.neighbors import NearestNeighbors
 
 from freqtrade.configuration import TimeRange
-from freqtrade.data.dataprovider import DataProvider
-from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_seconds
 from freqtrade.strategy.interface import IStrategy
@@ -73,6 +71,8 @@ class FreqaiDataKitchen:
         self.label_list: List = []
         self.training_features_list: List = []
         self.model_filename: str = ""
+        self.backtesting_results_path = Path()
+        self.backtest_predictions_folder: str = "backtesting_predictions"
         self.live = live
         self.pair = pair
 
@@ -291,6 +291,7 @@ class FreqaiDataKitchen:
         :returns:
         :data_dictionary: updated dictionary with standardized values.
         """
+
         # standardize the data by training stats
         train_max = data_dictionary["train_features"].max()
         train_min = data_dictionary["train_features"].min()
@@ -324,10 +325,24 @@ class FreqaiDataKitchen:
                 - 1
             )
 
-            self.data[f"{item}_max"] = train_labels_max  # .to_dict()
-            self.data[f"{item}_min"] = train_labels_min  # .to_dict()
+            self.data[f"{item}_max"] = train_labels_max
+            self.data[f"{item}_min"] = train_labels_min
         return data_dictionary
 
+    def normalize_single_dataframe(self, df: DataFrame) -> DataFrame:
+
+        train_max = df.max()
+        train_min = df.min()
+        df = (
+            2 * (df - train_min) / (train_max - train_min) - 1
+        )
+
+        for item in train_max.keys():
+            self.data[item + "_max"] = train_max[item]
+            self.data[item + "_min"] = train_min[item]
+
+        return df
+
     def normalize_data_from_metadata(self, df: DataFrame) -> DataFrame:
         """
         Normalize a set of data using the mean and standard deviation from
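A quick check of the [-1, 1] min-max transform used by the new `normalize_single_dataframe()` above (a sketch with made-up data, not the FreqAI API):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"feature_a": [1.0, 2.0, 3.0], "feature_b": [10.0, 20.0, 40.0]})

train_max = df.max()
train_min = df.min()
normalized = 2 * (df - train_min) / (train_max - train_min) - 1

# every column's minimum maps to -1 and its maximum to +1
assert np.allclose(normalized.min(), -1.0)
assert np.allclose(normalized.max(), 1.0)
```

The per-column `train_min`/`train_max` are stored (in `self.data`) precisely so the identical transform can later be applied to test and prediction data.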
@@ -441,7 +456,8 @@ class FreqaiDataKitchen:
         start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc)
         stop = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc)
         df = df.loc[df["date"] >= start, :]
-        df = df.loc[df["date"] <= stop, :]
+        if not self.live:
+            df = df.loc[df["date"] < stop, :]
 
         return df
 
@@ -454,22 +470,23 @@ class FreqaiDataKitchen:
 
         from sklearn.decomposition import PCA  # avoid importing if we don't need it
 
-        n_components = self.data_dictionary["train_features"].shape[1]
-        pca = PCA(n_components=n_components)
+        pca = PCA(0.999)
         pca = pca.fit(self.data_dictionary["train_features"])
-        n_keep_components = np.argmin(pca.explained_variance_ratio_.cumsum() < 0.999)
-        pca2 = PCA(n_components=n_keep_components)
+        n_keep_components = pca.n_components_
         self.data["n_kept_components"] = n_keep_components
-        pca2 = pca2.fit(self.data_dictionary["train_features"])
+        n_components = self.data_dictionary["train_features"].shape[1]
         logger.info("reduced feature dimension by %s", n_components - n_keep_components)
-        logger.info("explained variance %f", np.sum(pca2.explained_variance_ratio_))
-        train_components = pca2.transform(self.data_dictionary["train_features"])
+        logger.info("explained variance %f", np.sum(pca.explained_variance_ratio_))
+
+        train_components = pca.transform(self.data_dictionary["train_features"])
         self.data_dictionary["train_features"] = pd.DataFrame(
             data=train_components,
             columns=["PC" + str(i) for i in range(0, n_keep_components)],
             index=self.data_dictionary["train_features"].index,
         )
+        # normalising transformed training features
+        self.data_dictionary["train_features"] = self.normalize_single_dataframe(
+            self.data_dictionary["train_features"])
 
         # keeping a copy of the non-transformed features so we can check for errors during
         # model load from disk
@@ -477,15 +494,18 @@ class FreqaiDataKitchen:
         self.training_features_list = self.data_dictionary["train_features"].columns
 
         if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
-            test_components = pca2.transform(self.data_dictionary["test_features"])
+            test_components = pca.transform(self.data_dictionary["test_features"])
             self.data_dictionary["test_features"] = pd.DataFrame(
                 data=test_components,
                 columns=["PC" + str(i) for i in range(0, n_keep_components)],
                 index=self.data_dictionary["test_features"].index,
             )
+            # normalise transformed test features to transformed training features
+            self.data_dictionary["test_features"] = self.normalize_data_from_metadata(
+                self.data_dictionary["test_features"])
 
         self.data["n_kept_components"] = n_keep_components
-        self.pca = pca2
+        self.pca = pca
 
         logger.info(f"PCA reduced total features from {n_components} to {n_keep_components}")
 
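The PCA change above replaces a two-pass fit (fit everything, inspect the cumulative `explained_variance_ratio_`, refit with the chosen count) with a single `PCA(0.999)`: when scikit-learn's `n_components` is a float in (0, 1), it keeps just enough components to explain that fraction of variance and exposes the count as `n_components_`. A sketch with synthetic correlated data:

```python
import numpy as np
from sklearn.decomposition import PCA

# Synthetic, strongly correlated features so that PCA can actually compress:
# 30 observed features driven by 5 latent dimensions plus a little noise.
rng = np.random.default_rng(1)
latent = rng.normal(size=(500, 5))
features = latent @ rng.normal(size=(5, 30)) + 0.01 * rng.normal(size=(500, 30))

# Float n_components: keep the smallest number of components whose
# cumulative explained variance reaches 99.9%.
pca = PCA(0.999)
transformed = pca.fit_transform(features)

print(pca.n_components_, "components explain",
      f"{pca.explained_variance_ratio_.sum():.4f} of the variance")
```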
@@ -506,6 +526,9 @@ class FreqaiDataKitchen:
             columns=["PC" + str(i) for i in range(0, self.data["n_kept_components"])],
             index=filtered_dataframe.index,
         )
+        # normalise transformed predictions to transformed training features
+        self.data_dictionary["prediction_features"] = self.normalize_data_from_metadata(
+            self.data_dictionary["prediction_features"])
 
     def compute_distances(self) -> float:
         """
@@ -885,9 +908,10 @@ class FreqaiDataKitchen:
             weights = np.exp(-np.arange(num_weights) / (wfactor * num_weights))[::-1]
         return weights
 
-    def append_predictions(self, predictions: DataFrame, do_predict: npt.ArrayLike) -> None:
+    def get_predictions_to_append(self, predictions: DataFrame,
+                                  do_predict: npt.ArrayLike) -> DataFrame:
         """
-        Append backtest prediction from current backtest period to all previous periods
+        Get backtest prediction from current backtest period
         """
 
         append_df = DataFrame()
@@ -902,13 +926,18 @@ class FreqaiDataKitchen:
         if self.freqai_config["feature_parameters"].get("DI_threshold", 0) > 0:
             append_df["DI_values"] = self.DI_values
 
+        return append_df
+
+    def append_predictions(self, append_df: DataFrame) -> None:
+        """
+        Append backtest prediction from current backtest period to all previous periods
+        """
+
         if self.full_df.empty:
             self.full_df = append_df
         else:
             self.full_df = pd.concat([self.full_df, append_df], axis=0)
 
-        return
-
     def fill_predictions(self, dataframe):
         """
         Back fill values to before the backtesting range so that the dataframe matches size
@@ -1008,9 +1037,7 @@ class FreqaiDataKitchen:
         # We notice that users like to use exotic indicators where
         # they do not know the required timeperiod. Here we include a factor
         # of safety by multiplying the user considered "max" by 2.
-        max_period = self.freqai_config["feature_parameters"].get(
-            "indicator_max_period_candles", 20
-        ) * 2
+        max_period = self.config.get('startup_candle_count', 20) * 2
         additional_seconds = max_period * max_tf_seconds
 
         if trained_timestamp != 0:
@@ -1056,31 +1083,6 @@ class FreqaiDataKitchen:
 
             self.model_filename = f"cb_{coin.lower()}_{int(trained_timerange.stopts)}"
 
-    def download_all_data_for_training(self, timerange: TimeRange, dp: DataProvider) -> None:
-        """
-        Called only once upon start of bot to download the necessary data for
-        populating indicators and training the model.
-        :param timerange: TimeRange = The full data timerange for populating the indicators
-                          and training the model.
-        :param dp: DataProvider instance attached to the strategy
-        """
-        new_pairs_days = int((timerange.stopts - timerange.startts) / SECONDS_IN_DAY)
-        if not dp._exchange:
-            # Not realistic - this is only called in live mode.
-            raise OperationalException("Dataprovider did not have an exchange attached.")
-        refresh_backtest_ohlcv_data(
-            dp._exchange,
-            pairs=self.all_pairs,
-            timeframes=self.freqai_config["feature_parameters"].get("include_timeframes"),
-            datadir=self.config["datadir"],
-            timerange=timerange,
-            new_pairs_days=new_pairs_days,
-            erase=False,
-            data_format=self.config.get("dataformat_ohlcv", "json"),
-            trading_mode=self.config.get("trading_mode", "spot"),
-            prepend=self.config.get("prepend_data", False),
-        )
-
     def set_all_pairs(self) -> None:
 
         self.all_pairs = copy.deepcopy(
@@ -1194,3 +1196,50 @@ class FreqaiDataKitchen:
         if self.unique_classes:
             for label in self.unique_classes:
                 self.unique_class_list += list(self.unique_classes[label])
+
+    def save_backtesting_prediction(
+        self, append_df: DataFrame
+    ) -> None:
+        """
+        Save prediction dataframe from backtesting to h5 file format
+        :param append_df: dataframe for backtesting period
+        """
+        full_predictions_folder = Path(self.full_path / self.backtest_predictions_folder)
+        if not full_predictions_folder.is_dir():
+            full_predictions_folder.mkdir(parents=True, exist_ok=True)
+
+        append_df.to_hdf(self.backtesting_results_path, key='append_df', mode='w')
+
+    def get_backtesting_prediction(
+        self
+    ) -> DataFrame:
+        """
+        Get prediction dataframe from h5 file format
+        """
+        append_df = pd.read_hdf(self.backtesting_results_path)
+        return append_df
+
+    def check_if_backtest_prediction_exists(
+        self
+    ) -> bool:
+        """
+        Check if a backtesting prediction already exists
+        :param dk: FreqaiDataKitchen
+        :return:
+        :boolean: whether the prediction file exists or not.
+        """
+        path_to_predictionfile = Path(self.full_path /
+                                      self.backtest_predictions_folder /
+                                      f"{self.model_filename}_prediction.h5")
+        self.backtesting_results_path = path_to_predictionfile
+
+        file_exists = path_to_predictionfile.is_file()
+        if file_exists:
+            logger.info(f"Found backtesting prediction file at {path_to_predictionfile}")
+        else:
+            logger.info(
+                f"Could not find backtesting prediction file at {path_to_predictionfile}"
+            )
+        return file_exists
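These three helpers implement a simple cache: write each sub-train window's predictions to HDF5 once, then reuse them on later backtest runs. A minimal standalone sketch of the same round-trip (the folder and file name are made up for illustration; `to_hdf`/`read_hdf` require the `tables` package):

from pathlib import Path

import pandas as pd

cache = Path("backtesting_predictions")             # hypothetical folder
cache.mkdir(parents=True, exist_ok=True)
path = cache / "cb_btc_1660000000_prediction.h5"    # hypothetical file name

append_df = pd.DataFrame({"&-s_close": [0.01, -0.02], "do_predict": [1, 1]})
append_df.to_hdf(path, key="append_df", mode="w")   # write once after predicting

if path.is_file():                                  # later runs skip re-predicting
    cached = pd.read_hdf(path)
    assert cached.equals(append_df)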
@@ -6,7 +6,7 @@ from abc import ABC, abstractmethod
 from datetime import datetime, timezone
 from pathlib import Path
 from threading import Lock
-from typing import Any, Dict, Tuple
+from typing import Any, Dict, List, Tuple

 import numpy as np
 import pandas as pd
@@ -26,13 +26,6 @@ pd.options.mode.chained_assignment = None
 logger = logging.getLogger(__name__)


-def threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs).start()
-
-    return wrapper
-
-
 class IFreqaiModel(ABC):
     """
     Class containing all tools for training and prediction in the strategy.
@@ -69,6 +62,9 @@ class IFreqaiModel(ABC):
         self.first = True
         self.set_full_path()
         self.follow_mode: bool = self.freqai_info.get("follow_mode", False)
+        self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", False)
+        if self.save_backtest_models:
+            logger.info('Backtesting module configured to save all models.')
         self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode)
         self.identifier: str = self.freqai_info.get("identifier", "no_id_provided")
         self.scanning = False
@@ -92,6 +88,9 @@ class IFreqaiModel(ABC):
         self.begin_time_train: float = 0
         self.base_tf_seconds = timeframe_to_seconds(self.config['timeframe'])

+        self._threads: List[threading.Thread] = []
+        self._stop_event = threading.Event()
+
     def assert_config(self, config: Dict[str, Any]) -> None:

         if not config.get("freqai", {}):
@@ -125,10 +124,9 @@ class IFreqaiModel(ABC):
         elif not self.follow_mode:
             self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
             logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
-            with self.analysis_lock:
-                dataframe = self.dk.use_strategy_to_populate_indicators(
-                    strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
-                )
+            dataframe = self.dk.use_strategy_to_populate_indicators(
+                strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
+            )
             dk = self.start_backtesting(dataframe, metadata, self.dk)

             dataframe = dk.remove_features_from_df(dk.return_dataframe)
@@ -146,15 +144,34 @@ class IFreqaiModel(ABC):
         self.model = None
         self.dk = None

-    @threaded
-    def start_scanning(self, strategy: IStrategy) -> None:
+    def shutdown(self):
+        """
+        Cleans up threads on Shutdown, set stop event. Join threads to wait
+        for current training iteration.
+        """
+        logger.info("Stopping FreqAI")
+        self._stop_event.set()
+
+        logger.info("Waiting on Training iteration")
+        for _thread in self._threads:
+            _thread.join()
+
+    def start_scanning(self, *args, **kwargs) -> None:
+        """
+        Start `self._start_scanning` in a separate thread
+        """
+        _thread = threading.Thread(target=self._start_scanning, args=args, kwargs=kwargs)
+        self._threads.append(_thread)
+        _thread.start()
+
+    def _start_scanning(self, strategy: IStrategy) -> None:
         """
         Function designed to constantly scan pairs for retraining on a separate thread (intracandle)
         to improve model youth. This function is agnostic to data preparation/collection/storage,
         it simply trains on what ever data is available in the self.dd.
         :param strategy: IStrategy = The user defined strategy class
         """
-        while 1:
+        while not self._stop_event.is_set():
             time.sleep(1)
             for pair in self.config.get("exchange", {}).get("pair_whitelist"):

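The refactor above replaces a fire-and-forget `@threaded` decorator with threads the object owns, so `shutdown()` can signal and join them instead of leaving a daemon loop running. A minimal standalone sketch of the same stop-event pattern (the class and names are illustrative, not the freqtrade API):

import threading
import time
from typing import List


class Scanner:
    def __init__(self) -> None:
        self._threads: List[threading.Thread] = []
        self._stop_event = threading.Event()

    def start_scanning(self) -> None:
        # Keep a handle on the thread instead of discarding it.
        t = threading.Thread(target=self._loop)
        self._threads.append(t)
        t.start()

    def _loop(self) -> None:
        # `while not event.is_set()` replaces `while 1`, so the loop can exit.
        while not self._stop_event.is_set():
            time.sleep(0.1)  # one scan iteration would go here

    def shutdown(self) -> None:
        self._stop_event.set()      # ask the loop to stop...
        for t in self._threads:
            t.join()                # ...and wait for the current iteration


s = Scanner()
s.start_scanning()
s.shutdown()  # returns promptly once the loop observes the event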
@@ -225,28 +242,39 @@ class IFreqaiModel(ABC):
                     "trains"
                 )

+                trained_timestamp_int = int(trained_timestamp.stopts)
                 dk.data_path = Path(
                     dk.full_path
                     /
-                    f"sub-train-{metadata['pair'].split('/')[0]}_{int(trained_timestamp.stopts)}"
+                    f"sub-train-{metadata['pair'].split('/')[0]}_{trained_timestamp_int}"
                 )
-                if not self.model_exists(
-                    metadata["pair"], dk, trained_timestamp=int(trained_timestamp.stopts)
-                ):
-                    dk.find_features(dataframe_train)
-                    self.model = self.train(dataframe_train, metadata["pair"], dk)
-                    self.dd.pair_dict[metadata["pair"]]["trained_timestamp"] = int(
-                        trained_timestamp.stopts)
-                    dk.set_new_model_names(metadata["pair"], trained_timestamp)
-                    self.dd.save_data(self.model, metadata["pair"], dk)
+
+                dk.set_new_model_names(metadata["pair"], trained_timestamp)
+
+                if dk.check_if_backtest_prediction_exists():
+                    append_df = dk.get_backtesting_prediction()
+                    dk.append_predictions(append_df)
                 else:
-                    self.model = self.dd.load_data(metadata["pair"], dk)
-
-                self.check_if_feature_list_matches_strategy(dataframe_train, dk)
-
-                pred_df, do_preds = self.predict(dataframe_backtest, dk)
-
-                dk.append_predictions(pred_df, do_preds)
+                    if not self.model_exists(
+                        metadata["pair"], dk, trained_timestamp=trained_timestamp_int
+                    ):
+                        dk.find_features(dataframe_train)
+                        self.model = self.train(dataframe_train, metadata["pair"], dk)
+                        self.dd.pair_dict[metadata["pair"]]["trained_timestamp"] = int(
+                            trained_timestamp.stopts)
+
+                        if self.save_backtest_models:
+                            logger.info('Saving backtest model to disk.')
+                            self.dd.save_data(self.model, metadata["pair"], dk)
+                    else:
+                        self.model = self.dd.load_data(metadata["pair"], dk)
+
+                    self.check_if_feature_list_matches_strategy(dataframe_train, dk)
+
+                    pred_df, do_preds = self.predict(dataframe_backtest, dk)
+                    append_df = dk.get_predictions_to_append(pred_df, do_preds)
+                    dk.append_predictions(append_df)
+                    dk.save_backtesting_prediction(append_df)

                 dk.fill_predictions(dataframe)

@@ -291,14 +319,8 @@ class IFreqaiModel(ABC):
             )
             dk.set_paths(metadata["pair"], new_trained_timerange.stopts)

-            # download candle history if it is not already in memory
+            # load candle history into memory if it is not yet.
             if not self.dd.historic_data:
-                logger.info(
-                    "Downloading all training data for all pairs in whitelist and "
-                    "corr_pairlist, this may take a while if you do not have the "
-                    "data saved"
-                )
-                dk.download_all_data_for_training(data_load_timerange, strategy.dp)
                 self.dd.load_all_pair_histories(data_load_timerange, dk)

             if not self.scanning:
@@ -471,11 +493,6 @@ class IFreqaiModel(ABC):
         :return:
         :boolean: whether the model file exists or not.
         """
-        coin, _ = pair.split("/")
-
-        if not self.live:
-            dk.model_filename = model_filename = f"cb_{coin.lower()}_{trained_timestamp}"
-
         path_to_modelfile = Path(dk.data_path / f"{model_filename}_model.joblib")
         file_exists = path_to_modelfile.is_file()
         if file_exists and not scanning:
@@ -628,8 +645,8 @@ class IFreqaiModel(ABC):
             logger.info(
                 f'Total time spent inferencing pairlist {self.inference_time:.2f} seconds')
             if self.inference_time > 0.25 * self.base_tf_seconds:
-                logger.warning('Inference took over 25/% of the candle time. Reduce pairlist to'
-                               ' avoid blinding open trades and degrading performance.')
+                logger.warning("Inference took over 25% of the candle time. Reduce pairlist to"
+                               " avoid blinding open trades and degrading performance.")
             self.pair_it = 0
             self.inference_time = 0
             return
freqtrade/freqai/utils.py (new file, 134 lines)
@@ -0,0 +1,134 @@
+import logging
+from datetime import datetime, timezone
+
+from freqtrade.configuration import TimeRange
+from freqtrade.data.dataprovider import DataProvider
+from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data
+from freqtrade.exceptions import OperationalException
+from freqtrade.exchange import timeframe_to_seconds
+from freqtrade.exchange.exchange import market_is_active
+from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
+
+
+logger = logging.getLogger(__name__)
+
+
+def download_all_data_for_training(dp: DataProvider, config: dict) -> None:
+    """
+    Called only once upon start of bot to download the necessary data for
+    populating indicators and training the model.
+    :param timerange: TimeRange = The full data timerange for populating the indicators
+                      and training the model.
+    :param dp: DataProvider instance attached to the strategy
+    """
+
+    if dp._exchange is None:
+        raise OperationalException('No exchange object found.')
+    markets = [p for p, m in dp._exchange.markets.items() if market_is_active(m)
+               or config.get('include_inactive')]
+
+    all_pairs = dynamic_expand_pairlist(config, markets)
+
+    timerange = get_required_data_timerange(config)
+
+    new_pairs_days = int((timerange.stopts - timerange.startts) / 86400)
+
+    refresh_backtest_ohlcv_data(
+        dp._exchange,
+        pairs=all_pairs,
+        timeframes=config["freqai"]["feature_parameters"].get("include_timeframes"),
+        datadir=config["datadir"],
+        timerange=timerange,
+        new_pairs_days=new_pairs_days,
+        erase=False,
+        data_format=config.get("dataformat_ohlcv", "json"),
+        trading_mode=config.get("trading_mode", "spot"),
+        prepend=config.get("prepend_data", False),
+    )
+
+
+def get_required_data_timerange(
+    config: dict
+) -> TimeRange:
+    """
+    Used to compute the required data download time range
+    for auto data-download in FreqAI
+    """
+    time = datetime.now(tz=timezone.utc).timestamp()
+
+    timeframes = config["freqai"]["feature_parameters"].get("include_timeframes")
+
+    max_tf_seconds = 0
+    for tf in timeframes:
+        secs = timeframe_to_seconds(tf)
+        if secs > max_tf_seconds:
+            max_tf_seconds = secs
+
+    startup_candles = config.get('startup_candle_count', 0)
+    indicator_periods = config["freqai"]["feature_parameters"]["indicator_periods_candles"]
+
+    # factor the max_period as a factor of safety.
+    max_period = int(max(startup_candles, max(indicator_periods)) * 1.5)
+    config['startup_candle_count'] = max_period
+    logger.info(f'FreqAI auto-downloader using {max_period} startup candles.')
+
+    additional_seconds = max_period * max_tf_seconds
+
+    startts = int(
+        time
+        - config["freqai"].get("train_period_days", 0) * 86400
+        - additional_seconds
+    )
+    stopts = int(time)
+    data_load_timerange = TimeRange('date', 'date', startts, stopts)
+
+    return data_load_timerange
+
+
+# Keep below for when we wish to download heterogeneously lengthed data for FreqAI.
+# def download_all_data_for_training(dp: DataProvider, config: dict) -> None:
+#     """
+#     Called only once upon start of bot to download the necessary data for
+#     populating indicators and training a FreqAI model.
+#     :param timerange: TimeRange = The full data timerange for populating the indicators
+#                       and training the model.
+#     :param dp: DataProvider instance attached to the strategy
+#     """
+
+#     if dp._exchange is not None:
+#         markets = [p for p, m in dp._exchange.markets.items() if market_is_active(m)
+#                    or config.get('include_inactive')]
+#     else:
+#         # This should not occur:
+#         raise OperationalException('No exchange object found.')
+
+#     all_pairs = dynamic_expand_pairlist(config, markets)
+
+#     if not dp._exchange:
+#         # Not realistic - this is only called in live mode.
+#         raise OperationalException("Dataprovider did not have an exchange attached.")
+
+#     time = datetime.now(tz=timezone.utc).timestamp()
+
+#     for tf in config["freqai"]["feature_parameters"].get("include_timeframes"):
+#         timerange = TimeRange()
+#         timerange.startts = int(time)
+#         timerange.stopts = int(time)
+#         startup_candles = dp.get_required_startup(str(tf))
+#         tf_seconds = timeframe_to_seconds(str(tf))
+#         timerange.subtract_start(tf_seconds * startup_candles)
+#         new_pairs_days = int((timerange.stopts - timerange.startts) / 86400)
+#         # FIXME: now that we are looping on `refresh_backtest_ohlcv_data`, the function
+#         # redownloads the funding rate for each pair.
+#         refresh_backtest_ohlcv_data(
+#             dp._exchange,
+#             pairs=all_pairs,
+#             timeframes=[tf],
+#             datadir=config["datadir"],
+#             timerange=timerange,
+#             new_pairs_days=new_pairs_days,
+#             erase=False,
+#             data_format=config.get("dataformat_ohlcv", "json"),
+#             trading_mode=config.get("trading_mode", "spot"),
+#             prepend=config.get("prepend_data", False),
+#         )
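`get_required_data_timerange` deserves a worked example, since it silently rewrites `startup_candle_count`. A sketch with made-up but representative config values:

# Illustrative values only; mirrors the arithmetic in get_required_data_timerange.
startup_candles = 40                 # config['startup_candle_count']
indicator_periods = [10, 20]         # feature_parameters.indicator_periods_candles
max_tf_seconds = 4 * 3600            # largest include_timeframes entry: 4h
train_period_days = 15

max_period = int(max(startup_candles, max(indicator_periods)) * 1.5)  # int(40 * 1.5) = 60
additional_seconds = max_period * max_tf_seconds                      # 864000 s = 10 days

# The download window therefore starts 15 + 10 = 25 days before "now",
# and startup_candle_count is bumped from 40 to 60 for the rest of the run.
lookback_days = train_period_days + additional_seconds / 86400
print(max_period, lookback_days)  # 60 25.0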
@@ -142,15 +142,20 @@ class FreqtradeBot(LoggingMixin):
         :return: None
         """
         logger.info('Cleaning up modules ...')
-
-        if self.config['cancel_open_orders_on_exit']:
-            self.cancel_all_open_orders()
-
-        self.check_for_open_trades()
+        try:
+            # Wrap db activities in shutdown to avoid problems if database is gone,
+            # and raises further exceptions.
+            if self.config['cancel_open_orders_on_exit']:
+                self.cancel_all_open_orders()
+
+            self.check_for_open_trades()
+        finally:
+            self.strategy.ft_bot_cleanup()

         self.rpc.cleanup()
         Trade.commit()
         self.exchange.close()

     def startup(self) -> None:
         """
@@ -281,7 +286,7 @@ class FreqtradeBot(LoggingMixin):
                     pair=trade.pair,
                     amount=trade.amount,
                     is_short=trade.is_short,
-                    open_date=trade.open_date_utc
+                    open_date=trade.date_last_filled_utc
                 )
                 trade.funding_fees = funding_fees
             else:
@@ -726,10 +731,11 @@ class FreqtradeBot(LoggingMixin):
         fee = self.exchange.get_fee(symbol=pair, taker_or_maker='maker')
         base_currency = self.exchange.get_pair_base_currency(pair)
         open_date = datetime.now(timezone.utc)
-        funding_fees = self.exchange.get_funding_fees(
-            pair=pair, amount=amount, is_short=is_short, open_date=open_date)
+
         # This is a new trade
         if trade is None:
+            funding_fees = self.exchange.get_funding_fees(
+                pair=pair, amount=amount, is_short=is_short, open_date=open_date)
             trade = Trade(
                 pair=pair,
                 base_currency=base_currency,
|
|||||||
pair=trade.pair,
|
pair=trade.pair,
|
||||||
amount=trade.amount,
|
amount=trade.amount,
|
||||||
is_short=trade.is_short,
|
is_short=trade.is_short,
|
||||||
open_date=trade.open_date_utc,
|
open_date=trade.date_last_filled_utc,
|
||||||
)
|
)
|
||||||
exit_type = 'exit'
|
exit_type = 'exit'
|
||||||
exit_reason = exit_tag or exit_check.exit_reason
|
exit_reason = exit_tag or exit_check.exit_reason
|
||||||
@ -1778,7 +1784,7 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
self.rpc.send_msg(msg)
|
self.rpc.send_msg(msg)
|
||||||
|
|
||||||
def apply_fee_conditional(self, trade: Trade, trade_base_currency: str,
|
def apply_fee_conditional(self, trade: Trade, trade_base_currency: str,
|
||||||
amount: float, fee_abs: float) -> float:
|
amount: float, fee_abs: float, order_obj: Order) -> Optional[float]:
|
||||||
"""
|
"""
|
||||||
Applies the fee to amount (either from Order or from Trades).
|
Applies the fee to amount (either from Order or from Trades).
|
||||||
Can eat into dust if more than the required asset is available.
|
Can eat into dust if more than the required asset is available.
|
||||||
@ -1786,40 +1792,42 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
never in base currency.
|
never in base currency.
|
||||||
"""
|
"""
|
||||||
self.wallets.update()
|
self.wallets.update()
|
||||||
if fee_abs != 0 and self.wallets.get_free(trade_base_currency) >= amount:
|
amount_ = amount
|
||||||
|
if order_obj.ft_order_side == trade.exit_side or order_obj.ft_order_side == 'stoploss':
|
||||||
|
# check against remaining amount!
|
||||||
|
amount_ = trade.amount - amount
|
||||||
|
|
||||||
|
if fee_abs != 0 and self.wallets.get_free(trade_base_currency) >= amount_:
|
||||||
# Eat into dust if we own more than base currency
|
# Eat into dust if we own more than base currency
|
||||||
logger.info(f"Fee amount for {trade} was in base currency - "
|
logger.info(f"Fee amount for {trade} was in base currency - "
|
||||||
f"Eating Fee {fee_abs} into dust.")
|
f"Eating Fee {fee_abs} into dust.")
|
||||||
elif fee_abs != 0:
|
elif fee_abs != 0:
|
||||||
real_amount = self.exchange.amount_to_precision(trade.pair, amount - fee_abs)
|
logger.info(f"Applying fee on amount for {trade}, fee={fee_abs}.")
|
||||||
logger.info(f"Applying fee on amount for {trade} "
|
return fee_abs
|
||||||
f"(from {amount} to {real_amount}).")
|
return None
|
||||||
return real_amount
|
|
||||||
return amount
|
|
||||||
|
|
||||||
def handle_order_fee(self, trade: Trade, order_obj: Order, order: Dict[str, Any]) -> None:
|
def handle_order_fee(self, trade: Trade, order_obj: Order, order: Dict[str, Any]) -> None:
|
||||||
# Try update amount (binance-fix)
|
# Try update amount (binance-fix)
|
||||||
try:
|
try:
|
||||||
new_amount = self.get_real_amount(trade, order, order_obj)
|
fee_abs = self.get_real_amount(trade, order, order_obj)
|
||||||
if not isclose(safe_value_fallback(order, 'filled', 'amount'), new_amount,
|
if fee_abs is not None:
|
||||||
abs_tol=constants.MATH_CLOSE_PREC):
|
order_obj.ft_fee_base = fee_abs
|
||||||
order_obj.ft_fee_base = trade.amount - new_amount
|
|
||||||
except DependencyException as exception:
|
except DependencyException as exception:
|
||||||
logger.warning("Could not update trade amount: %s", exception)
|
logger.warning("Could not update trade amount: %s", exception)
|
||||||
|
|
||||||
def get_real_amount(self, trade: Trade, order: Dict, order_obj: Order) -> float:
|
def get_real_amount(self, trade: Trade, order: Dict, order_obj: Order) -> Optional[float]:
|
||||||
"""
|
"""
|
||||||
Detect and update trade fee.
|
Detect and update trade fee.
|
||||||
Calls trade.update_fee() upon correct detection.
|
Calls trade.update_fee() upon correct detection.
|
||||||
Returns modified amount if the fee was taken from the destination currency.
|
Returns modified amount if the fee was taken from the destination currency.
|
||||||
Necessary for exchanges which charge fees in base currency (e.g. binance)
|
Necessary for exchanges which charge fees in base currency (e.g. binance)
|
||||||
:return: identical (or new) amount for the trade
|
:return: Absolute fee to apply for this order or None
|
||||||
"""
|
"""
|
||||||
# Init variables
|
# Init variables
|
||||||
order_amount = safe_value_fallback(order, 'filled', 'amount')
|
order_amount = safe_value_fallback(order, 'filled', 'amount')
|
||||||
# Only run for closed orders
|
# Only run for closed orders
|
||||||
if trade.fee_updated(order.get('side', '')) or order['status'] == 'open':
|
if trade.fee_updated(order.get('side', '')) or order['status'] == 'open':
|
||||||
return order_amount
|
return None
|
||||||
|
|
||||||
trade_base_currency = self.exchange.get_pair_base_currency(trade.pair)
|
trade_base_currency = self.exchange.get_pair_base_currency(trade.pair)
|
||||||
# use fee from order-dict if possible
|
# use fee from order-dict if possible
|
||||||
@ -1836,13 +1844,14 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
if trade_base_currency == fee_currency:
|
if trade_base_currency == fee_currency:
|
||||||
# Apply fee to amount
|
# Apply fee to amount
|
||||||
return self.apply_fee_conditional(trade, trade_base_currency,
|
return self.apply_fee_conditional(trade, trade_base_currency,
|
||||||
amount=order_amount, fee_abs=fee_cost)
|
amount=order_amount, fee_abs=fee_cost,
|
||||||
return order_amount
|
order_obj=order_obj)
|
||||||
|
return None
|
||||||
return self.fee_detection_from_trades(
|
return self.fee_detection_from_trades(
|
||||||
trade, order, order_obj, order_amount, order.get('trades', []))
|
trade, order, order_obj, order_amount, order.get('trades', []))
|
||||||
|
|
||||||
def fee_detection_from_trades(self, trade: Trade, order: Dict, order_obj: Order,
|
def fee_detection_from_trades(self, trade: Trade, order: Dict, order_obj: Order,
|
||||||
order_amount: float, trades: List) -> float:
|
order_amount: float, trades: List) -> Optional[float]:
|
||||||
"""
|
"""
|
||||||
fee-detection fallback to Trades.
|
fee-detection fallback to Trades.
|
||||||
Either uses provided trades list or the result of fetch_my_trades to get correct fee.
|
Either uses provided trades list or the result of fetch_my_trades to get correct fee.
|
||||||
@ -1853,7 +1862,7 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
|
|
||||||
if len(trades) == 0:
|
if len(trades) == 0:
|
||||||
logger.info("Applying fee on amount for %s failed: myTrade-Dict empty found", trade)
|
logger.info("Applying fee on amount for %s failed: myTrade-Dict empty found", trade)
|
||||||
return order_amount
|
return None
|
||||||
fee_currency = None
|
fee_currency = None
|
||||||
amount = 0
|
amount = 0
|
||||||
fee_abs = 0.0
|
fee_abs = 0.0
|
||||||
@ -1895,10 +1904,9 @@ class FreqtradeBot(LoggingMixin):
|
|||||||
raise DependencyException("Half bought? Amounts don't match")
|
raise DependencyException("Half bought? Amounts don't match")
|
||||||
|
|
||||||
if fee_abs != 0:
|
if fee_abs != 0:
|
||||||
return self.apply_fee_conditional(trade, trade_base_currency,
|
return self.apply_fee_conditional(
|
||||||
amount=amount, fee_abs=fee_abs)
|
trade, trade_base_currency, amount=amount, fee_abs=fee_abs, order_obj=order_obj)
|
||||||
else:
|
return None
|
||||||
return amount
|
|
||||||
|
|
||||||
def get_valid_price(self, custom_price: float, proposed_price: float) -> float:
|
def get_valid_price(self, custom_price: float, proposed_price: float) -> float:
|
||||||
"""
|
"""
|
||||||
|
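The refactor changes `apply_fee_conditional` from returning an adjusted trade amount to returning the absolute fee (or `None`), which the caller stores on the order as `ft_fee_base`. A minimal sketch of the dust-eating decision, with hypothetical numbers and a simplified signature (not the freqtrade one):

from typing import Optional

def apply_fee_conditional(free_balance: float, amount: float,
                          fee_abs: float) -> Optional[float]:
    """Sketch of the dust-eating rule only."""
    if fee_abs != 0 and free_balance >= amount:
        # Enough spare base currency in the wallet: absorb ("eat") the fee
        # into dust and leave the trade amount untouched.
        return None
    if fee_abs != 0:
        # Otherwise the fee really came out of the traded asset:
        # report it so the order can record ft_fee_base = fee_abs.
        return fee_abs
    return None

# Bought 1.0 COIN, fee 0.001 COIN, wallet holds 1.0005 COIN -> eat into dust.
print(apply_fee_conditional(1.0005, 1.0, 0.001))  # None
# Wallet holds exactly what was bought -> deduct the fee from the position.
print(apply_fee_conditional(1.0, 1.0, 0.001))     # 0.001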
@@ -212,21 +212,12 @@ class Backtesting:
         """
         self.progress.init_step(BacktestState.DATALOAD, 1)

-        if self.config.get('freqai', {}).get('enabled', False):
-            startup_candles = int(self.config.get('freqai', {}).get('startup_candles', 0))
-            if not startup_candles:
-                raise OperationalException('FreqAI backtesting module requires user set '
-                                           'startup_candles in config.')
-            self.required_startup += int(self.config.get('freqai', {}).get('startup_candles', 0))
-            logger.info(f'Increasing startup_candle_count for freqai to {self.required_startup}')
-            self.config['startup_candle_count'] = self.required_startup
-
         data = history.load_data(
             datadir=self.config['datadir'],
             pairs=self.pairlists.whitelist,
             timeframe=self.timeframe,
             timerange=self.timerange,
-            startup_candles=self.required_startup,
+            startup_candles=self.dataprovider.get_required_startup(self.timeframe),
             fail_without_data=True,
             data_format=self.config.get('dataformat_ohlcv', 'json'),
             candle_type=self.config.get('candle_type_def', CandleType.SPOT)
@@ -695,7 +686,7 @@ class Backtesting:
                 self.futures_data[trade.pair],
                 amount=trade.amount,
                 is_short=trade.is_short,
-                open_date=trade.open_date_utc,
+                open_date=trade.date_last_filled_utc,
                 close_date=exit_candle_time,
             )

@@ -421,9 +421,10 @@ class Hyperopt:
         preprocessed = self.backtesting.strategy.advise_all_indicators(data)

         # Trim startup period from analyzed dataframe to get correct dates for output.
-        processed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
-        self.min_date, self.max_date = get_timerange(processed)
-        return processed
+        trimmed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
+        self.min_date, self.max_date = get_timerange(trimmed)
+        # Real trimming will happen as part of backtesting.
+        return preprocessed

     def prepare_hyperopt_data(self) -> None:
         HyperoptStateContainer.set_state(HyperoptState.DATALOAD)
@@ -212,17 +212,18 @@ def migrate_orders_table(engine, table_back_name: str, cols_order: List):
     ft_fee_base = get_column_def(cols_order, 'ft_fee_base', 'null')
     average = get_column_def(cols_order, 'average', 'null')
     stop_price = get_column_def(cols_order, 'stop_price', 'null')
+    funding_fee = get_column_def(cols_order, 'funding_fee', '0.0')

     # sqlite does not support literals for booleans
     with engine.begin() as connection:
         connection.execute(text(f"""
         insert into orders (id, ft_trade_id, ft_order_side, ft_pair, ft_is_open, order_id,
         status, symbol, order_type, side, price, amount, filled, average, remaining, cost,
-        stop_price, order_date, order_filled_date, order_update_date, ft_fee_base)
+        stop_price, order_date, order_filled_date, order_update_date, ft_fee_base, funding_fee)
         select id, ft_trade_id, ft_order_side, ft_pair, ft_is_open, order_id,
         status, symbol, order_type, side, price, amount, filled, {average} average, remaining,
         cost, {stop_price} stop_price, order_date, order_filled_date,
-        order_update_date, {ft_fee_base} ft_fee_base
+        order_update_date, {ft_fee_base} ft_fee_base, {funding_fee} funding_fee
         from {table_back_name}
         """))

@@ -307,9 +308,10 @@ def check_migrate(engine, decl_base, previous_tables) -> None:
     # Check if migration necessary
     # Migrates both trades and orders table!
     # if ('orders' not in previous_tables
-    #     or not has_column(cols_orders, 'stop_price')):
+    #     or not has_column(cols_orders, 'funding_fee')):
     migrating = False
-    if not has_column(cols_trades, 'contract_size'):
+    # if not has_column(cols_trades, 'contract_size'):
+    if not has_column(cols_orders, 'funding_fee'):
         migrating = True
         logger.info(f"Running database migration for trades - "
                     f"backup: {table_back_name}, {order_table_bak_name}")
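The migration above follows freqtrade's usual pattern: rename the old table to a backup, recreate it with the new column, and copy rows across while substituting a literal default (here `'0.0'`) for the column old rows never had. A stripped-down, runnable sketch of that copy-table pattern (table and column names are illustrative):

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE orders (id INTEGER PRIMARY KEY, amount FLOAT)")
con.execute("INSERT INTO orders (amount) VALUES (1.5)")

# Migration: back up, recreate with the new column, copy with a default.
con.execute("ALTER TABLE orders RENAME TO orders_bak")
con.execute(
    "CREATE TABLE orders (id INTEGER PRIMARY KEY, amount FLOAT, funding_fee FLOAT)")
con.execute(
    # '0.0' plays the role of get_column_def()'s fallback for old rows.
    "INSERT INTO orders (id, amount, funding_fee) "
    "SELECT id, amount, 0.0 FROM orders_bak")
print(con.execute("SELECT * FROM orders").fetchall())  # [(1, 1.5, 0.0)]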
@@ -65,6 +65,8 @@ class Order(_DECL_BASE):
     order_filled_date = Column(DateTime, nullable=True)
     order_update_date = Column(DateTime, nullable=True)

+    funding_fee = Column(Float, nullable=True)
+
     ft_fee_base = Column(Float, nullable=True)

     @property
@@ -72,6 +74,13 @@ class Order(_DECL_BASE):
         """ Order-date with UTC timezoneinfo"""
         return self.order_date.replace(tzinfo=timezone.utc)

+    @property
+    def order_filled_utc(self) -> Optional[datetime]:
+        """ last order-date with UTC timezoneinfo"""
+        return (
+            self.order_filled_date.replace(tzinfo=timezone.utc) if self.order_filled_date else None
+        )
+
     @property
     def safe_price(self) -> float:
         return self.average or self.price
@@ -119,6 +128,10 @@ class Order(_DECL_BASE):
         self.ft_is_open = True
         if self.status in NON_OPEN_EXCHANGE_STATES:
             self.ft_is_open = False
+            if self.trade:
+                # Assign funding fee up to this point
+                # (represents the funding fee since the last order)
+                self.funding_fee = self.trade.funding_fees
         if (order.get('filled', 0.0) or 0.0) > 0:
             self.order_filled_date = datetime.now(timezone.utc)
         self.order_update_date = datetime.now(timezone.utc)
@@ -179,6 +192,10 @@ class Order(_DECL_BASE):
         self.remaining = 0
         self.status = 'closed'
         self.ft_is_open = False
+        # Assign funding fees to Order.
+        # Assumes backtesting will use date_last_filled_utc to calculate future funding fees.
+        self.funding_fee = trade.funding_fees
+
         if (self.ft_order_side == trade.entry_side):
             trade.open_rate = self.price
             trade.recalc_trade_from_orders()
@@ -346,6 +363,15 @@ class LocalTrade():
         else:
             return self.amount

+    @property
+    def date_last_filled_utc(self) -> datetime:
+        """ Date of the last filled order"""
+        orders = self.select_filled_orders()
+        if not orders:
+            return self.open_date_utc
+        return max([self.open_date_utc,
+                    max(o.order_filled_utc for o in orders if o.order_filled_utc)])
+
     @property
     def open_date_utc(self):
         return self.open_date.replace(tzinfo=timezone.utc)
@@ -648,7 +674,6 @@ class LocalTrade():
         """
         self.close_rate = rate
         self.close_date = self.close_date or datetime.utcnow()
-        self.close_profit_abs = self.calc_profit(rate) + self.realized_profit
         self.is_open = False
         self.exit_order_status = 'closed'
         self.open_order_id = None
@@ -844,10 +869,14 @@ class LocalTrade():
         close_profit = 0.0
         close_profit_abs = 0.0
         profit = None
-        for o in self.orders:
+        # Reset funding fees
+        self.funding_fees = 0.0
+        funding_fees = 0.0
+        ordercount = len(self.orders) - 1
+        for i, o in enumerate(self.orders):
             if o.ft_is_open or not o.filled:
                 continue
+            funding_fees += (o.funding_fee or 0.0)
             tmp_amount = FtPrecise(o.safe_amount_after_fee)
             tmp_price = FtPrecise(o.safe_price)

@@ -862,7 +891,11 @@ class LocalTrade():
             avg_price = current_stake / current_amount

             if is_exit:
-                # Process partial exits
+                # Process exits
+                if i == ordercount and is_closing:
+                    # Apply funding fees only to the last closing order
+                    self.funding_fees = funding_fees
+
                 exit_rate = o.safe_price
                 exit_amount = o.safe_amount_after_fee
                 profit = self.calc_profit(rate=exit_rate, amount=exit_amount,
@@ -872,6 +905,7 @@ class LocalTrade():
                                           exit_rate, amount=exit_amount, open_rate=avg_price)
             else:
                 total_stake = total_stake + self._calc_open_trade_value(tmp_amount, price)
+                self.funding_fees = funding_fees

         if close_profit:
             self.close_profit = close_profit
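The recalculation above sums each order's stored `funding_fee`, but only commits the running total to `self.funding_fees` on entries and on the final closing order, so partial exits do not double-count earlier funding payments. A small worked sketch of just that accumulation (numbers invented):

# Orders of one futures trade, each carrying the funding fee accrued
# since the previous order (entry, partial exit, final exit).
order_funding_fees = [-1.0, -0.5, -0.25]

funding_fees = 0.0
trade_funding_fees = 0.0  # stand-in for self.funding_fees
ordercount = len(order_funding_fees) - 1
is_closing = True

for i, fee in enumerate(order_funding_fees):
    funding_fees += fee
    if i == ordercount and is_closing:
        # Only the last closing order applies the full accumulated total.
        trade_funding_fees = funding_fees

print(trade_funding_fees)  # -1.75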
@@ -52,7 +52,7 @@ class PrecisionFilter(IPairList):
         :return: True if the pair can stay, false if it should be removed
         """
         if ticker.get('last', None) is None:
-            self.log_once(f"Removed {ticker['symbol']} from whitelist, because "
+            self.log_once(f"Removed {pair} from whitelist, because "
                           "ticker['last'] is empty (Usually no trade in the last 24h).",
                           logger.info)
             return False
@@ -62,10 +62,10 @@ class PrecisionFilter(IPairList):
         sp = self._exchange.price_to_precision(pair, stop_price)

         stop_gap_price = self._exchange.price_to_precision(pair, stop_price * 0.99)
-        logger.debug(f"{ticker['symbol']} - {sp} : {stop_gap_price}")
+        logger.debug(f"{pair} - {sp} : {stop_gap_price}")

         if sp <= stop_gap_price:
-            self.log_once(f"Removed {ticker['symbol']} from whitelist, because "
+            self.log_once(f"Removed {pair} from whitelist, because "
                           f"stop price {sp} would be <= stop limit {stop_gap_price}", logger.info)
             return False

@@ -186,6 +186,7 @@ class VolumePairList(IPairList):
                 needed_pairs, since_ms=since_ms, cache=False
             )
             for i, p in enumerate(filtered_tickers):
+                contract_size = self._exchange.markets[p['symbol']].get('contractSize', 1.0) or 1.0
                 pair_candles = candles[
                     (p['symbol'], self._lookback_timeframe, self._def_candletype)
                 ] if (
@@ -199,6 +200,7 @@ class VolumePairList(IPairList):

                     pair_candles['quoteVolume'] = (
                         pair_candles['volume'] * pair_candles['typical_price']
+                        * contract_size
                     )
                 else:
                     # Exchange ohlcv data is in quote volume already.
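On futures markets, candle `volume` is often denominated in contracts rather than coins, so the derived quote volume must be scaled by the contract size. A worked sketch with made-up numbers:

# Hypothetical futures market: each contract represents 0.01 of the base coin.
contract_size = 0.01
volume_contracts = 50_000          # candle volume, in contracts
typical_price = 20_000.0           # (high + low + close) / 3, in quote currency

# Without the contract_size factor this pair would look 100x more liquid than it is.
quote_volume = volume_contracts * typical_price * contract_size
print(quote_volume)  # 10000000.0 quote units, not 1000000000.0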
@@ -261,11 +261,15 @@ class RPC:
                     profit_str += f" ({fiat_profit:.2f})"
                     fiat_profit_sum = fiat_profit if isnan(fiat_profit_sum) \
                         else fiat_profit_sum + fiat_profit
+                open_order = (trade.select_order_by_order_id(
+                    trade.open_order_id) if trade.open_order_id else None)
+
                 detail_trade = [
                     f'{trade.id} {direction_str}',
-                    trade.pair + ('*' if (trade.open_order_id is not None
-                                          and trade.close_rate_requested is None) else '')
-                               + ('**' if (trade.close_rate_requested is not None) else ''),
+                    trade.pair + ('*' if (open_order
+                                          and open_order.ft_order_side == trade.entry_side) else '')
+                               + ('**' if (open_order and
+                                           open_order.ft_order_side == trade.exit_side is not None) else ''),
                     shorten_date(arrow.get(trade.open_date).humanize(only_distance=True)),
                     profit_str
                 ]
@@ -6,6 +6,7 @@ This module manage Telegram communication
 import json
 import logging
 import re
+from copy import deepcopy
 from dataclasses import dataclass
 from datetime import date, datetime, timedelta
 from functools import partial
@@ -374,7 +375,7 @@ class Telegram(RPCHandler):
             message += f"\n*Duration:* `{msg['duration']} ({msg['duration_min']:.1f} min)`"
         return message

-    def compose_message(self, msg: Dict[str, Any], msg_type: RPCMessageType) -> str:
+    def compose_message(self, msg: Dict[str, Any], msg_type: RPCMessageType) -> Optional[str]:
         if msg_type in [RPCMessageType.ENTRY, RPCMessageType.ENTRY_FILL]:
             message = self._format_entry_msg(msg)

@@ -411,7 +412,8 @@ class Telegram(RPCHandler):
         elif msg_type == RPCMessageType.STRATEGY_MSG:
             message = f"{msg['msg']}"
         else:
-            raise NotImplementedError(f"Unknown message type: {msg_type}")
+            logger.debug("Unknown message type: %s", msg_type)
+            return None
         return message

     def send_msg(self, msg: Dict[str, Any]) -> None:
@@ -438,9 +440,9 @@ class Telegram(RPCHandler):
             # Notification disabled
             return

-        message = self.compose_message(msg, msg_type)
-
-        self._send_msg(message, disable_notification=(noti == 'silent'))
+        message = self.compose_message(deepcopy(msg), msg_type)
+        if message:
+            self._send_msg(message, disable_notification=(noti == 'silent'))

     def _get_sell_emoji(self, msg):
         """
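Passing `deepcopy(msg)` likely matters because the formatting helpers mutate the dict in place, while the same `msg` object is fanned out to every RPC handler. A tiny sketch of the hazard (names and rounding are illustrative, not freqtrade's formatters):

from copy import deepcopy

def compose_message(msg: dict) -> str:
    msg['amount'] = round(msg['amount'], 4)  # formatting mutates its input
    return f"Bought {msg['amount']}"

original = {'amount': 0.123456789}
compose_message(deepcopy(original))   # Telegram formats its own private copy
print(original['amount'])             # 0.123456789 - other handlers still see the raw value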
@@ -148,10 +148,19 @@ class IStrategy(ABC, HyperStrategyMixin):
     def load_freqAI_model(self) -> None:
         if self.config.get('freqai', {}).get('enabled', False):
             # Import here to avoid importing this if freqAI is disabled
+            from freqtrade.freqai.utils import download_all_data_for_training
             from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver

             self.freqai = FreqaiModelResolver.load_freqaimodel(self.config)
             self.freqai_info = self.config["freqai"]
+
+            # download the desired data in dry/live
+            if self.config.get('runmode') in (RunMode.DRY_RUN, RunMode.LIVE):
+                logger.info(
+                    "Downloading all training data for all pairs in whitelist and "
+                    "corr_pairlist, this may take a while if the data is not "
+                    "already on disk."
+                )
+                download_all_data_for_training(self.dp, self.config)
         else:
             # Gracious failures if freqAI is disabled but "start" is called.
             class DummyClass():
@@ -159,6 +168,10 @@ class IStrategy(ABC, HyperStrategyMixin):
                     raise OperationalException(
                         'freqAI is not enabled. '
                         'Please enable it in your config to use this strategy.')
+
+                def shutdown(self, *args, **kwargs):
+                    pass
+
             self.freqai = DummyClass()  # type: ignore

     def ft_bot_start(self, **kwargs) -> None:
@@ -172,6 +185,12 @@ class IStrategy(ABC, HyperStrategyMixin):

         self.ft_load_hyper_params(self.config.get('runmode') == RunMode.HYPEROPT)

+    def ft_bot_cleanup(self) -> None:
+        """
+        Clean up FreqAI and child threads
+        """
+        self.freqai.shutdown()
+
     @abstractmethod
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         """
@@ -43,7 +43,8 @@ class FreqaiExampleStrategy(IStrategy):
     process_only_new_candles = True
     stoploss = -0.05
     use_exit_signal = True
-    startup_candle_count: int = 300
+    # this is the maximum period fed to talib (timeframe independent)
+    startup_candle_count: int = 40
     can_short = False

     linear_roi_offset = DecimalParameter(
@@ -45,7 +45,6 @@ class FreqaiExampleHybridStrategy(IStrategy):
             "weight_factor": 0.9,
             "principal_component_analysis": false,
             "use_SVM_to_remove_outliers": true,
-            "indicator_max_period_candles": 20,
             "indicator_periods_candles": [10, 20]
         },
         "data_split_parameters": {
@@ -10,7 +10,7 @@ flake8==5.0.4
 flake8-tidy-imports==4.8.0
 mypy==0.971
 pre-commit==2.20.0
-pytest==7.1.2
+pytest==7.1.3
 pytest-asyncio==0.19.0
 pytest-cov==3.0.0
 pytest-mock==3.8.2
@@ -1,22 +1,22 @@
 numpy==1.23.2
-pandas==1.4.3
+pandas==1.4.4
 pandas-ta==0.3.14b

-ccxt==1.92.84
+ccxt==1.93.3
 # Pin cryptography for now due to rust build errors with piwheels
 cryptography==37.0.4
 aiohttp==3.8.1
 SQLAlchemy==1.4.40
-python-telegram-bot==13.13
-arrow==1.2.2
+python-telegram-bot==13.14
+arrow==1.2.3
 cachetools==4.2.2
 requests==2.28.1
 urllib3==1.26.12
-jsonschema==4.14.0
+jsonschema==4.15.0
 TA-Lib==0.4.24
 technical==1.3.0
 tabulate==0.8.10
-pycoingecko==2.2.0
+pycoingecko==3.0.0
 jinja2==3.1.2
 tables==3.7.0
 blosc==1.10.6
@@ -34,17 +34,17 @@ orjson==3.8.0
 sdnotify==0.3.2

 # API Server
-fastapi==0.81.0
+fastapi==0.82.0
 uvicorn==0.18.3
 pyjwt==2.4.0
 aiofiles==0.8.0
-psutil==5.9.1
+psutil==5.9.2

 # Support for colorized terminal output
 colorama==0.4.5
 # Building config files interactively
 questionary==1.10.0
-prompt-toolkit==3.0.30
+prompt-toolkit==3.0.31
 # Extensions to datetime library
 python-dateutil==2.8.2

@@ -1,4 +1,3 @@
-from math import isclose
 from pathlib import Path
 from unittest.mock import MagicMock

@@ -269,7 +268,7 @@ def test_create_cum_profit(testdatadir):
                                     "cum_profits", timeframe="5m")
     assert "cum_profits" in cum_profits.columns
     assert cum_profits.iloc[0]['cum_profits'] == 0
-    assert isclose(cum_profits.iloc[-1]['cum_profits'], 8.723007518796964e-06)
+    assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 8.723007518796964e-06


 def test_create_cum_profit1(testdatadir):
@@ -287,7 +286,7 @@ def test_create_cum_profit1(testdatadir):
                                     "cum_profits", timeframe="5m")
     assert "cum_profits" in cum_profits.columns
     assert cum_profits.iloc[0]['cum_profits'] == 0
-    assert isclose(cum_profits.iloc[-1]['cum_profits'], 8.723007518796964e-06)
+    assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 8.723007518796964e-06

     with pytest.raises(ValueError, match='Trade dataframe empty.'):
         create_cum_profit(df.set_index('date'), bt_data[bt_data["pair"] == 'NOTAPAIR'],
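The sweep from `math.isclose` to `pytest.approx` here and below is more than cosmetic: `isclose` defaults to `rel_tol=1e-09` with `abs_tol=0.0`, while `pytest.approx` defaults to `rel=1e-6` plus `abs=1e-12`, and on failure it reports both values instead of a bare `assert False`. A quick illustration:

from math import isclose

import pytest

a = 8.723007518796964e-06
b = a * (1 + 1e-7)

print(isclose(a, b))          # False: 1e-7 error exceeds rel_tol=1e-09, abs_tol=0.0
print(a == pytest.approx(b))  # True: within pytest's default rel=1e-6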
@@ -2,7 +2,6 @@ import copy
 import logging
 from copy import deepcopy
 from datetime import datetime, timedelta, timezone
-from math import isclose
 from random import randint
 from unittest.mock import MagicMock, Mock, PropertyMock, patch

@@ -407,10 +406,10 @@ def test__get_stake_amount_limit(mocker, default_conf) -> None:
     # min
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 1, stoploss)
     expected_result = 2 * (1 + 0.05) / (1 - abs(stoploss))
-    assert isclose(result, expected_result)
+    assert pytest.approx(result) == expected_result
     # With Leverage
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 1, stoploss, 3.0)
-    assert isclose(result, expected_result / 3)
+    assert pytest.approx(result) == expected_result / 3
     # max
     result = exchange.get_max_pair_stake_amount('ETH/BTC', 2)
     assert result == 10000
@@ -426,10 +425,10 @@ def test__get_stake_amount_limit(mocker, default_conf) -> None:
     )
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss)
     expected_result = 2 * 2 * (1 + 0.05) / (1 - abs(stoploss))
-    assert isclose(result, expected_result)
+    assert pytest.approx(result) == expected_result
     # With Leverage
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss, 5.0)
-    assert isclose(result, expected_result / 5)
+    assert pytest.approx(result) == expected_result / 5
     # max
     result = exchange.get_max_pair_stake_amount('ETH/BTC', 2)
     assert result == 20000
@@ -445,10 +444,10 @@ def test__get_stake_amount_limit(mocker, default_conf) -> None:
     )
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss)
     expected_result = max(2, 2 * 2) * (1 + 0.05) / (1 - abs(stoploss))
-    assert isclose(result, expected_result)
+    assert pytest.approx(result) == expected_result
     # With Leverage
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss, 10)
-    assert isclose(result, expected_result / 10)
+    assert pytest.approx(result) == expected_result / 10

     # min amount and cost are set (amount is minial)
     markets["ETH/BTC"]["limits"] = {
@@ -461,20 +460,20 @@ def test__get_stake_amount_limit(mocker, default_conf) -> None:
     )
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss)
     expected_result = max(8, 2 * 2) * (1 + 0.05) / (1 - abs(stoploss))
-    assert isclose(result, expected_result)
+    assert pytest.approx(result) == expected_result
     # With Leverage
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss, 7.0)
-    assert isclose(result, expected_result / 7.0)
+    assert pytest.approx(result) == expected_result / 7.0
     # Max
     result = exchange.get_max_pair_stake_amount('ETH/BTC', 2)
     assert result == 1000

     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -0.4)
     expected_result = max(8, 2 * 2) * 1.5
-    assert isclose(result, expected_result)
+    assert pytest.approx(result) == expected_result
     # With Leverage
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -0.4, 8.0)
-    assert isclose(result, expected_result / 8.0)
+    assert pytest.approx(result) == expected_result / 8.0
     # Max
     result = exchange.get_max_pair_stake_amount('ETH/BTC', 2)
     assert result == 1000
@@ -482,10 +481,10 @@ def test__get_stake_amount_limit(mocker, default_conf) -> None:
     # Really big stoploss
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -1)
     expected_result = max(8, 2 * 2) * 1.5
-    assert isclose(result, expected_result)
+    assert pytest.approx(result) == expected_result
     # With Leverage
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -1, 12.0)
-    assert isclose(result, expected_result / 12)
+    assert pytest.approx(result) == expected_result / 12
     # Max
     result = exchange.get_max_pair_stake_amount('ETH/BTC', 2)
     assert result == 1000
@@ -501,7 +500,7 @@ def test__get_stake_amount_limit(mocker, default_conf) -> None:

     # Contract size 0.01
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -1)
-    assert isclose(result, expected_result * 0.01)
+    assert pytest.approx(result) == expected_result * 0.01
     # Max
     result = exchange.get_max_pair_stake_amount('ETH/BTC', 2)
     assert result == 10
@@ -513,7 +512,7 @@ def test__get_stake_amount_limit(mocker, default_conf) -> None:
     )
     # With Leverage, Contract size 10
     result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -1, 12.0)
-    assert isclose(result, (expected_result / 12) * 10.0)
+    assert pytest.approx(result) == (expected_result / 12) * 10.0
     # Max
     result = exchange.get_max_pair_stake_amount('ETH/BTC', 2)
     assert result == 10000
|
assert result == 10000
|
||||||
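
Every assertion change in the hunks above is the same mechanical swap: `math.isclose(a, b)` becomes `pytest.approx(a) == b`. A minimal sketch of the two idioms, assuming nothing beyond pytest and the standard library (the numbers are illustrative, not taken from the fixtures):

from math import isclose

import pytest

stoploss = -0.05
expected = 2 * (1 + 0.05) / (1 - abs(stoploss))   # same formula as the test
result = expected * (1 + 1e-12)                   # stand-in for the exchange's return value

# Old style: isclose() defaults to rel_tol=1e-09 and reports only True/False.
assert isclose(result, expected)

# New style: pytest.approx() uses a relative tolerance of 1e-6 by default and
# prints both operands on failure, which makes a broken test easier to read.
assert pytest.approx(result) == expected
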
@@ -3239,7 +3238,7 @@ def test_get_trades_for_order(default_conf, mocker, exchange_name, trading_mode,
     orders = exchange.get_trades_for_order(order_id, 'ETH/USDT:USDT', since)
     assert len(orders) == 1
     assert orders[0]['price'] == 165
-    assert isclose(orders[0]['amount'], amount)
+    assert pytest.approx(orders[0]['amount']) == amount
     assert api_mock.fetch_my_trades.call_count == 1
     # since argument should be
     assert isinstance(api_mock.fetch_my_trades.call_args[0][1], int)
@@ -3776,8 +3775,8 @@ def test__get_funding_fees_from_exchange(default_conf, mocker, exchange_name):
         since=unix_time
     )

-    assert (isclose(expected_fees, fees_from_datetime))
-    assert (isclose(expected_fees, fees_from_unix_time))
+    assert pytest.approx(expected_fees) == fees_from_datetime
+    assert pytest.approx(expected_fees) == fees_from_unix_time

     ccxt_exceptionhandlers(
         mocker,
@@ -4514,7 +4513,7 @@ def test_liquidation_price(
     default_conf['liquidation_buffer'] = 0.0
     exchange = get_patched_exchange(mocker, default_conf, id=exchange_name)
     exchange.get_maintenance_ratio_and_amt = MagicMock(return_value=(mm_ratio, maintenance_amt))
-    assert isclose(round(exchange.get_liquidation_price(
+    assert pytest.approx(round(exchange.get_liquidation_price(
         pair='DOGE/USDT',
         open_rate=open_rate,
         is_short=is_short,
@@ -4523,7 +4522,7 @@ def test_liquidation_price(
         upnl_ex_1=upnl_ex_1,
         amount=amount,
         stake_amount=open_rate * amount,
-    ), 2), expected)
+    ), 2)) == expected


 def test_get_max_pair_stake_amount(
@@ -4868,8 +4867,8 @@ def test_get_max_leverage_futures(default_conf, mocker, leverage_tiers):
     assert exchange.get_max_leverage("BNB/BUSD", 1.0) == 20.0
     assert exchange.get_max_leverage("BNB/USDT", 100.0) == 75.0
     assert exchange.get_max_leverage("BTC/USDT", 170.30) == 125.0
-    assert isclose(exchange.get_max_leverage("BNB/BUSD", 99999.9), 5.000005)
-    assert isclose(exchange.get_max_leverage("BNB/USDT", 1500), 33.333333333333333)
+    assert pytest.approx(exchange.get_max_leverage("BNB/BUSD", 99999.9)) == 5.000005
+    assert pytest.approx(exchange.get_max_leverage("BNB/USDT", 1500)) == 33.333333333333333
     assert exchange.get_max_leverage("BTC/USDT", 300000000) == 2.0
     assert exchange.get_max_leverage("BTC/USDT", 600000000) == 1.0  # Last tier

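
The two `pytest.approx` checks in the leverage-tier hunk are consistent with leverage being capped by the tier's maximum notional (500000 / 99999.9 is roughly 5.000005, and 50000 / 1500 is roughly 33.33). A sketch of that arithmetic under assumed tier bounds; the bounds below are illustrative, not read from the `leverage_tiers` fixture:

import pytest

def capped_leverage(stake: float, tier_max_notional: float, tier_max_leverage: float) -> float:
    # Leverage is limited so that stake * leverage never exceeds the tier's notional cap.
    return min(tier_max_leverage, tier_max_notional / stake)

assert pytest.approx(capped_leverage(99999.9, 500_000, 10)) == 5.000005
assert pytest.approx(capped_leverage(1500, 50_000, 50)) == 33.333333333333333
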
@@ -4986,6 +4985,7 @@ def test_get_liquidation_price1(mocker, default_conf):
         is_short=False,
         amount=0.8,
         stake_amount=18.884 * 0.8,
+        wallet_balance=18.884 * 0.8,
     )
     assert liq_price == 17.47

@@ -4997,6 +4997,7 @@ def test_get_liquidation_price1(mocker, default_conf):
         is_short=False,
         amount=0.8,
         stake_amount=18.884 * 0.8,
+        wallet_balance=18.884 * 0.8,
     )
     assert liq_price == 17.540699999999998

@@ -5008,6 +5009,7 @@ def test_get_liquidation_price1(mocker, default_conf):
         is_short=False,
         amount=0.8,
         stake_amount=18.884 * 0.8,
+        wallet_balance=18.884 * 0.8,
     )
     assert liq_price is None
     default_conf['trading_mode'] = 'margin'
@@ -5020,6 +5022,7 @@ def test_get_liquidation_price1(mocker, default_conf):
         is_short=False,
         amount=0.8,
         stake_amount=18.884 * 0.8,
+        wallet_balance=18.884 * 0.8,
     )


@@ -5145,7 +5148,7 @@ def test_get_liquidation_price(
     else:
         buffer_amount = liquidation_buffer * abs(open_rate - expected_liq)
         expected_liq = expected_liq - buffer_amount if is_short else expected_liq + buffer_amount
-    assert isclose(expected_liq, liq)
+    assert pytest.approx(expected_liq) == liq


 @pytest.mark.parametrize('contract_size,order_amount', [
@@ -45,7 +45,6 @@ def freqai_conf(default_conf, tmpdir):
             "principal_component_analysis": False,
             "use_SVM_to_remove_outliers": True,
             "stratify_training_data": 0,
-            "indicator_max_period_candles": 10,
             "indicator_periods_candles": [10],
         },
         "data_split_parameters": {"test_size": 0.33, "random_state": 1},
@@ -48,10 +48,4 @@ def test_freqai_backtest_load_data(freqai_conf, mocker, caplog):

     assert log_has_re('Increasing startup_candle_count for freqai to.*', caplog)

-    del freqai_conf['freqai']['startup_candles']
-    backtesting = Backtesting(freqai_conf)
-    with pytest.raises(OperationalException,
-                       match=r'FreqAI backtesting module.*startup_candles in config.'):
-        backtesting.load_bt_data()
-
     Backtesting.cleanup()
@@ -1,5 +1,5 @@
-import datetime
 import shutil
+from datetime import datetime, timedelta, timezone
 from pathlib import Path

 import pytest
@@ -57,16 +57,13 @@ def test_split_timerange(
     shutil.rmtree(Path(dk.full_path))


-@pytest.mark.parametrize(
-    "timestamp, expected",
-    [
-        (datetime.datetime.now(tz=datetime.timezone.utc).timestamp() - 7200, True),
-        (datetime.datetime.now(tz=datetime.timezone.utc).timestamp(), False),
-    ],
-)
-def test_check_if_model_expired(mocker, freqai_conf, timestamp, expected):
+def test_check_if_model_expired(mocker, freqai_conf):
     dk = get_patched_data_kitchen(mocker, freqai_conf)
-    assert dk.check_if_model_expired(timestamp) == expected
+    now = datetime.now(tz=timezone.utc).timestamp()
+    assert dk.check_if_model_expired(now) is False
+    now = (datetime.now(tz=timezone.utc) - timedelta(hours=2)).timestamp()
+    assert dk.check_if_model_expired(now) is True
     shutil.rmtree(Path(dk.full_path))

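
The rewritten test builds its timezone-aware timestamps inside the test body rather than in the parametrize decorator, where `datetime.now()` is evaluated once at collection time. A self-contained sketch of the same pattern; `is_expired` is a hypothetical stand-in for `dk.check_if_model_expired`, and the one-hour threshold is an assumption, not taken from the diff:

from datetime import datetime, timedelta, timezone

def is_expired(model_timestamp: float, max_age_hours: float = 1.0) -> bool:
    """Hypothetical stand-in for dk.check_if_model_expired()."""
    age_hours = (datetime.now(tz=timezone.utc).timestamp() - model_timestamp) / 3600
    return age_hours > max_age_hours

fresh = datetime.now(tz=timezone.utc).timestamp()
stale = (datetime.now(tz=timezone.utc) - timedelta(hours=2)).timestamp()

assert is_expired(fresh) is False
assert is_expired(stale) is True
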
@@ -75,7 +72,7 @@ def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog):
     # freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1})
     freqai.dk.use_DBSCAN_to_remove_outliers(predict=False)
     assert log_has_re(
-        "DBSCAN found eps of 2.42.",
+        "DBSCAN found eps of 2.36.",
         caplog,
     )

@@ -84,7 +81,7 @@ def test_compute_distances(mocker, freqai_conf):
     freqai = make_data_dictionary(mocker, freqai_conf)
     freqai_conf['freqai']['feature_parameters'].update({"DI_threshold": 1})
     avg_mean_dist = freqai.dk.compute_distances()
-    assert round(avg_mean_dist, 2) == 2.56
+    assert round(avg_mean_dist, 2) == 2.54


 def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, caplog):
@@ -92,7 +89,7 @@ def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf,
     freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 0.1})
     freqai.dk.use_SVM_to_remove_outliers(predict=False)
     assert log_has_re(
-        "SVM detected 8.46%",
+        "SVM detected 8.09%",
         caplog,
     )

@@ -176,6 +176,7 @@ def test_extract_data_and_train_model_LightGBMClassifier(mocker, freqai_conf):

 def test_start_backtesting(mocker, freqai_conf):
     freqai_conf.update({"timerange": "20180120-20180130"})
+    freqai_conf.get("freqai", {}).update({"save_backtest_models": True})
     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
     strategy.dp = DataProvider(freqai_conf, exchange)
@@ -194,7 +195,7 @@ def test_start_backtesting(mocker, freqai_conf):
     freqai.start_backtesting(df, metadata, freqai.dk)
     model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()]

-    assert len(model_folders) == 5
+    assert len(model_folders) == 6

     shutil.rmtree(Path(freqai.dk.full_path))

@@ -202,6 +203,7 @@ def test_start_backtesting(mocker, freqai_conf):
 def test_start_backtesting_subdaily_backtest_period(mocker, freqai_conf):
     freqai_conf.update({"timerange": "20180120-20180124"})
     freqai_conf.get("freqai", {}).update({"backtest_period_days": 0.5})
+    freqai_conf.get("freqai", {}).update({"save_backtest_models": True})
     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
     strategy.dp = DataProvider(freqai_conf, exchange)
@@ -219,13 +221,14 @@ def test_start_backtesting_subdaily_backtest_period(mocker, freqai_conf):
     metadata = {"pair": "LTC/BTC"}
     freqai.start_backtesting(df, metadata, freqai.dk)
     model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()]
-    assert len(model_folders) == 8
+    assert len(model_folders) == 9

     shutil.rmtree(Path(freqai.dk.full_path))


 def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):
     freqai_conf.update({"timerange": "20180120-20180130"})
+    freqai_conf.get("freqai", {}).update({"save_backtest_models": True})
     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
     strategy.dp = DataProvider(freqai_conf, exchange)
@@ -244,7 +247,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):
     freqai.start_backtesting(df, metadata, freqai.dk)
     model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()]

-    assert len(model_folders) == 5
+    assert len(model_folders) == 6

     # without deleting the exiting folder structure, re-run

@@ -265,10 +268,14 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):
     freqai.start_backtesting(df, metadata, freqai.dk)

     assert log_has_re(
-        "Found model at ",
+        "Found backtesting prediction file ",
         caplog,
     )

+    path = (freqai.dd.full_path / freqai.dk.backtest_predictions_folder)
+    prediction_files = [x for x in path.iterdir() if x.is_file()]
+    assert len(prediction_files) == 5
+
     shutil.rmtree(Path(freqai.dk.full_path))

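
The new assertions count saved models and backtest prediction files straight off the filesystem. A self-contained sketch of that pathlib pattern with hypothetical folder and file names (`backtesting_predictions` mirrors `freqai.dk.backtest_predictions_folder` only by assumption):

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    full_path = Path(tmp)
    # Fake layout: one model folder plus the predictions folder.
    (full_path / "sub_train_LTC_20180120").mkdir()
    (full_path / "backtesting_predictions").mkdir()
    (full_path / "backtesting_predictions" / "ltc_predictions.feather").touch()

    model_folders = [x for x in full_path.iterdir() if x.is_dir()]
    prediction_files = [
        x for x in (full_path / "backtesting_predictions").iterdir() if x.is_file()
    ]
    assert len(model_folders) == 2
    assert len(prediction_files) == 1
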
@@ -1,5 +1,3 @@
-from math import isclose
-
 import pytest

 from freqtrade.leverage import interest
@@ -30,9 +28,9 @@ twentyfive_hours = FtPrecise(25.0)
 def test_interest(exchange, interest_rate, hours, expected):
     borrowed = FtPrecise(60.0)

-    assert isclose(interest(
+    assert pytest.approx(float(interest(
         exchange_name=exchange,
         borrowed=borrowed,
         rate=FtPrecise(interest_rate),
         hours=hours
-    ), expected)
+    ))) == expected
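
The extra `float(...)` in the rewritten assertion matters because `interest()` appears to return an `FtPrecise` value, while `pytest.approx` is designed for plain numbers. A sketch of the same conversion using `Decimal` as a stand-in for `FtPrecise` (the simple-interest formula and values here are illustrative, not freqtrade's):

from decimal import Decimal

import pytest

borrowed = Decimal("60.0")
rate = Decimal("0.0005")
hours = Decimal("25.0")

# Illustrative daily simple interest: principal * rate * (hours / 24).
interest_owed = borrowed * rate * (hours / Decimal(24))

# Converting to float first lets pytest.approx compare against a plain literal.
assert pytest.approx(float(interest_owed)) == 0.03125
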
@@ -37,6 +37,7 @@ def generate_mock_trade(pair: str, fee: float, is_open: bool,
     trade.orders.append(Order(
         ft_order_side=trade.entry_side,
         order_id=f'{pair}-{trade.entry_side}-{trade.open_date}',
+        ft_is_open=False,
         ft_pair=pair,
         amount=trade.amount,
         filled=trade.amount,
@@ -51,6 +52,7 @@ def generate_mock_trade(pair: str, fee: float, is_open: bool,
     trade.orders.append(Order(
         ft_order_side=trade.exit_side,
         order_id=f'{pair}-{trade.exit_side}-{trade.close_date}',
+        ft_is_open=False,
         ft_pair=pair,
         amount=trade.amount,
         filled=trade.amount,
@@ -2138,11 +2138,11 @@ def test_send_msg_strategy_msg_notification(default_conf, mocker) -> None:


 def test_send_msg_unknown_type(default_conf, mocker) -> None:
-    telegram, _, _ = get_telegram_testobject(mocker, default_conf)
-    with pytest.raises(NotImplementedError, match=r'Unknown message type: None'):
-        telegram.send_msg({
-            'type': None,
-        })
+    telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
+    telegram.send_msg({
+        'type': None,
+    })
+    msg_mock.call_count == 0


 @pytest.mark.parametrize('message_type,enter,enter_signal,leverage', [
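
The rewritten test drops `pytest.raises` and instead checks the mock's `call_count`; note that the new line lacks the `assert` keyword, so as written it evaluates the comparison and discards the result. A minimal sketch of the call-count pattern with the keyword in place; `send_msg` and `msg_mock` below are illustrative stand-ins, not the Telegram module's API:

from unittest.mock import MagicMock

msg_mock = MagicMock()

def send_msg(msg: dict) -> None:
    # Sketch: unknown message types are silently dropped instead of raising.
    if msg.get('type') is not None:
        msg_mock(msg)

send_msg({'type': None})
assert msg_mock.call_count == 0  # the assert keyword is what makes this a check
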
@@ -1,5 +1,3 @@
-from math import isclose
-
 import numpy as np
 import pandas as pd
 import pytest
@@ -165,7 +163,7 @@ def test_stoploss_from_open():
                     or (side == 'short' and expected_stop_price < current_price)):
                 assert stoploss == 0
             else:
-                assert isclose(stop_price, expected_stop_price, rel_tol=0.00001)
+                assert pytest.approx(stop_price) == expected_stop_price


 def test_stoploss_from_absolute():
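
Dropping `rel_tol=0.00001` is not entirely neutral: `pytest.approx` defaults to a relative tolerance of 1e-6, slightly tighter than the old 1e-5. The tolerance can be pinned explicitly via the `rel` argument, as this small standalone check shows (values are illustrative):

import pytest

expected_stop_price = 1.2345
stop_price = expected_stop_price * (1 + 5e-6)  # 5e-6 relative error

# Matches the old isclose(..., rel_tol=0.00001) behaviour exactly.
assert pytest.approx(expected_stop_price, rel=1e-5) == stop_price
# A much tighter tolerance rejects the same value.
assert not (pytest.approx(expected_stop_price, rel=1e-9) == stop_price)
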
@@ -1051,8 +1051,6 @@ def test_add_stoploss_on_exchange(mocker, default_conf_usdt, limit_order, is_sho
     mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True))
     mocker.patch('freqtrade.exchange.Exchange.fetch_order', return_value=order)
     mocker.patch('freqtrade.exchange.Exchange.get_trades_for_order', return_value=[])
-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount',
-                 return_value=order['amount'])

     stoploss = MagicMock(return_value={'id': 13434334})
     mocker.patch('freqtrade.exchange.Binance.stoploss', stoploss)
@@ -1875,8 +1873,6 @@ def test_exit_positions(mocker, default_conf_usdt, limit_order, is_short, caplog
     mocker.patch('freqtrade.exchange.Exchange.fetch_order',
                  return_value=limit_order[entry_side(is_short)])
     mocker.patch('freqtrade.exchange.Exchange.get_trades_for_order', return_value=[])
-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount',
-                 return_value=limit_order[entry_side(is_short)]['amount'])

     trade = MagicMock()
     trade.is_short = is_short
@@ -1886,14 +1882,13 @@ def test_exit_positions(mocker, default_conf_usdt, limit_order, is_short, caplog
     n = freqtrade.exit_positions(trades)
     assert n == 0
     # Test amount not modified by fee-logic
-    assert not log_has(
-        'Applying fee to amount for Trade {} from 30.0 to 90.81'.format(trade), caplog
-    )
+    assert not log_has_re(r'Applying fee to amount for Trade .*', caplog)

-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=90.81)
+    gra = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.0)
     # test amount modified by fee-logic
     n = freqtrade.exit_positions(trades)
     assert n == 0
+    assert gra.call_count == 0


 @pytest.mark.parametrize("is_short", [False, True])
@@ -1927,8 +1922,7 @@ def test_update_trade_state(mocker, default_conf_usdt, limit_order, is_short, ca
     mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True))
     mocker.patch('freqtrade.exchange.Exchange.fetch_order', return_value=order)
     mocker.patch('freqtrade.exchange.Exchange.get_trades_for_order', return_value=[])
-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount',
-                 return_value=order['amount'])
+    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.0)
     order_id = order['id']

     trade = Trade(
@@ -1960,11 +1954,11 @@ def test_update_trade_state(mocker, default_conf_usdt, limit_order, is_short, ca
     assert trade.amount == order['amount']

     trade.open_order_id = order_id
-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=90.81)
-    assert trade.amount != 90.81
+    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.01)
+    assert trade.amount == 30.0
     # test amount modified by fee-logic
     freqtrade.update_trade_state(trade, order_id)
-    assert trade.amount == 90.81
+    assert trade.amount == 29.99
     assert trade.open_order_id is None

     trade.is_open = True
@@ -4268,10 +4262,10 @@ def test_get_real_amount_quote(default_conf_usdt, trades_for_order, buy_order_fe
     caplog.clear()
     order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
     # Amount is reduced by "fee"
-    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == amount - (amount * 0.001)
+    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == (amount * 0.001)
     assert log_has(
         'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False,'
-        ' leverage=1.0, open_rate=0.24544100, open_since=closed) (from 8.0 to 7.992).',
+        ' leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.008.',
         caplog
     )

@@ -4296,7 +4290,7 @@ def test_get_real_amount_quote_dust(default_conf_usdt, trades_for_order, buy_ord
     walletmock.reset_mock()
     order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
     # Amount is kept as is
-    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == amount
+    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None
     assert walletmock.call_count == 1
     assert log_has_re(r'Fee amount for Trade.* was in base currency '
                       '- Eating Fee 0.008 into dust', caplog)
@@ -4319,7 +4313,7 @@ def test_get_real_amount_no_trade(default_conf_usdt, buy_order_fee, caplog, mock

     order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
     # Amount is reduced by "fee"
-    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == amount
+    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None
     assert log_has(
         'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, '
         'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed) failed: '
@@ -4343,8 +4337,7 @@ def test_get_real_amount_no_trade(default_conf_usdt, buy_order_fee, caplog, mock
     # from order
     ({'cost': 0.004, 'currency': 'LTC'}, 0.004, False, (
         'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, '
-        'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed) (from'
-        ' 8.0 to 7.996).'
+        'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.004.'
     )),
     # invalid, no currency in from fee dict
     ({'cost': 0.008, 'currency': None}, 0, True, None),
@@ -4376,7 +4369,11 @@ def test_get_real_amount(

     caplog.clear()
     order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
-    assert freqtrade.get_real_amount(trade, buy_order, order_obj) == amount - fee_reduction_amount
+    res = freqtrade.get_real_amount(trade, buy_order, order_obj)
+    if fee_reduction_amount == 0:
+        assert res is None
+    else:
+        assert res == fee_reduction_amount

     if expected_log:
         assert log_has(expected_log, caplog)
@@ -4422,14 +4419,14 @@ def test_get_real_amount_multi(
                  return_value={'ask': 0.19, 'last': 0.2})

     # Amount is reduced by "fee"
-    expected_amount = amount - (amount * fee_reduction_amount)
+    expected_amount = amount * fee_reduction_amount
     order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
     assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == expected_amount
     assert log_has(
         (
             'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, '
-            'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed) '
-            f'(from 8.0 to {expected_log_amount}).'
+            'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), '
+            f'fee={expected_amount}.'
         ),
         caplog
     )
@@ -4462,7 +4459,7 @@ def test_get_real_amount_invalid_order(default_conf_usdt, trades_for_order, buy_

     order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
     # Amount does not change
-    assert freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj) == amount
+    assert freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj) is None


 def test_get_real_amount_fees_order(default_conf_usdt, market_buy_order_usdt_doublefee,
@@ -4485,7 +4482,7 @@ def test_get_real_amount_fees_order(default_conf_usdt, market_buy_order_usdt_dou
     # Amount does not change
     assert trade.fee_open == 0.0025
     order_obj = Order.parse_from_ccxt_object(market_buy_order_usdt_doublefee, 'LTC/ETH', 'buy')
-    assert freqtrade.get_real_amount(trade, market_buy_order_usdt_doublefee, order_obj) == 30.0
+    assert freqtrade.get_real_amount(trade, market_buy_order_usdt_doublefee, order_obj) is None
     assert tfo_mock.call_count == 0
     # Fetch fees from trades dict if available to get "proper" values
     assert round(trade.fee_open, 4) == 0.001
@@ -4537,7 +4534,7 @@ def test_get_real_amount_wrong_amount_rounding(default_conf_usdt, trades_for_ord
     order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
     # Amount changes by fee amount.
     assert pytest.approx(freqtrade.get_real_amount(
-        trade, limit_buy_order_usdt, order_obj)) == amount - (amount * 0.001)
+        trade, limit_buy_order_usdt, order_obj)) == (amount * 0.001)


 def test_get_real_amount_open_trade_usdt(default_conf_usdt, fee, mocker):
@@ -4559,7 +4556,7 @@ def test_get_real_amount_open_trade_usdt(default_conf_usdt, fee, mocker):
     }
     freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
     order_obj = Order.parse_from_ccxt_object(order, 'LTC/ETH', 'buy')
-    assert freqtrade.get_real_amount(trade, order, order_obj) == amount
+    assert freqtrade.get_real_amount(trade, order, order_obj) is None


 def test_get_real_amount_in_point(default_conf_usdt, buy_order_fee, fee, mocker, caplog):
@@ -4616,7 +4613,7 @@ def test_get_real_amount_in_point(default_conf_usdt, buy_order_fee, fee, mocker,

     order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
     res = freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj)
-    assert res == amount
+    assert res is None
     assert trade.fee_open_currency is None
     assert trade.fee_open_cost is None
     message = "Not updating buy-fee - rate: None, POINT."
@@ -4624,7 +4621,7 @@ def test_get_real_amount_in_point(default_conf_usdt, buy_order_fee, fee, mocker,
     caplog.clear()
     freqtrade.config['exchange']['unknown_fee_rate'] = 1
     res = freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj)
-    assert res == amount
+    assert res is None
     assert trade.fee_open_currency == 'POINT'
     assert pytest.approx(trade.fee_open_cost) == 0.3046651026
     assert trade.fee_open == 0.002
@@ -4633,12 +4630,12 @@ def test_get_real_amount_in_point(default_conf_usdt, buy_order_fee, fee, mocker,


 @pytest.mark.parametrize('amount,fee_abs,wallet,amount_exp', [
-    (8.0, 0.0, 10, 8),
-    (8.0, 0.0, 0, 8),
-    (8.0, 0.1, 0, 7.9),
-    (8.0, 0.1, 10, 8),
-    (8.0, 0.1, 8.0, 8.0),
-    (8.0, 0.1, 7.9, 7.9),
+    (8.0, 0.0, 10, None),
+    (8.0, 0.0, 0, None),
+    (8.0, 0.1, 0, 0.1),
+    (8.0, 0.1, 10, None),
+    (8.0, 0.1, 8.0, None),
+    (8.0, 0.1, 7.9, 0.1),
 ])
 def test_apply_fee_conditional(default_conf_usdt, fee, mocker,
                                amount, fee_abs, wallet, amount_exp):
@@ -4653,11 +4650,17 @@ def test_apply_fee_conditional(default_conf_usdt, fee, mocker,
         fee_close=fee.return_value,
         open_order_id="123456"
     )
+    order = Order(
+        ft_order_side='buy',
+        order_id='100',
+        ft_pair=trade.pair,
+        ft_is_open=True,
+    )
     freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)

     walletmock.reset_mock()
     # Amount is kept as is
-    assert freqtrade.apply_fee_conditional(trade, 'LTC', amount, fee_abs) == amount_exp
+    assert freqtrade.apply_fee_conditional(trade, 'LTC', amount, fee_abs, order) == amount_exp
     assert walletmock.call_count == 1

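
Read together, the new parametrize table and the extra `order` argument suggest a changed contract for `apply_fee_conditional`: it returns the absolute fee to trim from the trade only when the wallet cannot already cover the full amount, and `None` otherwise. A hedged reconstruction of that contract; the helper below is an illustration consistent with all six rows, not freqtrade's implementation:

from typing import Optional

def apply_fee_sketch(amount: float, fee_abs: float, wallet: float) -> Optional[float]:
    # Wallet already holds the full amount (or there is no fee): nothing to correct.
    if fee_abs == 0 or wallet >= amount:
        return None
    # Otherwise report the absolute fee so the caller can shrink the order.
    return fee_abs

cases = [
    (8.0, 0.0, 10, None),
    (8.0, 0.0, 0, None),
    (8.0, 0.1, 0, 0.1),
    (8.0, 0.1, 10, None),
    (8.0, 0.1, 8.0, None),
    (8.0, 0.1, 7.9, 0.1),
]
for amount, fee_abs, wallet, expected in cases:
    assert apply_fee_sketch(amount, fee_abs, wallet) == expected
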
@@ -1,7 +1,6 @@
 # pragma pylint: disable=missing-docstring, C0103
 import logging
 from datetime import datetime, timedelta, timezone
-from math import isclose
 from pathlib import Path
 from types import FunctionType
 from unittest.mock import MagicMock
@@ -582,25 +581,25 @@ def test_update_market_order(market_buy_order_usdt, market_sell_order_usdt, fee,
 @pytest.mark.parametrize(
     'exchange,is_short,lev,open_value,close_value,profit,profit_ratio,trading_mode,funding_fees', [
         ("binance", False, 1, 60.15, 65.835, 5.685, 0.09451371, spot, 0.0),
-        ("binance", True, 1, 59.850, 66.1663784375, -6.3163784375, -0.1055368, margin, 0.0),
+        ("binance", True, 1, 65.835, 60.151253125, 5.68374687, 0.08633321, margin, 0.0),
         ("binance", False, 3, 60.15, 65.83416667, 5.68416667, 0.28349958, margin, 0.0),
-        ("binance", True, 3, 59.85, 66.1663784375, -6.3163784375, -0.31661044, margin, 0.0),
+        ("binance", True, 3, 65.835, 60.151253125, 5.68374687, 0.25899963, margin, 0.0),

         ("kraken", False, 1, 60.15, 65.835, 5.685, 0.09451371, spot, 0.0),
-        ("kraken", True, 1, 59.850, 66.231165, -6.381165, -0.1066192, margin, 0.0),
+        ("kraken", True, 1, 65.835, 60.21015, 5.62485, 0.0854386, margin, 0.0),
         ("kraken", False, 3, 60.15, 65.795, 5.645, 0.28154613, margin, 0.0),
-        ("kraken", True, 3, 59.850, 66.231165, -6.381165, -0.3198578, margin, 0.0),
+        ("kraken", True, 3, 65.835, 60.21015, 5.62485, 0.25631579, margin, 0.0),

         ("binance", False, 1, 60.15, 65.835, 5.685, 0.09451371, futures, 0.0),
         ("binance", False, 1, 60.15, 66.835, 6.685, 0.11113881, futures, 1.0),
-        ("binance", True, 1, 59.85, 66.165, -6.315, -0.10551378, futures, 0.0),
-        ("binance", True, 1, 59.85, 67.165, -7.315, -0.12222222, futures, -1.0),
+        ("binance", True, 1, 65.835, 60.15, 5.685, 0.08635224, futures, 0.0),
+        ("binance", True, 1, 65.835, 61.15, 4.685, 0.07116276, futures, -1.0),
+        ("binance", True, 3, 65.835, 59.15, 6.685, 0.3046252, futures, 1.0),
         ("binance", False, 3, 60.15, 64.835, 4.685, 0.23366583, futures, -1.0),
-        ("binance", True, 3, 59.85, 65.165, -5.315, -0.26641604, futures, 1.0),
 ])
 @pytest.mark.usefixtures("init_persistence")
 def test_calc_open_close_trade_price(
-    limit_buy_order_usdt, limit_sell_order_usdt, fee, exchange, is_short, lev,
+    limit_order, fee, exchange, is_short, lev,
     open_value, close_value, profit, profit_ratio, trading_mode, funding_fees
 ):
     trade: Trade = Trade(
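
The rewritten short rows follow the same arithmetic as the unchanged spot row once entry and exit sides are swapped (a short opens at the sell rate and closes at the buy rate). A worked check of the first spot row, assuming the usual fixture values (30 units bought at 2.0, sold at 2.2, 0.25% fee); the fixture values are an assumption, but the arithmetic reproduces the row exactly:

import pytest

amount, open_rate, close_rate, fee = 30.0, 2.0, 2.2, 0.0025

open_value = amount * open_rate * (1 + fee)    # 60.15
close_value = amount * close_rate * (1 - fee)  # 65.835
profit = close_value - open_value              # 5.685
profit_ratio = profit / open_value             # 0.09451371...

assert pytest.approx(open_value) == 60.15
assert pytest.approx(close_value) == 65.835
assert pytest.approx(profit) == 5.685
assert pytest.approx(profit_ratio) == 0.09451371
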
@@ -616,24 +615,30 @@ def test_calc_open_close_trade_price(
         is_short=is_short,
         leverage=lev,
         trading_mode=trading_mode,
-        funding_fees=funding_fees
     )
+    entry_order = limit_order[trade.entry_side]
+    exit_order = limit_order[trade.exit_side]
     trade.open_order_id = f'something-{is_short}-{lev}-{exchange}'

-    oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, 'ADA/USDT', 'buy')
+    oobj = Order.parse_from_ccxt_object(entry_order, 'ADA/USDT', trade.entry_side)
+    oobj.trade = trade
+    oobj.update_from_ccxt_object(entry_order)
     trade.update_trade(oobj)

-    oobj = Order.parse_from_ccxt_object(limit_sell_order_usdt, 'ADA/USDT', 'sell')
+    trade.funding_fees = funding_fees
+
+    oobj = Order.parse_from_ccxt_object(exit_order, 'ADA/USDT', trade.exit_side)
+    oobj.trade = trade
+    oobj.update_from_ccxt_object(exit_order)
     trade.update_trade(oobj)

-    trade.open_rate = 2.0
-    trade.close_rate = 2.2
-    trade.recalc_open_trade_value()
-    assert isclose(trade._calc_open_trade_value(trade.amount, trade.open_rate), open_value)
-    assert isclose(trade.calc_close_trade_value(trade.close_rate), close_value)
-    assert isclose(trade.calc_profit(trade.close_rate), round(profit, 8))
-    assert pytest.approx(trade.calc_profit_ratio(trade.close_rate)) == profit_ratio
+    assert trade.is_open is False
+    assert trade.funding_fees == funding_fees
+
+    assert pytest.approx(trade._calc_open_trade_value(trade.amount, trade.open_rate)) == open_value
+    assert pytest.approx(trade.calc_close_trade_value(trade.close_rate)) == close_value
+    assert pytest.approx(trade.close_profit_abs) == profit
+    assert pytest.approx(trade.close_profit) == profit_ratio


 @pytest.mark.usefixtures("init_persistence")
@@ -655,6 +660,7 @@ def test_trade_close(fee):
     trade.orders.append(Order(
         ft_order_side=trade.entry_side,
         order_id=f'{trade.pair}-{trade.entry_side}-{trade.open_date}',
+        ft_is_open=False,
         ft_pair=trade.pair,
         amount=trade.amount,
         filled=trade.amount,
@@ -668,6 +674,7 @@ def test_trade_close(fee):
     trade.orders.append(Order(
         ft_order_side=trade.exit_side,
         order_id=f'{trade.pair}-{trade.exit_side}-{trade.open_date}',
+        ft_is_open=False,
         ft_pair=trade.pair,
         amount=trade.amount,
         filled=trade.amount,