commit 8f9e6efc5e

.github/workflows/ci.yml (vendored, 4 changes)
@@ -351,7 +351,7 @@ jobs:
        python setup.py sdist bdist_wheel

    - name: Publish to PyPI (Test)
-     uses: pypa/gh-action-pypi-publish@v1.5.0
+     uses: pypa/gh-action-pypi-publish@v1.5.1
      if: (github.event_name == 'release')
      with:
        user: __token__
@@ -359,7 +359,7 @@ jobs:
        repository_url: https://test.pypi.org/legacy/

    - name: Publish to PyPI
-     uses: pypa/gh-action-pypi-publish@v1.5.0
+     uses: pypa/gh-action-pypi-publish@v1.5.1
      if: (github.event_name == 'release')
      with:
        user: __token__
.gitignore (vendored, 3 changes)
@@ -111,5 +111,4 @@ target/
!config_examples/config_ftx.example.json
!config_examples/config_full.example.json
!config_examples/config_kraken.example.json
-!config_examples/config_freqai_futures.example.json
-!config_examples/config_freqai_spot.example.json
+!config_examples/config_freqai.example.json
@@ -15,7 +15,7 @@ repos:
        additional_dependencies:
          - types-cachetools==5.2.1
          - types-filelock==3.2.7
-         - types-requests==2.28.3
+         - types-requests==2.28.8
          - types-tabulate==0.8.11
          - types-python-dateutil==2.8.19
        # stages: [push]
@@ -1,4 +1,4 @@
-FROM python:3.10.5-slim-bullseye as base
+FROM python:3.10.6-slim-bullseye as base

# Setup env
ENV LANG C.UTF-8
@@ -194,7 +194,7 @@ Issues labeled [good first issue](https://github.com/freqtrade/freqtrade/labels/

The clock must be accurate, synchronized to an NTP server very frequently to avoid problems with communication to the exchanges.

-### Min hardware required
+### Minimum hardware required

To run this bot we recommend a cloud instance with a minimum of:
@@ -52,13 +52,13 @@
        }
    ],
    "freqai": {
+       "enabled": true,
        "startup_candles": 10000,
        "purge_old_models": true,
        "train_period_days": 15,
        "backtest_period_days": 7,
        "live_retrain_hours": 0,
-       "identifier": "uniqe-id6",
-       "live_trained_timestamp": 0,
+       "identifier": "uniqe-id",
        "feature_parameters": {
            "include_timeframes": [
                "3m",
@@ -84,8 +84,7 @@
            "random_state": 1
        },
        "model_training_parameters": {
-           "n_estimators": 1000,
-           "task_type": "CPU"
+           "n_estimators": 1000
        }
    },
    "bot_name": "",
@@ -1,96 +0,0 @@
{
    "max_open_trades": 1,
    "stake_currency": "USDT",
    "stake_amount": 900,
    "tradable_balance_ratio": 1,
    "fiat_display_currency": "USD",
    "dry_run": true,
    "timeframe": "5m",
    "dry_run_wallet": 4000,
    "dataformat_ohlcv": "json",
    "cancel_open_orders_on_exit": true,
    "unfilledtimeout": {
        "entry": 10,
        "exit": 30
    },
    "exchange": {
        "name": "binance",
        "key": "",
        "secret": "",
        "ccxt_config": {
            "enableRateLimit": true
        },
        "ccxt_async_config": {
            "enableRateLimit": true,
            "rateLimit": 200
        },
        "pair_whitelist": [
            "BTC/USDT",
            "ETH/USDT"
        ],
        "pair_blacklist": []
    },
    "entry_pricing": {
        "price_side": "same",
        "use_order_book": true,
        "order_book_top": 1,
        "price_last_balance": 0.0,
        "check_depth_of_market": {
            "enabled": false,
            "bids_to_ask_delta": 1
        }
    },
    "exit_pricing": {
        "price_side": "other",
        "use_order_book": true,
        "order_book_top": 1
    },
    "pairlists": [
        {
            "method": "StaticPairList"
        }
    ],
    "freqai": {
        "startup_candles": 10000,

        "train_period_days": 30,
        "backtest_period_days": 7,
        "live_retrain_hours": 1,
        "identifier": "example",
        "live_trained_timestamp": 0,
        "feature_parameters": {
            "include_timeframes": [
                "5m",
                "15m",
                "4h"
            ],
            "include_corr_pairlist": [
                "BTC/USDT",
                "ETH/USDT"
            ],
            "label_period_candles": 500,
            "include_shifted_candles": 1,
            "DI_threshold": 0,
            "weight_factor": 0.9,
            "principal_component_analysis": false,
            "use_SVM_to_remove_outliers": false,
            "stratify_training_data": 0,
            "indicator_max_period_candles": 50,
            "indicator_periods_candles": [10, 20]
        },
        "data_split_parameters": {
            "test_size": 0.33,
            "random_state": 1
        },
        "model_training_parameters": {
            "n_estimators": 1000,
            "task_type": "CPU"
        }
    },
    "bot_name": "",
    "initial_state": "running",
    "forcebuy_enable": false,
    "internals": {
        "process_throttle_secs": 5
    }
}
BIN docs/assets/freqai_algo.png (new file; binary file not shown; size 328 KiB)
File diff suppressed because one or more lines are too long (image changed: before 36 KiB, after 44 KiB)
@@ -514,6 +514,7 @@ You can then load the trades to perform further analysis as shown in the [data a

Since backtesting lacks some detailed information about what happens within a candle, it needs to take a few assumptions:

- Exchange [trading limits](#trading-limits-in-backtesting) are respected
- Buys happen at open-price
- All orders are filled at the requested price (no slippage, no unfilled orders)
- Exit-signal exits happen at open-price of the consecutive candle
@@ -543,7 +544,24 @@ Also, keep in mind that past results don't guarantee future success.

In addition to the above assumptions, strategy authors should carefully read the [Common Mistakes](strategy-customization.md#common-mistakes-when-developing-strategies) section, to avoid using data in backtesting which is not available in real market conditions.

-### Improved backtest accuracy
+### Trading limits in backtesting

Exchanges have certain trading limits, like a minimum base currency amount, or a minimum stake (quote) currency amount.
These limits are usually listed in the exchange documentation as "trading rules" or similar.

Backtesting (as well as live and dry-run) does honor these limits, and will ensure that a stoploss can be placed below this value - so the value will be slightly higher than what the exchange specifies.
Freqtrade has, however, no information about historic limits.

This can lead to situations where trading limits are inflated by using a historic price, resulting in minimum amounts > 50$.

For example:

BTC minimum tradable amount is 0.001.
BTC trades at 22,000\$ today (0.001 BTC is related to this) - but the backtesting period includes prices as high as 50,000\$.
Today's minimum would be `0.001 * 22_000` - or 22\$.
However, the limit could also be 50$ - based on `0.001 * 50_000` in some historic setting.

+## Improved backtest accuracy

One big limitation of backtesting is its inability to know how prices moved intra-candle (was the high before the close, or vice versa?).
So assuming you run backtesting with a 1h timeframe, there will be 4 prices for that candle (Open, High, Low, Close).
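To make the minimum-amount inflation described in the example above concrete, here is a small editor's sketch (the 0.001 BTC limit and the two prices are taken from the example; the helper function is illustrative, not part of freqtrade):

```python
# Illustrative only: how a historic high price inflates the minimum stake amount.
MIN_BASE_AMOUNT_BTC = 0.001  # exchange minimum tradable amount, from the example above

def min_stake_usd(price_usd: float) -> float:
    """Minimum stake (quote currency) implied by the base-amount limit at a given price."""
    return MIN_BASE_AMOUNT_BTC * price_usd

print(min_stake_usd(22_000))  # 22.0 -> today's minimum
print(min_stake_usd(50_000))  # 50.0 -> minimum implied by the historic 50,000$ price
```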
@@ -105,7 +105,7 @@ This is similar to using multiple `--config` parameters, but simpler in usage as

``` json title="Result"
{
-    "max_open_trades": 10,
+    "max_open_trades": 3,
    "stake_currency": "USDT",
    "stake_amount": "unlimited"
}
@@ -68,6 +68,36 @@ def test_method_to_test(caplog):

```

### Debug configuration

To debug freqtrade, we recommend VSCode with the following launch configuration (located in `.vscode/launch.json`).
Details will obviously vary between setups - but this should work to get you started.

``` json
{
    "name": "freqtrade trade",
    "type": "python",
    "request": "launch",
    "module": "freqtrade",
    "console": "integratedTerminal",
    "args": [
        "trade",
        // Optional:
        // "--userdir", "user_data",
        "--strategy",
        "MyAwesomeStrategy",
    ]
},
```

Command line arguments can be added in the `"args"` array.
This method can also be used to debug a strategy, by setting the breakpoints within the strategy.

A similar setup can also be used for PyCharm - use `freqtrade` as the module name, and set the command line arguments as "parameters".

!!! Note "Startup directory"
    This assumes that you have the repository checked out, and the editor is started at the repository root level (so setup.py is at the top level of your repository).

## ErrorHandling

Freqtrade Exceptions all inherit from `FreqtradeException`.
docs/freqai.md (206 changes)
@@ -1,35 +1,52 @@
![freqai-logo](assets/freqai_logo_no_md.svg)

# FreqAI

-FreqAI is a module designed to automate a variety of tasks associated with
-training a predictive model to provide signals based on input features.
+FreqAI is a module designed to automate a variety of tasks associated with training a predictive model to generate market forecasts given a set of input features.

Among the features included:

-* Create large rich feature sets (10k+ features) based on simple user created strategies.
-* Sweep model training and backtesting to simulate consistent model retraining through time.
-* Remove outliers automatically from training and prediction sets using a Dissimilarity Index and Support Vector Machines.
-* Reduce the dimensionality of the data with Principal Component Analysis.
-* Store models to disk to make reloading from a crash fast and easy (and purge obsolete files automatically for sustained dry/live runs).
-* Normalize the data automatically in a smart and statistically safe way.
-* Automated data download and data handling.
-* Clean the incoming data of NaNs in a safe way before training and prediction.
-* Retrain live automatically so that the model self-adapts to the market in an unsupervised manner.
+* **Self-adaptive retraining**: automatically retrain models during live deployments to self-adapt to the market in an unsupervised manner.
+* **Rapid feature engineering**: create large rich feature sets (10k+ features) based on simple user-created strategies.
+* **High performance**: adaptive retraining occurs on a separate thread (or on GPU if available) from inferencing and bot trade operations. The newest models and data are kept in memory for rapid inferencing.
+* **Realistic backtesting**: emulate self-adaptive retraining with a backtesting module that automates past retraining.
+* **Modifiable**: use the generalized and robust architecture for incorporating any machine learning library/method available in Python. Seven examples are available.
+* **Smart outlier removal**: remove outliers automatically from training and prediction sets using a variety of outlier detection techniques.
+* **Crash resilience**: automatic model storage to disk to make reloading from a crash fast and easy (and purge obsolete files automatically for sustained dry/live runs).
+* **Automated data normalization**: normalize the data automatically in a smart and statistically safe way.
+* **Automatic data download**: automatically compute the data download timerange and download the data accordingly (in live deployments).
+* **Cleaning of incoming data**: clean the incoming data of NaNs in a safe way before training and prediction.
+* **Dimensionality reduction**: reduce the size of the training data via Principal Component Analysis.
+* **Deploy bot fleets**: set one bot to train models while a fleet of other bots inference into the models and handle trades.

## Quick start

The easiest way to quickly test FreqAI is to run it in dry run with the following command:

```bash
freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel LightGBMRegressor --strategy-path freqtrade/templates
```

The user will then see the boot-up process of automatic data downloading, followed by simultaneous training and trading.

The example strategy, example prediction model, and example config can all be found in
`freqtrade/templates/FreqaiExampleStrategy.py`, `freqtrade/freqai/prediction_models/LightGBMRegressor.py`, and
`config_examples/config_freqai.example.json`, respectively.

## General approach

-The user provides FreqAI with a set of custom indicators (created inside the strategy the same way
-a typical Freqtrade strategy is created) as well as a target value (typically some price change into the future).
-FreqAI trains a model to predict the target value based on the input of custom indicators.
-FreqAI will train and save a new model for each pair in the config whitelist.
-Users employ FreqAI to backtest a strategy (emulate reality with retraining a model as new data is introduced) and run the model live to generate entry and exit signals.
-In dry/live, FreqAI works in a background thread to keep all models as updated as possible with consistent retraining.
+The user provides FreqAI with a set of custom *base* indicators (created inside the strategy the same way
+a typical Freqtrade strategy is created) as well as target values which look into the future.
+FreqAI trains a model to predict the target value based on the input of custom indicators for each pair in the whitelist. These models are consistently retrained to adapt to market conditions. FreqAI offers the ability to both backtest strategies (emulating reality with periodic retraining) and deploy dry/live. In dry/live conditions, FreqAI can be set to constant retraining in a background thread in an effort to keep models as young as possible.

An overview of the algorithm is shown here to help users understand the data processing pipeline and the model usage.

![freqai-algo](assets/freqai_algo.png)
## Background and vocabulary

**Features** are the quantities with which a model is trained. $X_i$ represents the
-vector of all features for a single candle. In Freqai, the user
+vector of all features for a single candle. In FreqAI, the user
builds the features from anything they can construct in the strategy.

**Labels** are the target values with which the weights inside a model are trained
@@ -49,23 +66,14 @@ directly influence nodal weights within the model.

## Install prerequisites

-Use `pip` to install the prerequisites with:
+The normal Freqtrade install process will ask the user if they wish to install `FreqAI` dependencies. The user should reply "yes" to this question if they wish to use FreqAI. If the user did not reply yes, they can manually install these dependencies after the install with:

``` bash
pip install -r requirements-freqai.txt
```

-## Running from the example files
-
-An example strategy, an example prediction model, and example config can all be found in
-`freqtrade/templates/FreqaiExampleStrategy.py`, `freqtrade/freqai/prediction_models/LightGBMPredictionModel.py`,
-`config_examples/config_freqai_futures.example.json`, respectively.
-
-Assuming the user has downloaded the necessary data, Freqai can be executed from these templates with:
-
-```bash
-freqtrade backtesting --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel LightGBMPredictionModel --strategy-path freqtrade/templates --timerange 20220101-20220201
-```
!!! Note
    Catboost will not be installed on ARM devices (Raspberry Pi, Mac M1, ARM-based VPS, ...), since Catboost does not provide wheels for this platform.

## Configuring the bot
@@ -88,28 +96,32 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `purge_old_models` | Tell FreqAI to delete obsolete models. Otherwise, all historic models will remain on disk. Defaults to `False`. <br> **Datatype:** boolean.
| `expiration_hours` | Ask FreqAI to avoid making predictions if a model is more than `expiration_hours` old. Defaults to 0 which means models never expire. <br> **Datatype:** positive integer.
| | **Feature Parameters**
-| `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples shown [here](#building-the-feature-set) <br> **Datatype:** dictionary.
+| `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples shown [here](#feature-engineering) <br> **Datatype:** dictionary.
| `include_corr_pairlist` | A list of correlated coins that FreqAI will add as additional features to all `pair_whitelist` coins. All indicators set in `populate_any_indicators` will be created for each coin in this list, and that set of features is added to the base asset feature set. <br> **Datatype:** list of assets (strings).
| `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for and added as features to the base asset feature set. <br> **Datatype:** list of timeframes (strings).
| `label_period_candles` | Number of candles into the future that the labels are created for. This is used in `populate_any_indicators`, refer to `templates/FreqaiExampleStrategy.py` for detailed usage. The user can create custom labels, making use of this parameter or not. <br> **Datatype:** positive integer.
| `include_shifted_candles` | Parameter used to add a sense of temporal recency to flattened regression type input data. `include_shifted_candles` takes all features, duplicates and shifts them by the number indicated by the user. <br> **Datatype:** positive integer.
| `DI_threshold` | Activates the Dissimilarity Index for outlier detection when above 0, explained more [here](#removing-outliers-with-the-dissimilarity-index). <br> **Datatype:** positive float (typically below 1).
-| `weight_factor` | Used to set weights for training data points according to their recency, see details and a figure of how it works [here](##controlling-the-model-learning-process). <br> **Datatype:** positive float (typically below 1).
+| `weight_factor` | Used to set weights for training data points according to their recency, see details and a figure of how it works [here](#controlling-the-model-learning-process). <br> **Datatype:** positive float (typically below 1).
| `principal_component_analysis` | Ask FreqAI to automatically reduce the dimensionality of the data set using PCA. <br> **Datatype:** boolean.
| `use_SVM_to_remove_outliers` | Ask FreqAI to train a support vector machine to detect and remove outliers from the training data set as well as from incoming data points. <br> **Datatype:** boolean.
-| `svm_nu` | The `nu` parameter for the support vector machine. *Very* broadly, this is the percentage of data points that should be considered outliers. <br> **Datatype:** float between 0 and 1.
+| `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. E.g. `nu`, *very* broadly, is the percentage of data points that should be considered outliers. `shuffle` is by default false to maintain reproducibility. These and all others can be added/changed in this dictionary. <br> **Datatype:** dictionary.
| `stratify_training_data` | This value is used to indicate the stratification of the data. e.g. 2 would set every 2nd data point into a separate dataset to be pulled from during training/testing. <br> **Datatype:** positive integer.
| `indicator_max_period_candles` | The maximum *period* used in `populate_any_indicators()` for indicator creation. FreqAI uses this information in combination with the maximum timeframe to calculate how many data points it should download so that the first data point does not have a NaN. <br> **Datatype:** positive integer.
| `indicator_periods_candles` | A list of integers used to duplicate all indicators according to a set of periods and add them to the feature set. <br> **Datatype:** list of positive integers.
| `use_DBSCAN_to_remove_outliers` | Inactive by default. If true, FreqAI clusters data using DBSCAN to identify and remove outliers from training and prediction data. <br> **Datatype:** float (fraction of 1).
| | **Data split parameters**
| `data_split_parameters` | Include any additional parameters available from Scikit-learn `train_test_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) <br> **Datatype:** dictionary.
| `test_size` | Fraction of data that should be used for testing instead of training. <br> **Datatype:** positive float below 1.
| `shuffle` | Shuffle the training data points during training. Typically for time-series forecasting, this is set to False. <br> **Datatype:** boolean.
| | **Model training parameters**
-| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected library. For example, if the user uses `LightGBMPredictionModel`, then this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html). If the user selects a different model, then this dictionary can contain any parameter from that different model. <br> **Datatype:** dictionary.
+| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected library. For example, if the user uses `LightGBMRegressor`, then this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html). If the user selects a different model, then this dictionary can contain any parameter from that different model. <br> **Datatype:** dictionary.
| `n_estimators` | A common parameter among regressors which sets the number of boosted trees to fit. <br> **Datatype:** integer.
| `learning_rate` | A common parameter among regressors which sets the boosting learning rate. <br> **Datatype:** float.
| `n_jobs`, `thread_count`, `task_type` | Different libraries use different parameter names to control the number of threads used for parallel processing, or whether it is a `task_type` of `gpu` or `cpu`. <br> **Datatype:** float.
| | **Extraneous parameters**
| `keras` | If your model makes use of Keras (typical of TensorFlow-based prediction models), activate this flag so that the model save/loading follows Keras standards. Default value `false`. <br> **Datatype:** boolean.
| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for `shift` by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. Default value 2. <br> **Datatype:** integer.

### Important FreqAI dataframe key patterns
@@ -125,8 +137,8 @@ Here are the values the user can expect to include/use inside the typical strate

### Example config file

-The user interface is isolated to the typical config file. A typical Freqai
-config setup includes:
+The user interface is isolated to the typical config file. A typical FreqAI
+config setup could include:

```json
    "freqai": {
@@ -161,7 +173,7 @@ config setup includes:
    }
```

-### Building the feature set
+### Feature engineering

Features are added by the user inside the `populate_any_indicators()` method of the strategy,
by prepending indicators with `%`, and labels are added by prepending `&`.
@@ -174,7 +186,7 @@ various configuration parameters which multiply the feature set such as `include

```python
    def populate_any_indicators(
-        self, metadata, pair, df, tf, informative=None, coin="", set_generalized_indicators=False
+        self, pair, df, tf, informative=None, set_generalized_indicators=False
    ):
        """
        Function designed to automatically generate, name and merge features
@@ -190,6 +202,8 @@ various configuration parameters which multiply the feature set such as `include
        :param coin: the name of the coin which will modify the feature names.
        """

+        coin = pair.split('/')[0]
+
        with self.freqai.lock:
            if informative is None:
                informative = self.dp.get_pair_dataframe(pair, tf)
@@ -257,7 +271,7 @@ various configuration parameters which multiply the feature set such as `include
        return df
```

-The user of the present example does not want to pass the `bb_lowerband` as a feature to the model,
+The user of the present example does not wish to pass the `bb_lowerband` as a feature to the model,
and has therefore not prepended it with `%`. The user does, however, wish to pass `bb_width` to the
model for training/prediction and has therefore prepended it with `%`.

@@ -305,7 +319,7 @@ set will include all the features from `populate_any_indicators` on all the `inc

`include_shifted_candles` is another user controlled parameter which indicates the number of previous
candles to include in the present feature set. In other words, `include_shifted_candles: 2` tells
-Freqai to include the past 2 candles for each of the features included in the dataset.
+FreqAI to include the past 2 candles for each of the features included in the dataset.

In total, the number of features the present user has created is:
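The feature-count formula that follows this sentence in the original document falls outside the diff hunk. Purely as an editor's illustration of how the configuration parameters described above multiply the feature set, a short sketch with assumed counts (these numbers are not taken from the diff):

```python
# Editor's sketch with assumed values: how FreqAI-style feature expansion multiplies counts.
base_indicators = 9        # features created in populate_any_indicators() for a single period
indicator_periods = 2      # e.g. indicator_periods_candles: [10, 20]
timeframes = 3             # e.g. include_timeframes: ["3m", "15m", "4h"]
pairs_in_features = 3      # the traded pair plus two include_corr_pairlist entries
shifted_candles = 2        # include_shifted_candles: 2

total_features = (base_indicators * indicator_periods * timeframes
                  * pairs_in_features * (1 + shifted_candles))
print(total_features)  # 486 features fed to the model for each candle
```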
@@ -318,12 +332,12 @@ Users define the backtesting timerange with the typical `--timerange` parameter
configuration file. `train_period_days` is the duration of the sliding training window, while
`backtest_period_days` is the sliding backtesting window, both in number of days (`backtest_period_days` can be
a float to indicate sub-daily retraining in live/dry mode). In the present example,
-the user is asking Freqai to use a training period of 30 days and backtest the subsequent 7 days.
+the user is asking FreqAI to use a training period of 30 days and backtest the subsequent 7 days.
This means that if the user sets `--timerange 20210501-20210701`,
-Freqai will train 8 separate models (because the full range comprises 8 weeks),
+FreqAI will train 8 separate models (because the full range comprises 8 weeks),
and then backtest the subsequent week associated with each of the 8 training
data set timerange months. Users can think of this as a "sliding window" which
-emulates Freqai retraining itself once per week in live using the previous
+emulates FreqAI retraining itself once per week in live using the previous
month of data.

In live, the required training data is automatically computed and downloaded. However, in backtesting
@@ -341,16 +355,18 @@ and adding this to the `train_period_days`. The units need to be in the base can
!!! Note
    Although fractional `backtest_period_days` is allowed, the user should be aware that the `--timerange` is divided by this value to determine the number of models that FreqAI will need to train in order to backtest the full range. For example, if the user wants to set a `--timerange` of 10 days, and asks for a `backtest_period_days` of 0.1, FreqAI will need to train 100 models per pair to complete the full backtest. This is why it is physically impossible to truly backtest FreqAI adaptive training. The best way to fully test a model is to run it dry and let it constantly train. In this case, backtesting would take the exact same amount of time as a dry run.

-## Running Freqai
+## Running FreqAI

-### Training and backtesting
+### Backtesting

-The freqai training/backtesting module can be executed with the following command:
+The FreqAI backtesting module can be executed with the following command:

```bash
-freqtrade backtesting --strategy FreqaiExampleStrategy --config config_freqai_futures.example.json --freqaimodel LightGBMPredictionModel --timerange 20210501-20210701
+freqtrade backtesting --strategy FreqaiExampleStrategy --config config_freqai.example.json --freqaimodel LightGBMRegressor --timerange 20210501-20210701
```

Backtesting mode requires the user to have the data pre-downloaded (unlike dry/live, where FreqAI automatically downloads the necessary data). The user should be careful to consider that the range of the downloaded data is more than the backtesting range. This is because FreqAI needs data prior to the desired backtesting range in order to train a model to be ready to make predictions on the first candle of the user-set backtesting range. More details on how to calculate the data download timerange can be found [here](#deciding-the-sliding-training-window-and-backtesting-duration).

If this command has never been executed with the existing config file, then it will train a new model
for each pair, for each backtesting window within the bigger `--timerange`.

@@ -366,7 +382,7 @@ for each pair, for each backtesting window within the bigger `--timerange`.

### Building a freqai strategy

-The Freqai strategy requires the user to include the following lines of code in the strategy:
+The FreqAI strategy requires the user to include the following lines of code in the strategy:

```python

@@ -385,8 +401,6 @@ The Freqai strategy requires the user to include the following lines of code in

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:

-        self.freqai_info = self.config["freqai"]
-
        # All indicators must be populated by populate_any_indicators() for live functionality
        # to work correctly.

@@ -403,21 +417,24 @@ The user should also include `populate_any_indicators()` from `templates/FreqaiExampleStrategy.py` which builds
The user should also include `populate_any_indicators()` from `templates/FreqaiExampleStrategy.py`, which builds
the feature set with a proper naming convention for the IFreqaiModel to use later.

-### Building an IFreqaiModel
+### Setting classifier targets

FreqAI includes the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. Typically, the user would set the targets using strings:

```python
df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
```

-FreqAI has multiple example prediction model based libraries such as `Catboost` regression (`freqai/prediction_models/CatboostPredictionModel.py`) and `LightGBM` regression.
-However, users can customize and create their own prediction models using the `IFreqaiModel` class.
-Users are encouraged to inherit `train()` and `predict()` to let them customize various aspects of their training procedures.

### Running the model live

-Freqai can be run dry/live using the following command
+FreqAI can be run dry/live using the following command:

```bash
-freqtrade trade --strategy FreqaiExampleStrategy --config config_freqai.example.json --freqaimodel ExamplePredictionModel
+freqtrade trade --strategy FreqaiExampleStrategy --config config_freqai.example.json --freqaimodel LightGBMRegressor
```

-By default, Freqai will not find any existing models and will start by training a new one
+By default, FreqAI will not find any existing models and will start by training a new one
given the user configuration settings. Following training, it will use that model to make predictions on incoming candles until a new model is available. New models are typically generated as often as possible, with FreqAI managing an internal queue of the pairs to try and keep all models equally "young." FreqAI will always use the newest trained model to make predictions on incoming live data. If users do not want FreqAI to retrain new models as often as possible, they can set `live_retrain_hours` to tell FreqAI to wait at least that number of hours before retraining a new model. Additionally, users can set `expired_hours` to tell FreqAI to avoid making predictions on models aged over this number of hours.

If the user wishes to start dry/live from a backtested saved model, the user only needs to reuse
@@ -430,7 +447,7 @@ the same `identifier` parameter
}
```

-In this case, although Freqai will initiate with a
+In this case, although FreqAI will initiate with a
pre-trained model, it will still check to see how much time has elapsed since the model was trained,
and if a full `live_retrain_hours` has elapsed since the end of the loaded model, FreqAI will self retrain.

@@ -457,7 +474,7 @@ the user is asking for `labels` that are 24 candles in the future.
### Removing outliers with the Dissimilarity Index

The Dissimilarity Index (DI) aims to quantify the uncertainty associated with each
-prediction by the model. To do so, Freqai measures the distance between each training
+prediction by the model. To do so, FreqAI measures the distance between each training
data point and all other training data points:

$$ d_{ab} = \sqrt{\sum_{j=1}^p(X_{a,j}-X_{b,j})^2} $$
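As a small, self-contained illustration of the distance computation in the formula above (an editor's sketch with made-up values, not FreqAI's internal implementation):

```python
import numpy as np

# Toy training set: 4 data points, p = 3 features per candle (values are made up).
X = np.array([
    [0.1, 1.2, -0.3],
    [0.2, 1.0, -0.1],
    [0.0, 1.1, -0.2],
    [2.5, 4.0,  3.0],   # an obvious outlier
])

# d_ab = sqrt(sum_j (X_aj - X_bj)^2) for every pair of training points.
diffs = X[:, None, :] - X[None, :, :]
pairwise_d = np.sqrt((diffs ** 2).sum(axis=-1))

# Mean distance of each point to all other points; the outlier stands out clearly.
mean_d = pairwise_d.sum(axis=1) / (len(X) - 1)
print(mean_d.round(3))
```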
@@ -512,7 +529,7 @@ variance of the data set is >= 0.999.

### Removing outliers using a Support Vector Machine (SVM)

-The user can tell Freqai to remove outlier data points from the training/test data sets by setting:
+The user can tell FreqAI to remove outlier data points from the training/test data sets by setting:

```json
    "freqai": {
@@ -522,9 +539,21 @@ The user can tell Freqai to remove outlier data points from the training/test da
    }
```

-Freqai will train an SVM on the training data (or components if the user activated
+FreqAI will train an SVM on the training data (or components if the user activated
`principal_component_analysis`) and remove any data point that it deems to be sitting beyond the feature space.

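For readers unfamiliar with the mechanism, here is a rough standalone sketch of SVM-based outlier removal using scikit-learn's `SGDOneClassSVM` (the class referenced by the `svm_params` table entry above). This is an editor's illustration with synthetic data, not FreqAI's internal code:

```python
import numpy as np
from sklearn.linear_model import SGDOneClassSVM

rng = np.random.default_rng(42)
X_train = rng.normal(size=(500, 10))   # synthetic "training features"
X_train[:5] += 8.0                     # plant a few obvious outliers

svm = SGDOneClassSVM(nu=0.1, shuffle=False, random_state=42)
svm.fit(X_train)

labels = svm.predict(X_train)          # +1 = inlier, -1 = outlier
X_clean = X_train[labels == 1]         # keep only the points the SVM accepts
print(f"removed {int((labels == -1).sum())} of {len(X_train)} training points")
```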
### Clustering the training data and removing outliers with DBSCAN

The user can tell FreqAI to use DBSCAN to cluster training data and remove outliers from the training data set. The user activates `use_DBSCAN_to_remove_outliers` to cluster training data for identification of outliers, which is also used to detect incoming outliers for prediction data points.

```json
    "freqai": {
        "feature_parameters" : {
            "use_DBSCAN_to_remove_outliers": true
        }
    }
```

### Stratifying the data

The user can stratify the training/testing data using:
@@ -612,53 +641,32 @@ If the user sets this value, FreqAI will initially use the predictions from the
and then subsequently begin introducing real prediction data as it is generated. FreqAI will save
this historical data to be reloaded if the user stops and restarts with the same `identifier`.

-<!-- ## Dynamic target expectation
+## Extra returns per train

-The labels used for model training have a unique statistical distribution for each separate model training.
-We can use this information to know if our current prediction is in the realm of what the model was trained on,
-and if so, what is the statistical probability of the current prediction. With this information, we can
-make more informed predictions.
-FreqAI builds this label distribution and provides a quantile to the strategy, which can be optionally used as a
-dynamic threshold. The `target_quantile: X` means that X% of the labels are below this value. So setting:
+Users may find that there are some important metrics that they'd like to return to the strategy at the end of each retrain.
+Users can include these metrics by assigning them to `dk.data['extra_returns_per_train']['my_new_value'] = XYZ` inside their custom prediction
+model class. FreqAI takes the `my_new_value` assigned in this dictionary and expands it to fit the return dataframe to the strategy.
+The user can then use the value in the strategy with `dataframe['my_new_value']`. An example of how this is already used in FreqAI is
+the `&*_mean` and `&*_std` values, which indicate the mean and standard deviation of that particular label during the most recent training.
+Another example is shown below if the user wants to use live metrics from the trade database.

The user needs to set the standard dictionary in the config so FreqAI can return proper dataframe shapes:

```json
    "freqai": {
-        "feature_parameters" : {
-            "target_quantile": 0.9
-        }
+        "extra_returns_per_train": {"total_profit": 4}
    }
```

-Means the user will get back in the strategy the label threshold at which 90% of the labels were
-below this value. An example usage in the strategy may look something like:
+These values will likely be overridden by the user prediction model, but in the case where the user model has yet to set them, or needs
+a default initial value, this is the value that will be returned.
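As a concrete, self-contained illustration of the `extra_returns_per_train` mechanism described above (the stub class below stands in for FreqAI's data kitchen object `dk` and is an assumption for this sketch, not the real freqtrade API):

```python
import pandas as pd

class FreqaiDataKitchenStub:
    """Hypothetical stand-in for the FreqAI object usually named `dk`."""
    def __init__(self) -> None:
        # The config default ("extra_returns_per_train": {"total_profit": 4}) seeds the dict.
        self.data = {"extra_returns_per_train": {"total_profit": 4}}

def after_retrain(dk: FreqaiDataKitchenStub) -> None:
    # Inside a custom prediction model, a fresh metric would be assigned per retrain.
    dk.data["extra_returns_per_train"]["total_profit"] = 123.4

def expand_to_strategy(dk: FreqaiDataKitchenStub, n_candles: int) -> pd.DataFrame:
    # FreqAI expands each scalar to fit the dataframe handed back to the strategy,
    # where it can be read as dataframe['total_profit'].
    return pd.DataFrame({k: [v] * n_candles
                         for k, v in dk.data["extra_returns_per_train"].items()})

dk = FreqaiDataKitchenStub()
after_retrain(dk)
print(expand_to_strategy(dk, n_candles=3))
```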
-```python
-
-def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
-    # ... #
-
-    (
-        dataframe["prediction"],
-        dataframe["do_predict"],
-        dataframe["target_upper_quantile"],
-        dataframe["target_lower_quantile"],
-    ) = self.freqai.start(dataframe, metadata, self)
-
-    return dataframe
-
-def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
-    buy_conditions = [
-        (dataframe["prediction"] > dataframe["target_upper_quantile"]) & (dataframe["do_predict"] == 1)
-    ]
-
-    if buy_conditions:
-        dataframe.loc[reduce(lambda x, y: x | y, buy_conditions), "buy"] = 1
-
-    return dataframe
-
-``` -->
+## Building an IFreqaiModel
+
+FreqAI has multiple example prediction models based on libraries such as `Catboost` regression (`freqai/prediction_models/CatboostRegressor.py`) and `LightGBM` regression.
+However, users can customize and create their own prediction models using the `IFreqaiModel` class.
+Users are encouraged to inherit `train()` and `predict()` to let them customize various aspects of their training procedures.

## Additional information
@@ -623,12 +623,13 @@ class AwesomeStrategy(IStrategy):

!!! Warning
    `confirm_trade_exit()` can prevent stoploss exits, causing significant losses as this would ignore stoploss exits.
+   `confirm_trade_exit()` will not be called for Liquidations - as liquidations are forced by the exchange, and therefore cannot be rejected.

## Adjust trade position

The `position_adjustment_enable` strategy property enables the usage of the `adjust_trade_position()` callback in the strategy.
For performance reasons, it's disabled by default and freqtrade will show a warning message on startup if enabled.
-`adjust_trade_position()` can be used to perform additional orders, for example to manage risk with DCA (Dollar Cost Averaging).
+`adjust_trade_position()` can be used to perform additional orders, for example to manage risk with DCA (Dollar Cost Averaging) or to increase or decrease positions.

The `max_entry_position_adjustment` property is used to limit the number of additional buys per trade (on top of the first buy) that the bot can execute. By default, the value is -1, which means the bot has no limit on the number of adjustment buys.

@@ -636,10 +637,13 @@ The strategy is expected to return a stake_amount (in stake currency) between `m
If there are not enough funds in the wallet (the return value is above `max_stake`) then the signal will be ignored.
Additional orders also result in additional fees and those orders don't count towards `max_open_trades`.

-This callback is **not** called when there is an open order (either buy or sell) waiting for execution, or when you have reached the maximum amount of extra buys that you have set on `max_entry_position_adjustment`.
+This callback is **not** called when there is an open order (either buy or sell) waiting for execution.

`adjust_trade_position()` is called very frequently for the duration of a trade, so you must keep your implementation as performant as possible.

-Position adjustments will always be applied in the direction of the trade, so a positive value will always increase your position, no matter if it's a long or short trade. Modifications to leverage are not possible.
+Additional buys are ignored once you have reached the maximum amount of extra buys that you have set on `max_entry_position_adjustment`, but the callback is called anyway, looking for partial exits.
+
+Position adjustments will always be applied in the direction of the trade, so a positive value will always increase your position (negative values will decrease your position), no matter if it's a long or short trade. Modifications to leverage are not possible.

!!! Note "About stake size"
    Using fixed stake size means it will be the amount used for the first order, just like without position adjustment.
@@ -648,12 +652,12 @@ Position adjustments will always be applied in the direction of the trade, so a

!!! Warning
    Stoploss is still calculated from the initial opening price, not averaged price.
    Regular stoploss rules still apply (cannot move down).

!!! Warning "/stopbuy"
    While the `/stopbuy` command stops the bot from entering new trades, the position adjustment feature will continue buying new orders on existing trades.

!!! Warning "Backtesting"
-    During backtesting this callback is called for each candle in `timeframe` or `timeframe_detail`, so performance will be affected.
+    During backtesting this callback is called for each candle in `timeframe` or `timeframe_detail`, so run-time performance will be affected.

``` python
from freqtrade.persistence import Trade
@@ -684,22 +688,41 @@ def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: f
        return proposed_stake / self.max_dca_multiplier

    def adjust_trade_position(self, trade: Trade, current_time: datetime,
-                              current_rate: float, current_profit: float, min_stake: Optional[float],
-                              max_stake: float, **kwargs):
+                              current_rate: float, current_profit: float,
+                              min_stake: Optional[float], max_stake: float,
+                              current_entry_rate: float, current_exit_rate: float,
+                              current_entry_profit: float, current_exit_profit: float,
+                              **kwargs) -> Optional[float]:
        """
-        Custom trade adjustment logic, returning the stake amount that a trade should be increased.
-        This means extra buy orders with additional fees.
+        Custom trade adjustment logic, returning the stake amount that a trade should be
+        increased or decreased.
+        This means extra buy or sell orders with additional fees.
        Only called when `position_adjustment_enable` is set to True.

        For full documentation please go to https://www.freqtrade.io/en/latest/strategy-advanced/

        When not implemented by a strategy, returns None

        :param trade: trade object.
        :param current_time: datetime object, containing the current datetime
        :param current_rate: Current buy rate.
        :param current_profit: Current profit (as ratio), calculated based on current_rate.
-        :param min_stake: Minimal stake size allowed by exchange.
-        :param max_stake: Balance available for trading.
+        :param min_stake: Minimal stake size allowed by exchange (for both entries and exits)
+        :param max_stake: Maximum stake allowed (either through balance, or by exchange limits).
+        :param current_entry_rate: Current rate using entry pricing.
+        :param current_exit_rate: Current rate using exit pricing.
+        :param current_entry_profit: Current profit using entry pricing.
+        :param current_exit_profit: Current profit using exit pricing.
        :param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
-        :return float: Stake amount to adjust your trade
+        :return float: Stake amount to adjust your trade,
+                       Positive values to increase position, Negative values to decrease position.
+                       Return None for no action.
        """

+        if current_profit > 0.05 and trade.nr_of_successful_exits == 0:
+            # Take half of the profit at +5%
+            return -(trade.stake_amount / 2)
+
        if current_profit > -0.05:
            return None

@@ -734,6 +757,25 @@ def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: f

```

### Position adjust calculations

* Entry rates are calculated using weighted averages.
* Exits will not influence the average entry rate.
* Partial exit relative profit is relative to the average entry price at this point.
* Final exit relative profit is calculated based on the total invested capital. (See example below.)

??? example "Calculation example"
    *This example assumes 0 fees for simplicity, and a long position on an imaginary coin.*

    * Buy 100@8\$
    * Buy 100@9\$ -> Avg price: 8.5\$
    * Sell 100@10\$ -> Avg price: 8.5\$, realized profit 150\$, 17.65%
    * Buy 150@11\$ -> Avg price: 10\$, realized profit 150\$, 17.65%
    * Sell 100@12\$ -> Avg price: 10\$, total realized profit 350\$, 20%
    * Sell 150@14\$ -> Avg price: 10\$, total realized profit 950\$, 40%

    The total profit for this trade was 950$ on a 3350$ investment (`100@8$ + 100@9$ + 150@11$`). As such, the final relative profit is 28.35% (`950 / 3350`).
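The arithmetic in the calculation example above can be reproduced with a few lines of Python; this is an editor's verification sketch, not freqtrade code:

```python
# Reproduces the weighted-average entry price and realized profit from the example above.
fills = [("buy", 100, 8), ("buy", 100, 9), ("sell", 100, 10),
         ("buy", 150, 11), ("sell", 100, 12), ("sell", 150, 14)]

amount = 0.0      # coins currently held
cost = 0.0        # cost basis of the open position
invested = 0.0    # total capital ever invested
realized = 0.0    # realized profit so far

for side, qty, price in fills:
    if side == "buy":
        amount += qty
        cost += qty * price
        invested += qty * price
    else:
        avg_price = cost / amount            # exits do not change the average entry price
        realized += (price - avg_price) * qty
        cost -= avg_price * qty
        amount -= qty

print(realized, invested, round(realized / invested * 100, 2))  # 950.0 3350.0 28.36
```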
## Adjust Entry Price

The `adjust_entry_price()` callback may be used by strategy developers to refresh/replace limit orders upon the arrival of new candles.
@@ -646,6 +646,9 @@ This is where calling `self.dp.current_whitelist()` comes in handy.
        return informative_pairs
```

??? Note "Plotting with current_whitelist"
    Current whitelist is not supported for `plot-dataframe`, as this command is usually used by providing an explicit pairlist - and would therefore make the return values of this method misleading.

### *get_pair_dataframe(pair, timeframe)*

``` python
@@ -731,6 +734,23 @@ if self.dp:
!!! Warning "Warning about backtesting"
    This method will always return up-to-date values - so usage during backtesting / hyperopt will lead to wrong results.

### Send Notification

The dataprovider `.send_msg()` function allows you to send custom notifications from your strategy.
Identical notifications will only be sent once per candle, unless the 2nd argument (`always_send`) is set to True.

``` python
self.dp.send_msg(f"{metadata['pair']} just got hot!")

# Force send this notification, avoid caching (Please read the warning below!)
self.dp.send_msg(f"{metadata['pair']} just got hot!", always_send=True)
```

Notifications will only be sent in trading modes (Live/Dry-run) - so this method can be called without conditions for backtesting.

!!! Warning "Spamming"
    You can spam yourself pretty badly by setting `always_send=True` in this method. Use this with great care and only in conditions you know will not happen throughout a candle, to avoid a message every 5 seconds.

### Complete Data-provider sample

```python
@@ -18,7 +18,7 @@ Note : `forcesell`, `forcebuy`, `emergencysell` are changed to `force_exit`, `fo
* [`check_buy_timeout()` -> `check_entry_timeout()`](#custom_entry_timeout)
* [`check_sell_timeout()` -> `check_exit_timeout()`](#custom_entry_timeout)
* New `side` argument to callbacks without trade object
-  * [`custom_stake_amount`](#custom-stake-amount)
+  * [`custom_stake_amount`](#custom_stake_amount)
  * [`confirm_trade_entry`](#confirm_trade_entry)
  * [`custom_entry_price`](#custom_entry_price)
* [Changed argument name in `confirm_trade_exit`](#confirm_trade_exit)
@@ -192,7 +192,7 @@ class AwesomeStrategy(IStrategy):
        return False
```

-### Custom-stake-amount
+### `custom_stake_amount`

New string argument `side` - which can be either `"long"` or `"short"`.
@@ -98,6 +98,7 @@ Example configuration showing the different settings:
            "exit_fill": "off",
            "protection_trigger": "off",
            "protection_trigger_global": "on",
+           "strategy_msg": "off",
            "show_candle": "off"
        },
        "reload": true,
@@ -109,7 +110,8 @@ Example configuration showing the different settings:
`exit` notifications are sent when the order is placed, while `exit_fill` notifications are sent when the order is filled on the exchange.
`*_fill` notifications are off by default and must be explicitly enabled.
`protection_trigger` notifications are sent when a protection triggers and `protection_trigger_global` notifications trigger when global protections are triggered.
-`show_candle` - show candle values as part of entry/exit messages. Only possible value is "ohlc".
+`strategy_msg` - Receive notifications from the strategy, sent via `self.dp.send_msg()` from the strategy [more details](strategy-customization.md#send-notification).
+`show_candle` - show candle values as part of entry/exit messages. Only possible values are `"ohlc"` or `"off"`.

`balance_dust_level` will define what the `/balance` command takes as "dust" - Currencies with a balance below this will be shown.
`reload` allows you to disable reload-buttons on selected messages.
@@ -1,5 +1,5 @@
""" Freqtrade bot """
-__version__ = 'develop'
+__version__ = '2022.8.dev'

if 'dev' in __version__:
    try:
@@ -67,7 +67,7 @@ def ask_user_config() -> Dict[str, Any]:
        "type": "text",
        "name": "stake_amount",
        "message": f"Please insert your stake amount (Number or '{UNLIMITED_STAKE_AMOUNT}'):",
-       "default": "100",
+       "default": "unlimited",
        "validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val),
        "filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"'
        if val == UNLIMITED_STAKE_AMOUNT
@@ -164,7 +164,7 @@ def ask_user_config() -> Dict[str, Any]:
        "when": lambda x: x['telegram']
    },
    {
-       "type": "text",
+       "type": "password",
        "name": "telegram_chat_id",
        "message": "Insert Telegram chat id",
        "when": lambda x: x['telegram']
@@ -191,7 +191,7 @@ def ask_user_config() -> Dict[str, Any]:
        "when": lambda x: x['api_server']
    },
    {
-       "type": "text",
+       "type": "password",
        "name": "api_server_password",
        "message": "Insert api-server password",
        "when": lambda x: x['api_server']
@@ -4,5 +4,4 @@ from freqtrade.configuration.check_exchange import check_exchange
from freqtrade.configuration.config_setup import setup_utils_configuration
from freqtrade.configuration.config_validation import validate_config_consistency
from freqtrade.configuration.configuration import Configuration
-from freqtrade.configuration.PeriodicCache import PeriodicCache
from freqtrade.configuration.timerange import TimeRange
@@ -85,7 +85,6 @@ def validate_config_consistency(conf: Dict[str, Any], preliminary: bool = False)
    _validate_unlimited_amount(conf)
    _validate_ask_orderbook(conf)
    validate_migrated_strategy_settings(conf)
-    _validate_freqai(conf)

    # validate configuration before returning
    logger.info('Validating configuration ...')
@@ -164,22 +163,6 @@ def _validate_edge(conf: Dict[str, Any]) -> None:
    )


-def _validate_freqai(conf: Dict[str, Any]) -> None:
-    """
-    Freqai param validator
-    """
-
-    if not conf.get('freqai', {}):
-        return
-
-    for param in constants.SCHEMA_FREQAI_REQUIRED:
-        if param not in conf.get('freqai', {}):
-            if param not in conf.get('freqai', {}).get('feature_parameters', {}):
-                raise OperationalException(
-                    f'{param} not found in Freqai config'
-                )


def _validate_whitelist(conf: Dict[str, Any]) -> None:
    """
    Dynamic whitelist does not require pair_whitelist to be set - however StaticWhitelist does.
@@ -241,6 +241,7 @@ CONF_SCHEMA = {
},
'exchange': {'$ref': '#/definitions/exchange'},
'edge': {'$ref': '#/definitions/edge'},
'freqai': {'$ref': '#/definitions/freqai'},
'experimental': {
'type': 'object',
'properties': {

@@ -318,6 +319,10 @@ CONF_SCHEMA = {
'type': 'string',
'enum': ['off', 'ohlc'],
},
'strategy_msg': {
'type': 'string',
'enum': TELEGRAM_SETTING_OPTIONS,
},
}
},
'reload': {'type': 'boolean'},

@@ -481,21 +486,31 @@ CONF_SCHEMA = {
"freqai": {
"type": "object",
"properties": {
"enabled": {"type": "boolean", "default": False},
"keras": {"type": "boolean", "default": False},
"conv_width": {"type": "integer", "default": 2},
"train_period_days": {"type": "integer", "default": 0},
"backtest_period_days": {"type": "float", "default": 7},
"identifier": {"type": "str", "default": "example"},
"backtest_period_days": {"type": "number", "default": 7},
"identifier": {"type": "string", "default": "example"},
"feature_parameters": {
"type": "object",
"properties": {
"include_corr_pairlist": {"type": "list"},
"include_timeframes": {"type": "list"},
"include_corr_pairlist": {"type": "array"},
"include_timeframes": {"type": "array"},
"label_period_candles": {"type": "integer"},
"include_shifted_candles": {"type": "integer", "default": 0},
"DI_threshold": {"type": "float", "default": 0},
"DI_threshold": {"type": "number", "default": 0},
"weight_factor": {"type": "number", "default": 0},
"principal_component_analysis": {"type": "boolean", "default": False},
"use_SVM_to_remove_outliers": {"type": "boolean", "default": False},
"svm_params": {"type": "object",
"properties": {
"shuffle": {"type": "boolean", "default": False},
"nu": {"type": "number", "default": 0.1}
},
}
},
"required": ["include_timeframes", "include_corr_pairlist", ]
},
"data_split_parameters": {
"type": "object",

@@ -507,13 +522,19 @@ CONF_SCHEMA = {
"model_training_parameters": {
"type": "object",
"properties": {
"n_estimators": {"type": "integer", "default": 2000},
"random_state": {"type": "integer", "default": 1},
"learning_rate": {"type": "number", "default": 0.02},
"task_type": {"type": "string", "default": "CPU"},
"n_estimators": {"type": "integer", "default": 1000}
},
},
},
"required": [
"enabled",
"train_period_days",
"backtest_period_days",
"identifier",
"feature_parameters",
"data_split_parameters",
"model_training_parameters"
]
},
},
}

@@ -558,17 +579,6 @@ SCHEMA_MINIMAL_REQUIRED = [
'dataformat_trades',
]
SCHEMA_FREQAI_REQUIRED = [
'include_timeframes',
'train_period_days',
'backtest_period_days',
'identifier',
'include_corr_pairlist',
'feature_parameters',
'data_split_parameters',
'model_training_parameters'
]
CANCEL_REASON = {
"TIMEOUT": "cancelled due to timeout",
"PARTIALLY_FILLED_KEEP_OPEN": "partially filled - keeping order open",
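The schema edits above replace "float", "list", and "str" — which are not JSON Schema types — with "number", "array", and "string", so values such as a fractional `backtest_period_days` validate correctly. A trimmed-down sketch of the effect using the `jsonschema` package (this is not freqtrade's full CONF_SCHEMA or its extended validator):

import jsonschema

freqai_schema = {
    "type": "object",
    "properties": {
        "backtest_period_days": {"type": "number"},   # was "float" - not a JSON Schema type
        "identifier": {"type": "string"},              # was "str"
        "feature_parameters": {
            "type": "object",
            "properties": {
                "include_timeframes": {"type": "array"},       # was "list"
                "include_corr_pairlist": {"type": "array"},
            },
            "required": ["include_timeframes", "include_corr_pairlist"],
        },
    },
}

conf = {
    "backtest_period_days": 0.5,   # fractional days now pass validation with type "number"
    "identifier": "example",
    "feature_parameters": {"include_timeframes": ["5m"], "include_corr_pairlist": []},
}
jsonschema.validate(instance=conf, schema=freqai_schema)  # raises ValidationError on a mismatch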
@@ -5,6 +5,7 @@ including ticker and orderbook data, live and historical candle (OHLCV) data
Common Interface for bot and strategy to access data.
"""
import logging
from collections import deque
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Tuple

@@ -16,6 +17,7 @@ from freqtrade.data.history import load_pair_history
from freqtrade.enums import CandleType, RunMode
from freqtrade.exceptions import ExchangeError, OperationalException
from freqtrade.exchange import Exchange, timeframe_to_seconds
from freqtrade.util import PeriodicCache
logger = logging.getLogger(__name__)

@@ -33,6 +35,10 @@ class DataProvider:
self.__cached_pairs: Dict[PairWithTimeframe, Tuple[DataFrame, datetime]] = {}
self.__slice_index: Optional[int] = None
self.__cached_pairs_backtesting: Dict[PairWithTimeframe, DataFrame] = {}
self._msg_queue: deque = deque()
self.__msg_cache = PeriodicCache(
maxsize=1000, ttl=timeframe_to_seconds(self._config.get('timeframe', '1h')))
def _set_dataframe_max_index(self, limit_index: int):
"""

@@ -265,3 +271,20 @@ class DataProvider:
if self._exchange is None:
raise OperationalException(NO_EXCHANGE_EXCEPTION)
return self._exchange.fetch_l2_order_book(pair, maximum)
def send_msg(self, message: str, *, always_send: bool = False) -> None:
"""
Send custom RPC Notifications from your bot.
Will not send any bot in modes other than Dry-run or Live.
:param message: Message to be sent. Must be below 4096.
:param always_send: If False, will send the message only once per candle, and surpress
identical messages.
Careful as this can end up spaming your chat.
Defaults to False
"""
if self.runmode not in (RunMode.DRY_RUN, RunMode.LIVE):
return
if always_send or message not in self.__msg_cache:
self._msg_queue.append(message)
self.__msg_cache[message] = True
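send_msg() simply queues the text for the RPC handlers; unless always_send=True, the PeriodicCache above suppresses repeats of the same message within one candle. A sketch of how a strategy might use it (the surrounding strategy class and callback wiring are assumed, not shown in this diff):

# Inside a strategy method - `self.dp` is the DataProvider instance attached to the strategy.
def custom_exit(self, pair: str, trade, current_time, current_rate: float,
                current_profit: float, **kwargs):
    if current_profit > 0.05:
        # Delivered at most once per candle for identical text unless always_send=True.
        self.dp.send_msg(f"{pair} is up {current_profit:.2%} - review the position.")
    return None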
@@ -9,10 +9,12 @@ class ExitType(Enum):
STOP_LOSS = "stop_loss"
STOPLOSS_ON_EXCHANGE = "stoploss_on_exchange"
TRAILING_STOP_LOSS = "trailing_stop_loss"
LIQUIDATION = "liquidation"
EXIT_SIGNAL = "exit_signal"
FORCE_EXIT = "force_exit"
EMERGENCY_EXIT = "emergency_exit"
CUSTOM_EXIT = "custom_exit"
PARTIAL_EXIT = "partial_exit"
NONE = ""
def __str__(self):

@@ -17,6 +17,8 @@ class RPCMessageType(Enum):
PROTECTION_TRIGGER = 'protection_trigger'
PROTECTION_TRIGGER_GLOBAL = 'protection_trigger_global'
STRATEGY_MSG = 'strategy_msg'
def __repr__(self):
return self.value
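The new STRATEGY_MSG type is the channel that DataProvider.send_msg() messages travel through, and the 'strategy_msg' key added to the telegram schema earlier lets users switch those notifications on or off. A config fragment sketch, written here as a Python dict (the real config is JSON, and the accepted values come from TELEGRAM_SETTING_OPTIONS):

telegram_conf = {
    "telegram": {
        "enabled": True,
        "notification_settings": {
            # One of the TELEGRAM_SETTING_OPTIONS values; "on" is assumed here for illustration.
            "strategy_msg": "on",
        },
    }
}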
@@ -16,7 +16,7 @@ import arrow
import ccxt
import ccxt.async_support as ccxt_async
from cachetools import TTLCache
from ccxt import ROUND_DOWN, ROUND_UP, TICK_SIZE, TRUNCATE, Precise, decimal_to_precision
from ccxt import ROUND_DOWN, ROUND_UP, TICK_SIZE, TRUNCATE, decimal_to_precision
from pandas import DataFrame
from freqtrade.constants import (DEFAULT_AMOUNT_RESERVE_PERCENT, NON_OPEN_EXCHANGE_STATES, BuySell,

@@ -32,6 +32,7 @@ from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, BAD_EXCHANGE
retrier_async)
from freqtrade.misc import chunks, deep_merge_dicts, safe_value_fallback2
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
from freqtrade.util import FtPrecise
CcxtModuleType = Any

@@ -708,10 +709,10 @@ class Exchange:
# counting_mode=self.precisionMode,
# ))
if self.precisionMode == TICK_SIZE:
precision = Precise(str(self.markets[pair]['precision']['price']))
price_str = Precise(str(price))
precision = FtPrecise(self.markets[pair]['precision']['price'])
price_str = FtPrecise(price)
missing = price_str % precision
if not missing == Precise("0"):
if not missing == FtPrecise("0"):
price = round(float(str(price_str - missing + precision)), 14)
else:
symbol_prec = self.markets[pair]['precision']['price']
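In the TICK_SIZE branch above the price is bumped up to the next multiple of the exchange tick, with ccxt's Precise swapped for freqtrade's FtPrecise wrapper. The arithmetic itself can be illustrated with Decimal (a standalone sketch, not the FtPrecise implementation):

from decimal import Decimal

def round_up_to_tick(price: float, tick_size: float) -> float:
    """Round price up to the next multiple of tick_size (mirrors the TICK_SIZE branch above)."""
    price_d = Decimal(str(price))
    tick_d = Decimal(str(tick_size))
    missing = price_d % tick_d
    if missing != 0:
        price_d = price_d - missing + tick_d
    return round(float(price_d), 14)

print(round_up_to_tick(1.00001, 0.00005))  # -> 1.00005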
@@ -849,6 +850,7 @@ class Exchange:
dry_order.update({
'average': average,
'filled': _amount,
'remaining': 0.0,
'cost': (dry_order['amount'] * average) / leverage
})
# market orders will always incurr taker fees

@@ -1332,11 +1334,19 @@ class Exchange:
raise OperationalException(e) from e
@retrier
def fetch_positions(self) -> List[Dict]:
def fetch_positions(self, pair: str = None) -> List[Dict]:
"""
Fetch positions from the exchange.
If no pair is given, all positions are returned.
:param pair: Pair for the query
"""
if self._config['dry_run'] or self.trading_mode != TradingMode.FUTURES:
return []
try:
positions: List[Dict] = self._api.fetch_positions()
symbols = []
if pair:
symbols.append(pair)
positions: List[Dict] = self._api.fetch_positions(symbols)
self._log_exchange_response('fetch_positions', positions)
return positions
except ccxt.DDoSProtection as e:
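fetch_positions() now takes an optional pair and forwards it to ccxt as a one-element symbols list, so a caller that only needs a single position no longer pulls the whole account. Illustrative call sites (exchange construction omitted; the exact futures symbol format depends on the exchange and trading mode):

# All open futures positions (previous behaviour, still the default):
all_positions = exchange.fetch_positions()

# Position for one pair only - forwarded to ccxt as fetch_positions(["ETH/USDT:USDT"]):
eth_position = exchange.fetch_positions("ETH/USDT:USDT")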
@@ -1499,7 +1509,8 @@ class Exchange:
return price_side
def get_rate(self, pair: str, refresh: bool,
side: EntryExit, is_short: bool) -> float:
side: EntryExit, is_short: bool,
order_book: Optional[dict] = None, ticker: Optional[dict] = None) -> float:
"""
Calculates bid/ask target
bid rate - between current ask price and last price

@@ -1531,6 +1542,7 @@ class Exchange:
if conf_strategy.get('use_order_book', False):
order_book_top = conf_strategy.get('order_book_top', 1)
if order_book is None:
order_book = self.fetch_l2_order_book(pair, order_book_top)
logger.debug('order_book %s', order_book)
# top 1 = index 0

@@ -1538,14 +1550,15 @@ class Exchange:
rate = order_book[f"{price_side}s"][order_book_top - 1][0]
except (IndexError, KeyError) as e:
logger.warning(
f"{name} Price at location {order_book_top} from orderbook could not be "
f"determined. Orderbook: {order_book}"
f"{pair} - {name} Price at location {order_book_top} from orderbook "
f"could not be determined. Orderbook: {order_book}"
)
raise PricingError from e
logger.debug(f"{name} price from orderbook {price_side_word}"
logger.debug(f"{pair} - {name} price from orderbook {price_side_word}"
f"side - top {order_book_top} order book {side} rate {rate:.8f}")
else:
logger.debug(f"Using Last {price_side_word} / Last Price")
if ticker is None:
ticker = self.fetch_ticker(pair)
ticker_rate = ticker[price_side]
if ticker['last'] and ticker_rate:

@@ -1563,6 +1576,33 @@ class Exchange:
return rate
def get_rates(self, pair: str, refresh: bool, is_short: bool) -> Tuple[float, float]:
entry_rate = None
exit_rate = None
if not refresh:
entry_rate = self._entry_rate_cache.get(pair)
exit_rate = self._exit_rate_cache.get(pair)
if entry_rate:
logger.debug(f"Using cached buy rate for {pair}.")
if exit_rate:
logger.debug(f"Using cached sell rate for {pair}.")
entry_pricing = self._config.get('entry_pricing', {})
exit_pricing = self._config.get('exit_pricing', {})
order_book = ticker = None
if not entry_rate and entry_pricing.get('use_order_book', False):
order_book_top = max(entry_pricing.get('order_book_top', 1),
exit_pricing.get('order_book_top', 1))
order_book = self.fetch_l2_order_book(pair, order_book_top)
entry_rate = self.get_rate(pair, refresh, 'entry', is_short, order_book=order_book)
elif not entry_rate:
ticker = self.fetch_ticker(pair)
entry_rate = self.get_rate(pair, refresh, 'entry', is_short, ticker=ticker)
if not exit_rate:
exit_rate = self.get_rate(pair, refresh, 'exit',
is_short, order_book=order_book, ticker=ticker)
return entry_rate, exit_rate
# Fee handling
@retrier
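get_rates() wraps two get_rate() calls so the order book (or ticker) is fetched once and shared between the entry and exit side, falling back to the per-side rate caches when refresh is False. A sketch of the intended call pattern (caller assumed, not part of this diff):

entry_rate, exit_rate = exchange.get_rates("BTC/USDT", refresh=True, is_short=False)
print(f"entry: {entry_rate:.2f}  exit: {exit_rate:.2f}")
# With refresh=False, values cached in _entry_rate_cache / _exit_rate_cache are reused
# when available, avoiding an extra order book or ticker request.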
@@ -2539,7 +2579,6 @@ class Exchange:
else:
return 0.0
@retrier
def get_or_calculate_liquidation_price(
self,
pair: str,

@@ -2573,20 +2612,12 @@ class Exchange:
upnl_ex_1=upnl_ex_1
)
else:
try:
positions = self._api.fetch_positions([pair])
positions = self.fetch_positions(pair)
if len(positions) > 0:
pos = positions[0]
isolated_liq = pos['liquidationPrice']
else:
return None
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not set margin mode due to {e.__class__.__name__}. Message: {e}') from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
if isolated_liq:
buffer_amount = abs(open_rate - isolated_liq) * self.liquidation_buffer

@@ -1,6 +1,6 @@
""" FTX exchange subclass """
import logging
from typing import Any, Dict, List, Tuple
from typing import Any, Dict, List, Optional, Tuple
import ccxt

@@ -116,9 +116,17 @@ class Ftx(Exchange):
if len(order) == 1:
if order[0].get('status') == 'closed':
# Trigger order was triggered ...
real_order_id = order[0].get('info', {}).get('orderId')
real_order_id: Optional[str] = order[0].get('info', {}).get('orderId')
# OrderId may be None for stoploss-market orders
# But contains "average" in these cases.
# So we need to get it through the endpoint
# /conditional_orders/{conditional_order_id}/triggers
if not real_order_id:
res = self._api.privateGetConditionalOrdersConditionalOrderIdTriggers(
params={'conditional_order_id': order_id})
self._log_exchange_response('fetch_stoploss_order2', res)
real_order_id = res['result'][0]['orderId'] if res.get(
'result', []) else None
if real_order_id:
order1 = self._api.fetch_order(real_order_id, pair)
self._log_exchange_response('fetch_stoploss_order1', order1)
@@ -5,13 +5,14 @@ import re
import shutil
import threading
from pathlib import Path
from typing import Any, Dict, Tuple
from typing import Any, Dict, Tuple, TypedDict
import numpy as np
import pandas as pd
import rapidjson
from joblib import dump, load
from joblib.externals import cloudpickle
from numpy.typing import ArrayLike
from numpy.typing import ArrayLike, NDArray
from pandas import DataFrame
from freqtrade.configuration import TimeRange

@@ -24,6 +25,15 @@ from freqtrade.strategy.interface import IStrategy
logger = logging.getLogger(__name__)
class pair_info(TypedDict):
model_filename: str
first: bool
trained_timestamp: int
priority: int
data_path: str
extras: dict
class FreqaiDataDrawer:
"""
Class aimed at holding all pair models/info in memory for better inferencing/retrainig/saving

@@ -39,7 +49,7 @@ class FreqaiDataDrawer:
Robert Caulk @robcaulk
Theoretical brainstorming:
Elin Törnquist @thorntwig
Elin Törnquist @th0rntwig
Code review, software architecture brainstorming:
@xmatthias

@@ -54,14 +64,13 @@ class FreqaiDataDrawer:
self.config = config
self.freqai_info = config.get("freqai", {})
# dictionary holding all pair metadata necessary to load in from disk
self.pair_dict: Dict[str, Any] = {}
self.pair_dict: Dict[str, pair_info] = {}
# dictionary holding all actively inferenced models in memory given a model filename
self.model_dictionary: Dict[str, Any] = {}
self.model_return_values: Dict[str, Any] = {}
self.pair_data_dict: Dict[str, Any] = {}
self.historic_data: Dict[str, Any] = {}
self.historic_predictions: Dict[str, Any] = {}
self.follower_dict: Dict[str, Any] = {}
self.model_return_values: Dict[str, DataFrame] = {}
self.historic_data: Dict[str, Dict[str, DataFrame]] = {}
self.historic_predictions: Dict[str, DataFrame] = {}
self.follower_dict: Dict[str, pair_info] = {}
self.full_path = full_path
self.follower_name: str = self.config.get("bot_name", "follower1")
self.follower_dict_path = Path(

@@ -76,6 +85,10 @@ class FreqaiDataDrawer:
self.load_historic_predictions_from_disk()
self.training_queue: Dict[str, int] = {}
self.history_lock = threading.Lock()
self.old_DBSCAN_eps: Dict[str, float] = {}
self.empty_pair_dict: pair_info = {
"model_filename": "", "trained_timestamp": 0,
"priority": 1, "first": True, "data_path": "", "extras": {}}
def load_drawer_from_disk(self):
"""

@@ -132,15 +145,17 @@ class FreqaiDataDrawer:
"""
Save data drawer full of all pair model metadata in present model folder.
"""
with open(self.pair_dictionary_path, "w") as fp:
json.dump(self.pair_dict, fp, default=self.np_encoder)
with open(self.pair_dictionary_path, 'w') as fp:
rapidjson.dump(self.pair_dict, fp, default=self.np_encoder,
number_mode=rapidjson.NM_NATIVE)
def save_follower_dict_to_disk(self):
"""
Save follower dictionary to disk (used by strategy for persistent prediction targets)
"""
with open(self.follower_dict_path, "w") as fp:
json.dump(self.follower_dict, fp, default=self.np_encoder)
rapidjson.dump(self.follower_dict, fp, default=self.np_encoder,
number_mode=rapidjson.NM_NATIVE)
def create_follower_dict(self):
"""
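Switching from json to rapidjson with number_mode=rapidjson.NM_NATIVE keeps numeric values (for example the large integer timestamps) in their native representation when the pair dictionary is written to disk, while np_encoder still converts stray numpy scalars. A standalone sketch of the same pattern (python-rapidjson assumed installed; file name and data are illustrative only):

import numpy as np
import rapidjson

def np_encoder(obj):
    # Fallback for numpy scalar types that rapidjson does not serialize natively.
    if isinstance(obj, np.generic):
        return obj.item()

pair_dict = {"BTC/USDT": {"model_filename": "cb_btc_1660000000",
                          "trained_timestamp": np.int64(1660000000),
                          "priority": 1, "first": False, "data_path": "", "extras": {}}}

with open("pair_dictionary.json", "w") as fp:
    rapidjson.dump(pair_dict, fp, default=np_encoder, number_mode=rapidjson.NM_NATIVE)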
@@ -174,18 +189,19 @@ class FreqaiDataDrawer:
trained_timestamp: int = the last time the coin was trained
return_null_array: bool = Follower could not find pair metadata
"""
pair_dict = self.pair_dict.get(pair)
data_path_set = self.pair_dict.get(pair, {}).get("data_path", None)
data_path_set = self.pair_dict.get(pair, self.empty_pair_dict).get("data_path", "")
return_null_array = False
if pair_dict:
model_filename = pair_dict["model_filename"]
trained_timestamp = pair_dict["trained_timestamp"]
elif not self.follow_mode:
pair_dict = self.pair_dict[pair] = {}
model_filename = pair_dict["model_filename"] = ""
trained_timestamp = pair_dict["trained_timestamp"] = 0
pair_dict["priority"] = len(self.pair_dict)
self.pair_dict[pair] = self.empty_pair_dict.copy()
model_filename = ""
trained_timestamp = 0
self.pair_dict[pair]["priority"] = len(self.pair_dict)
if not data_path_set and self.follow_mode:
logger.warning(

@@ -204,11 +220,9 @@ class FreqaiDataDrawer:
if pair_in_dict:
return
else:
self.pair_dict[metadata["pair"]] = {}
self.pair_dict[metadata["pair"]]["model_filename"] = ""
self.pair_dict[metadata["pair"]]["first"] = True
self.pair_dict[metadata["pair"]]["trained_timestamp"] = 0
self.pair_dict[metadata["pair"]] = self.empty_pair_dict.copy()
self.pair_dict[metadata["pair"]]["priority"] = len(self.pair_dict)
return
def pair_to_end_of_training_queue(self, pair: str) -> None:

@@ -225,25 +239,59 @@ class FreqaiDataDrawer:
historical candles, and also stores historical predictions despite retrainings (so stored
predictions are true predictions, not just inferencing on trained data)
"""
# dynamic df returned to strategy and plotted in frequi
mrv_df = self.model_return_values[pair] = pd.DataFrame()
for label in dk.label_list:
# if user reused `identifier` in config and has historical predictions collected, load them
# so that frequi remains uninterrupted after a crash
hist_df = self.historic_predictions
if pair in hist_df:
len_diff = len(hist_df[pair].index) - len(pred_df.index)
if len_diff < 0:
df_concat = pd.concat([pred_df.iloc[:abs(len_diff)], hist_df[pair]],
ignore_index=True, keys=hist_df[pair].keys())
else:
df_concat = hist_df[pair].tail(len(pred_df.index)).reset_index(drop=True)
df_concat = df_concat.fillna(0)
self.model_return_values[pair] = df_concat
logger.info(f'Setting initial FreqUI plots from historical data for {pair}.')
else:
for label in pred_df.columns:
mrv_df[label] = pred_df[label]
if mrv_df[label].dtype == object:
continue
mrv_df[f"{label}_mean"] = dk.data["labels_mean"][label]
mrv_df[f"{label}_std"] = dk.data["labels_std"][label]
if self.freqai_info.get("feature_parameters", {}).get("DI_threshold", 0) > 0:
if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0:
mrv_df["DI_values"] = dk.DI_values
mrv_df["do_predict"] = do_preds
def append_model_predictions(self, pair: str, predictions: DataFrame, do_preds: ArrayLike,
if dk.data['extra_returns_per_train']:
rets = dk.data['extra_returns_per_train']
for return_str in rets:
mrv_df[return_str] = rets[return_str]
# for keras type models, the conv_window needs to be prepended so
# viewing is correct in frequi
if self.freqai_info.get('keras', False):
n_lost_points = self.freqai_info.get('conv_width', 2)
zeros_df = DataFrame(np.zeros((n_lost_points, len(mrv_df.columns))),
columns=mrv_df.columns)
self.model_return_values[pair] = pd.concat(
[zeros_df, mrv_df], axis=0, ignore_index=True)
def append_model_predictions(self, pair: str, predictions: DataFrame,
do_preds: NDArray[np.int_],
dk: FreqaiDataKitchen, len_df: int) -> None:
# strat seems to feed us variable sized dataframes - and since we are trying to build our
# own return array in the same shape, we need to figure out how the size has changed
# and adapt our stored/returned info accordingly.
length_difference = len(self.model_return_values[pair]) - len_df
i = 0
@@ -262,19 +310,28 @@ class FreqaiDataDrawer:
hp_df = pd.concat([hp_df, nan_df], ignore_index=True, axis=0)
self.historic_predictions[pair] = hp_df[:-1]
for label in dk.label_list:
# incase user adds additional "predictions" e.g. predict_proba output:
for label in predictions.columns:
df[label].iloc[-1] = predictions[label].iloc[-1]
if df[label].dtype == object:
continue
df[f"{label}_mean"].iloc[-1] = dk.data["labels_mean"][label]
df[f"{label}_std"].iloc[-1] = dk.data["labels_std"][label]
# df['prediction'].iloc[-1] = predictions[-1]
df["do_predict"].iloc[-1] = do_preds[-1]
if self.freqai_info.get("feature_parameters", {}).get("DI_threshold", 0) > 0:
if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0:
df["DI_values"].iloc[-1] = dk.DI_values[-1]
if dk.data['extra_returns_per_train']:
rets = dk.data['extra_returns_per_train']
for return_str in rets:
df[return_str].iloc[-1] = rets[return_str]
# append the new predictions to persistent storage
if pair in self.historic_predictions:
self.historic_predictions[pair].iloc[-1] = df[label].iloc[-1]
for key in df.keys():
self.historic_predictions[pair][key].iloc[-1] = df[key].iloc[-1]
if length_difference < 0:
prepend_df = pd.DataFrame(

@@ -301,16 +358,25 @@ class FreqaiDataDrawer:
dk.find_features(dataframe)
for label in dk.label_list:
if self.freqai_info.get('predict_proba', []):
full_labels = dk.label_list + self.freqai_info['predict_proba']
else:
full_labels = dk.label_list
for label in full_labels:
dataframe[label] = 0
dataframe[f"{label}_mean"] = 0
dataframe[f"{label}_std"] = 0
# dataframe['prediction'] = 0
dataframe["do_predict"] = 0
if self.freqai_info.get("feature_parameters", {}).get("DI_threshold", 0) > 0:
dataframe["DI_value"] = 0
if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0:
dataframe["DI_values"] = 0
if dk.data['extra_returns_per_train']:
rets = dk.data['extra_returns_per_train']
for return_str in rets:
dataframe[return_str] = 0
dk.return_dataframe = dataframe

@@ -379,24 +445,28 @@ class FreqaiDataDrawer:
model.save(save_path / f"{dk.model_filename}_model.h5")
if dk.svm_model is not None:
dump(dk.svm_model, save_path / str(dk.model_filename + "_svm_model.joblib"))
dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib")
dk.data["data_path"] = str(dk.data_path)
dk.data["model_filename"] = str(dk.model_filename)
dk.data["training_features_list"] = list(dk.data_dictionary["train_features"].columns)
dk.data["label_list"] = dk.label_list
# store the metadata
with open(save_path / str(dk.model_filename + "_metadata.json"), "w") as fp:
json.dump(dk.data, fp, default=dk.np_encoder)
with open(save_path / f"{dk.model_filename}_metadata.json", "w") as fp:
rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)
# save the train data to file so we can check preds for area of applicability later
dk.data_dictionary["train_features"].to_pickle(
save_path / str(dk.model_filename + "_trained_df.pkl")
save_path / f"{dk.model_filename}_trained_df.pkl"
)
if self.freqai_info.get("feature_parameters", {}).get("principal_component_analysis"):
dk.data_dictionary["train_dates"].to_pickle(
save_path / f"{dk.model_filename}_trained_dates_df.pkl"
)
if self.freqai_info["feature_parameters"].get("principal_component_analysis"):
cloudpickle.dump(
dk.pca, open(dk.data_path / str(dk.model_filename + "_pca_object.pkl"), "wb")
dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb")
)
# if self.live:
@@ -429,27 +499,27 @@ class FreqaiDataDrawer:
/ dk.data_path.parts[-1]
)
with open(dk.data_path / str(dk.model_filename + "_metadata.json"), "r") as fp:
with open(dk.data_path / f"{dk.model_filename}_metadata.json", "r") as fp:
dk.data = json.load(fp)
dk.training_features_list = dk.data["training_features_list"]
dk.label_list = dk.data["label_list"]
dk.data_dictionary["train_features"] = pd.read_pickle(
dk.data_path / str(dk.model_filename + "_trained_df.pkl")
dk.data_path / f"{dk.model_filename}_trained_df.pkl"
)
# try to access model in memory instead of loading object from disk to save time
if dk.live and dk.model_filename in self.model_dictionary:
model = self.model_dictionary[dk.model_filename]
elif not dk.keras:
model = load(dk.data_path / str(dk.model_filename + "_model.joblib"))
model = load(dk.data_path / f"{dk.model_filename}_model.joblib")
else:
from tensorflow import keras
model = keras.models.load_model(dk.data_path / str(dk.model_filename + "_model.h5"))
model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
if Path(dk.data_path / str(dk.model_filename + "_svm_model.joblib")).resolve().exists():
dk.svm_model = load(dk.data_path / str(dk.model_filename + "_svm_model.joblib"))
if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
if not model:
raise OperationalException(

@@ -458,7 +528,7 @@ class FreqaiDataDrawer:
if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
dk.pca = cloudpickle.load(
open(dk.data_path / str(dk.model_filename + "_pca_object.pkl"), "rb")
open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "rb")
)
return model

@@ -471,7 +541,7 @@ class FreqaiDataDrawer:
:params:
dataframe: DataFrame = strategy provided dataframe
"""
feat_params = self.freqai_info.get("feature_parameters", {})
feat_params = self.freqai_info["feature_parameters"]
with self.history_lock:
history_data = self.historic_data

@@ -524,7 +594,7 @@ class FreqaiDataDrawer:
for pair in dk.all_pairs:
if pair not in history_data:
history_data[pair] = {}
for tf in self.freqai_info.get("feature_parameters", {}).get("include_timeframes"):
for tf in self.freqai_info["feature_parameters"].get("include_timeframes"):
history_data[pair][tf] = load_pair_history(
datadir=self.config["datadir"],
timeframe=tf,

@@ -550,11 +620,11 @@ class FreqaiDataDrawer:
corr_dataframes: Dict[Any, Any] = {}
base_dataframes: Dict[Any, Any] = {}
historic_data = self.historic_data
pairs = self.freqai_info.get("feature_parameters", {}).get(
pairs = self.freqai_info["feature_parameters"].get(
"include_corr_pairlist", []
)
for tf in self.freqai_info.get("feature_parameters", {}).get("include_timeframes"):
for tf in self.freqai_info["feature_parameters"].get("include_timeframes"):
base_dataframes[tf] = dk.slice_dataframe(timerange, historic_data[pair][tf])
if pairs:
for p in pairs:
@@ -10,13 +10,16 @@ import numpy.typing as npt
import pandas as pd
from pandas import DataFrame
from sklearn import linear_model
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from freqtrade.configuration import TimeRange
from freqtrade.data.dataprovider import DataProvider
from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data
from freqtrade.exceptions import OperationalException
from freqtrade.resolvers import ExchangeResolver
from freqtrade.exchange import timeframe_to_seconds
from freqtrade.strategy.interface import IStrategy

@@ -39,7 +42,7 @@ class FreqaiDataKitchen:
Robert Caulk @robcaulk
Theoretical brainstorming:
Elin Törnquist @thorntwig
Elin Törnquist @th0rntwig
Code review, software architecture brainstorming:
@xmatthias

@@ -55,10 +58,10 @@ class FreqaiDataKitchen:
live: bool = False,
pair: str = "",
):
self.data: Dict[Any, Any] = {}
self.data_dictionary: Dict[Any, Any] = {}
self.data: Dict[str, Any] = {}
self.data_dictionary: Dict[str, DataFrame] = {}
self.config = config
self.freqai_config = config["freqai"]
self.freqai_config: Dict[str, Any] = config["freqai"]
self.full_df: DataFrame = DataFrame()
self.append_df: DataFrame = DataFrame()
self.data_path = Path()

@@ -68,14 +71,14 @@ class FreqaiDataKitchen:
self.live = live
self.pair = pair
self.svm_model: linear_model.SGDOneClassSVM = None
self.keras = self.freqai_config.get("keras", False)
self.keras: bool = self.freqai_config.get("keras", False)
self.set_all_pairs()
if not self.live:
if not self.config["timerange"]:
raise OperationalException(
'Please pass --timerange if you intend to use FreqAI for backtesting.')
self.full_timerange = self.create_fulltimerange(
self.config["timerange"], self.freqai_config.get("train_period_days")
self.config["timerange"], self.freqai_config.get("train_period_days", 0)
)
(self.training_timeranges, self.backtesting_timeranges) = self.split_timerange(

@@ -84,6 +87,10 @@ class FreqaiDataKitchen:
config["freqai"]["backtest_period_days"],
)
self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {})
self.thread_count = self.freqai_config.get("data_kitchen_thread_count", -1)
self.train_dates: DataFrame = pd.DataFrame()
def set_paths(
self,
pair: str,

@@ -101,7 +108,7 @@ class FreqaiDataKitchen:
self.data_path = Path(
self.full_path
/ str("sub-train" + "-" + pair.split("/")[0] + "_" + str(trained_timestamp))
/ f"sub-train-{pair.split('/')[0]}_{trained_timestamp}"
)
return

@@ -116,7 +123,7 @@ class FreqaiDataKitchen:
:filtered_dataframe: cleaned dataframe ready to be split.
:labels: cleaned labels ready to be split.
"""
feat_dict = self.freqai_config.get("feature_parameters", {})
feat_dict = self.freqai_config["feature_parameters"]
weights: npt.ArrayLike
if feat_dict.get("weight_factor", 0) > 0:

@@ -188,20 +195,23 @@ class FreqaiDataKitchen:
drop_index = pd.isnull(filtered_dataframe).any(1)  # get the rows that have NaNs,
drop_index = drop_index.replace(True, 1).replace(False, 0)  # pep8 requirement.
if (
training_filter
): # we don't care about total row number (total no. datapoints) in training, we only care
if (training_filter):
# we don't care about total row number (total no. datapoints) in training, we only care
# about removing any row with NaNs
# if labels has multiple columns (user wants to train multiple models), we detect here
# if labels has multiple columns (user wants to train multiple modelEs), we detect here
labels = unfiltered_dataframe.filter(label_list, axis=1)
drop_index_labels = pd.isnull(labels).any(1)
drop_index_labels = drop_index_labels.replace(True, 1).replace(False, 0)
dates = unfiltered_dataframe['date']
filtered_dataframe = filtered_dataframe[
(drop_index == 0) & (drop_index_labels == 0)
] # dropping values
labels = labels[
(drop_index == 0) & (drop_index_labels == 0)
] # assuming the labels depend entirely on the dataframe here.
self.train_dates = dates[
(drop_index == 0) & (drop_index_labels == 0)
]
logger.info(
f"dropped {len(unfiltered_dataframe) - len(filtered_dataframe)} training points"
f" due to NaNs in populated dataset {len(unfiltered_dataframe)}."
@@ -252,6 +262,7 @@ class FreqaiDataKitchen:
"test_labels": test_labels,
"train_weights": train_weights,
"test_weights": test_weights,
"train_dates": self.train_dates
}
return self.data_dictionary

@@ -279,7 +290,7 @@ class FreqaiDataKitchen:
self.data[item + "_min"] = train_min[item]
for item in data_dictionary["train_labels"].keys():
if data_dictionary["train_labels"][item].dtype == str:
if data_dictionary["train_labels"][item].dtype == object:
continue
train_labels_max = data_dictionary["train_labels"][item].max()
train_labels_min = data_dictionary["train_labels"][item].min()

@@ -305,8 +316,7 @@ class FreqaiDataKitchen:
"""
Normalize a set of data using the mean and standard deviation from
the associated training data.
:params:
:df: Dataframe to be standardized
:param df: Dataframe to be standardized
"""
for item in df.keys():

@@ -323,12 +333,11 @@ class FreqaiDataKitchen:
"""
Normalize a set of data using the mean and standard deviation from
the associated training data.
:params:
:df: Dataframe of predictions to be denormalized
:param df: Dataframe of predictions to be denormalized
"""
for label in self.label_list:
if df[label].dtype == str:
for label in df.columns:
if df[label].dtype == object:
continue
df[label] = (
(df[label] + 1)

@@ -339,7 +348,7 @@ class FreqaiDataKitchen:
return df
def split_timerange(
self, tr: str, train_split: int = 28, bt_split: int = 7
self, tr: str, train_split: int = 28, bt_split: float = 7
) -> Tuple[list, list]:
"""
Function which takes a single time range (tr) and splits it

@@ -347,12 +356,12 @@ class FreqaiDataKitchen:
tr: str, full timerange to train on
train_split: the period length for the each training (days). Specified in user
configuration file
bt_split: the backtesting length (dats). Specified in user configuration file
bt_split: the backtesting length (days). Specified in user configuration file
"""
if not isinstance(train_split, int) or train_split < 1:
raise OperationalException(
"train_period_days must be an integer greater than 0. " f"Got {train_split}."
f"train_period_days must be an integer greater than 0. Got {train_split}."
)
train_period_days = train_split * SECONDS_IN_DAY
bt_period = bt_split * SECONDS_IN_DAY

@@ -374,7 +383,7 @@ class FreqaiDataKitchen:
while True:
if not first:
timerange_train.startts = timerange_train.startts + bt_period
timerange_train.startts = timerange_train.startts + int(bt_period)
timerange_train.stopts = timerange_train.startts + train_period_days
first = False

@@ -387,7 +396,7 @@ class FreqaiDataKitchen:
timerange_backtest.startts = timerange_train.stopts
timerange_backtest.stopts = timerange_backtest.startts + bt_period
timerange_backtest.stopts = timerange_backtest.startts + int(bt_period)
if timerange_backtest.stopts > config_timerange.stopts:
timerange_backtest.stopts = config_timerange.stopts

@@ -408,9 +417,8 @@ class FreqaiDataKitchen:
def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame:
"""
Given a full dataframe, extract the user desired window
:params:
:tr: timerange string that we wish to extract from df
:df: Dataframe containing all candles to run the entire backtest. Here
:param tr: timerange string that we wish to extract from df
:param df: Dataframe containing all candles to run the entire backtest. Here
it is sliced down to just the present training period.
"""

@@ -489,11 +497,10 @@ class FreqaiDataKitchen:
point. This metric defines the neighborhood of trained data and is used
for prediction confidence in the Dissimilarity Index
"""
logger.info("computing average mean distance for all training points")
tc = self.freqai_config.get("model_training_parameters", {}).get("thread_count", -1)
pairwise = pairwise_distances(self.data_dictionary["train_features"], n_jobs=tc)
# logger.info("computing average mean distance for all training points")
pairwise = pairwise_distances(
self.data_dictionary["train_features"], n_jobs=self.thread_count)
avg_mean_dist = pairwise.mean(axis=1).mean()
logger.info(f"avg_mean_dist {avg_mean_dist:.2f}")
return avg_mean_dist
@@ -515,21 +522,22 @@ class FreqaiDataKitchen:
return
if predict:
assert self.svm_model, "No svm model available for outlier removal"
if not self.svm_model:
logger.warning("No svm model available for outlier removal")
return
y_pred = self.svm_model.predict(self.data_dictionary["prediction_features"])
do_predict = np.where(y_pred == -1, 0, y_pred)
if (len(do_predict) - do_predict.sum()) > 0:
logger.info(
f"svm_remove_outliers() tossed {len(do_predict) - do_predict.sum()} predictions"
)
logger.info(f"SVM tossed {len(do_predict) - do_predict.sum()} predictions.")
self.do_predict += do_predict
self.do_predict -= 1
else:
# use SGDOneClassSVM to increase speed?
nu = self.freqai_config.get("feature_parameters", {}).get("svm_nu", 0.2)
self.svm_model = linear_model.SGDOneClassSVM(nu=nu).fit(
svm_params = self.freqai_config["feature_parameters"].get(
"svm_params", {"shuffle": False, "nu": 0.1})
self.svm_model = linear_model.SGDOneClassSVM(**svm_params).fit(
self.data_dictionary["train_features"]
)
y_pred = self.svm_model.predict(self.data_dictionary["train_features"])

@@ -546,12 +554,14 @@ class FreqaiDataKitchen:
]
logger.info(
f"svm_remove_outliers() tossed {len(y_pred) - dropped_points.sum()}"
f" train points from {len(y_pred)}"
f"SVM tossed {len(y_pred) - dropped_points.sum()}"
f" train points from {len(y_pred)} total points."
)
# same for test data
if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
# TODO: This (and the part above) could be refactored into a separate function
# to reduce code duplication
if self.freqai_config['data_split_parameters'].get('test_size', 0.1) != 0:
y_pred = self.svm_model.predict(self.data_dictionary["test_features"])
dropped_points = np.where(y_pred == -1, 0, y_pred)
self.data_dictionary["test_features"] = self.data_dictionary["test_features"][

@@ -564,8 +574,77 @@ class FreqaiDataKitchen:
]
logger.info(
f"svm_remove_outliers() tossed {len(y_pred) - dropped_points.sum()}"
f" test points from {len(y_pred)}"
f"SVM tossed {len(y_pred) - dropped_points.sum()}"
f" test points from {len(y_pred)} total points."
)
return
def use_DBSCAN_to_remove_outliers(self, predict: bool, eps=None) -> None:
"""
Use DBSCAN to cluster training data and remove "noisy" data (read outliers).
User controls this via the config param `DBSCAN_outlier_pct` which indicates the
pct of training data that they want to be considered outliers.
:params:
predict: bool = If False (training), iterate to find the best hyper parameters to match
user requested outlier percent target. If True (prediction), use the parameters
determined from the previous training to estimate if the current prediction point
is an outlier.
"""
if predict:
train_ft_df = self.data_dictionary['train_features']
pred_ft_df = self.data_dictionary['prediction_features']
num_preds = len(pred_ft_df)
df = pd.concat([train_ft_df, pred_ft_df], axis=0, ignore_index=True)
clustering = DBSCAN(eps=self.data['DBSCAN_eps'],
min_samples=self.data['DBSCAN_min_samples'],
n_jobs=self.thread_count
).fit(df)
do_predict = np.where(clustering.labels_[-num_preds:] == -1, 0, 1)
if (len(do_predict) - do_predict.sum()) > 0:
logger.info(f"DBSCAN tossed {len(do_predict) - do_predict.sum()} predictions")
self.do_predict += do_predict
self.do_predict -= 1
else:
MinPts = len(self.data_dictionary['train_features'].columns) * 2
# measure pairwise distances to train_features.shape[1]*2 nearest neighbours
neighbors = NearestNeighbors(
n_neighbors=MinPts, n_jobs=self.thread_count)
neighbors_fit = neighbors.fit(self.data_dictionary['train_features'])
distances, _ = neighbors_fit.kneighbors(self.data_dictionary['train_features'])
distances = np.sort(distances, axis=0)
index_ten_pct = int(len(distances[:, 1]) * 0.1)
distances = distances[index_ten_pct:, 1]
epsilon = distances[-1]
clustering = DBSCAN(eps=epsilon, min_samples=MinPts,
n_jobs=int(self.thread_count)).fit(
self.data_dictionary['train_features']
)
logger.info(f'DBSCAN found eps of {epsilon}.')
self.data['DBSCAN_eps'] = epsilon
self.data['DBSCAN_min_samples'] = MinPts
dropped_points = np.where(clustering.labels_ == -1, 1, 0)
self.data_dictionary['train_features'] = self.data_dictionary['train_features'][
(clustering.labels_ != -1)
]
self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][
(clustering.labels_ != -1)
]
self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][
(clustering.labels_ != -1)
]
logger.info(
f"DBSCAN tossed {dropped_points.sum()}"
f" train points from {len(clustering.labels_)}"
)
return
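The training branch of the new DBSCAN method derives eps from a k-distance curve: distances to the MinPts-th nearest neighbour are computed, sorted, the lowest 10% are discarded, and the largest remaining distance becomes eps. A compact standalone sketch of that heuristic on toy data (sklearn assumed installed; random data stands in for the training features):

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors

rng = np.random.default_rng(42)
train_features = rng.normal(size=(500, 10))      # stand-in for data_dictionary['train_features']

min_pts = train_features.shape[1] * 2            # MinPts = 2 * number of features
neighbors = NearestNeighbors(n_neighbors=min_pts).fit(train_features)
distances, _ = neighbors.kneighbors(train_features)

distances = np.sort(distances, axis=0)           # k-distance curve, per column
index_ten_pct = int(len(distances[:, 1]) * 0.1)
epsilon = distances[index_ten_pct:, 1][-1]       # largest distance after dropping lowest 10%

clustering = DBSCAN(eps=epsilon, min_samples=min_pts).fit(train_features)
outliers = int((clustering.labels_ == -1).sum())
print(f"eps={epsilon:.3f}, tossed {outliers} of {len(train_features)} train points")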
@@ -573,9 +652,8 @@ class FreqaiDataKitchen:
def find_features(self, dataframe: DataFrame) -> None:
"""
Find features in the strategy provided dataframe
:params:
dataframe: DataFrame = strategy provided dataframe
:returns:
:param dataframe: DataFrame = strategy provided dataframe
:return:
features: list = the features to be used for training/prediction
"""
column_names = dataframe.columns

@@ -586,7 +664,6 @@ class FreqaiDataKitchen:
self.training_features_list = features
self.label_list = labels
# return features, labels
def check_if_pred_in_training_spaces(self) -> None:
"""

@@ -599,13 +676,13 @@ class FreqaiDataKitchen:
distance = pairwise_distances(
self.data_dictionary["train_features"],
self.data_dictionary["prediction_features"],
n_jobs=-1,
n_jobs=self.thread_count,
)
self.DI_values = distance.min(axis=0) / self.data["avg_mean_dist"]
do_predict = np.where(
self.DI_values < self.freqai_config.get("feature_parameters", {}).get("DI_threshold"),
self.DI_values < self.freqai_config["feature_parameters"]["DI_threshold"],
1,
0,
)

@@ -628,25 +705,27 @@ class FreqaiDataKitchen:
weights = np.exp(-np.arange(num_weights) / (wfactor * num_weights))[::-1]
return weights
def append_predictions(self, predictions, do_predict, len_dataframe):
def append_predictions(self, predictions: DataFrame, do_predict: npt.ArrayLike) -> None:
"""
Append backtest prediction from current backtest period to all previous periods
"""
self.append_df = DataFrame()
for label in self.label_list:
self.append_df[label] = predictions[label]
self.append_df[f"{label}_mean"] = self.data["labels_mean"][label]
self.append_df[f"{label}_std"] = self.data["labels_std"][label]
append_df = DataFrame()
for label in predictions.columns:
append_df[label] = predictions[label]
if append_df[label].dtype == object:
continue
append_df[f"{label}_mean"] = self.data["labels_mean"][label]
append_df[f"{label}_std"] = self.data["labels_std"][label]
self.append_df["do_predict"] = do_predict
if self.freqai_config.get("feature_parameters", {}).get("DI_threshold", 0) > 0:
self.append_df["DI_values"] = self.DI_values
append_df["do_predict"] = do_predict
if self.freqai_config["feature_parameters"].get("DI_threshold", 0) > 0:
append_df["DI_values"] = self.DI_values
if self.full_df.empty:
self.full_df = self.append_df
self.full_df = append_df
else:
self.full_df = pd.concat([self.full_df, self.append_df], axis=0)
self.full_df = pd.concat([self.full_df, append_df], axis=0)
return

@@ -666,7 +745,6 @@ class FreqaiDataKitchen:
to_keep = [col for col in dataframe.columns if not col.startswith("&")]
self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1)
self.append_df = DataFrame()
self.full_df = DataFrame()
return

@@ -683,7 +761,7 @@ class FreqaiDataKitchen:
if backtest_timerange.stopts == 0:
# typically open ended time ranges do work, however, there are some edge cases where
# it does not. accomodating these kinds of edge cases just to allow open-ended
# it does not. accommodating these kinds of edge cases just to allow open-ended
# timerange is not high enough priority to warrant the effort. It is safer for now
# to simply ask user to add their end date
raise OperationalException("FreqAI backtesting does not allow open ended timeranges. "

@@ -701,7 +779,7 @@ class FreqaiDataKitchen:
full_timerange = start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d")
self.full_path = Path(
self.config["user_data_dir"] / "models" / str(self.freqai_config.get("identifier"))
self.config["user_data_dir"] / "models" / f"{self.freqai_config['identifier']}"
)
config_path = Path(self.config["config_files"][0])

@@ -719,9 +797,8 @@ class FreqaiDataKitchen:
"""
A model age checker to determine if the model is trustworthy based on user defined
`expiration_hours` in the configuration file.
:params:
trained_timestamp: int = The time of training for the most recent model.
:returns:
:param trained_timestamp: int = The time of training for the most recent model.
:return:
bool = If the model is expired or not.
"""
time = datetime.datetime.now(tz=datetime.timezone.utc).timestamp()

@@ -740,30 +817,21 @@ class FreqaiDataKitchen:
trained_timerange = TimeRange()
data_load_timerange = TimeRange()
# find the max indicator length required
max_timeframe_chars = self.freqai_config.get("feature_parameters", {}).get(
"include_timeframes"
)[-1]
max_period = self.freqai_config.get("feature_parameters", {}).get(
"indicator_max_period_candles", 50
)
additional_seconds = 0
if max_timeframe_chars[-1] == "d":
additional_seconds = max_period * SECONDS_IN_DAY * int(max_timeframe_chars[-2])
elif max_timeframe_chars[-1] == "h":
additional_seconds = max_period * 3600 * int(max_timeframe_chars[-2])
elif max_timeframe_chars[-1] == "m":
if len(max_timeframe_chars) == 2:
additional_seconds = max_period * 60 * int(max_timeframe_chars[-2])
elif len(max_timeframe_chars) == 3:
additional_seconds = max_period * 60 * int(float(max_timeframe_chars[0:2]))
else:
logger.warning(
"FreqAI could not detect max timeframe and therefore may not "
"download the proper amount of data for training"
)
timeframes = self.freqai_config["feature_parameters"].get("include_timeframes")
# logger.info(f'Extending data download by {additional_seconds/SECONDS_IN_DAY:.2f} days')
max_tf_seconds = 0
for tf in timeframes:
secs = timeframe_to_seconds(tf)
if secs > max_tf_seconds:
max_tf_seconds = secs
# We notice that users like to use exotic indicators where
# they do not know the required timeperiod. Here we include a factor
# of safety by multiplying the user considered "max" by 2.
max_period = self.freqai_config["feature_parameters"].get(
"indicator_max_period_candles", 20
) * 2
additional_seconds = max_period * max_tf_seconds
if trained_timestamp != 0:
elapsed_time = (time - trained_timestamp) / SECONDS_IN_HOUR
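The rewritten window calculation multiplies the doubled indicator_max_period_candles by the largest include_timeframes interval (converted to seconds) to decide how much extra history to download ahead of the training window. A small sketch of the same arithmetic (the timeframe parser below is simplified and is not freqtrade's timeframe_to_seconds):

SECONDS_IN_DAY = 86400

def timeframe_seconds(tf: str) -> int:
    # Simplified parser for the usual '5m' / '1h' / '1d' style timeframes.
    units = {"m": 60, "h": 3600, "d": SECONDS_IN_DAY}
    return int(tf[:-1]) * units[tf[-1]]

include_timeframes = ["3m", "15m", "4h"]
indicator_max_period_candles = 20

max_tf_seconds = max(timeframe_seconds(tf) for tf in include_timeframes)
additional_seconds = (indicator_max_period_candles * 2) * max_tf_seconds

print(f"extending data download by {additional_seconds / SECONDS_IN_DAY:.2f} days")
# 40 candles of 4h = 160h, roughly 6.67 extra days of history before the training window.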
@@ -784,7 +852,7 @@ class FreqaiDataKitchen:
data_load_timerange.stopts = int(time)
else: # user passed no live_trained_timerange in config
trained_timerange.startts = int(
time - self.freqai_config.get("train_period_days") * SECONDS_IN_DAY
time - self.freqai_config.get("train_period_days", 0) * SECONDS_IN_DAY
)
trained_timerange.stopts = int(time)

@@ -815,24 +883,22 @@ class FreqaiDataKitchen:
self.model_filename = f"cb_{coin.lower()}_{int(trained_timerange.stopts)}"
def download_all_data_for_training(self, timerange: TimeRange) -> None:
def download_all_data_for_training(self, timerange: TimeRange, dp: DataProvider) -> None:
"""
Called only once upon start of bot to download the necessary data for
populating indicators and training the model.
:params:
timerange: TimeRange = The full data timerange for populating the indicators
:param timerange: TimeRange = The full data timerange for populating the indicators
and training the model.
:param dp: DataProvider instance attached to the strategy
"""
exchange = ExchangeResolver.load_exchange(
self.config["exchange"]["name"], self.config, validate=False, load_leverage_tiers=False
)
new_pairs_days = int((timerange.stopts - timerange.startts) / SECONDS_IN_DAY)
if not dp._exchange:
# Not realistic - this is only called in live mode.
raise OperationalException("Dataprovider did not have an exchange attached.")
refresh_backtest_ohlcv_data(
exchange,
dp._exchange,
pairs=self.all_pairs,
timeframes=self.freqai_config.get("feature_parameters", {}).get("include_timeframes"),
timeframes=self.freqai_config["feature_parameters"].get("include_timeframes"),
datadir=self.config["datadir"],
timerange=timerange,
new_pairs_days=new_pairs_days,

@@ -845,7 +911,7 @@ class FreqaiDataKitchen:
def set_all_pairs(self) -> None:
self.all_pairs = copy.deepcopy(
self.freqai_config.get("feature_parameters", {}).get("include_corr_pairlist", [])
self.freqai_config["feature_parameters"].get("include_corr_pairlist", [])
)
for pair in self.config.get("exchange", "").get("pair_whitelist"):
if pair not in self.all_pairs:

@@ -876,8 +942,8 @@ class FreqaiDataKitchen:
# for prediction dataframe creation, we let dataprovider handle everything in the strategy
# so we create empty dictionaries, which allows us to pass None to
# `populate_any_indicators()`. Signaling we want the dp to give us the live dataframe.
tfs = self.freqai_config.get("feature_parameters", {}).get("include_timeframes")
pairs = self.freqai_config.get("feature_parameters", {}).get("include_corr_pairlist", [])
tfs = self.freqai_config["feature_parameters"].get("include_timeframes")
pairs = self.freqai_config["feature_parameters"].get("include_corr_pairlist", [])
if not prediction_dataframe.empty:
dataframe = prediction_dataframe.copy()
for tf in tfs:

@@ -889,29 +955,26 @@ class FreqaiDataKitchen:
else:
dataframe = base_dataframes[self.config["timeframe"]].copy()
sgi = True
sgi = False
for tf in tfs:
if tf == tfs[-1]:
sgi = True  # doing this last allows user to use all tf raw prices in labels
dataframe = strategy.populate_any_indicators(
pair,
pair,
dataframe.copy(),
tf,
informative=base_dataframes[tf],
coin=pair.split("/")[0] + "-",
set_generalized_indicators=sgi,
set_generalized_indicators=sgi
)
sgi = False
if pairs:
for i in pairs:
if pair in i:
continue  # dont repeat anything from whitelist
dataframe = strategy.populate_any_indicators(
pair,
i,
dataframe.copy(),
tf,
informative=corr_dataframes[i][tf],
coin=i.split("/")[0] + "-",
informative=corr_dataframes[i][tf]
)
return dataframe

@@ -923,17 +986,12 @@ class FreqaiDataKitchen:
import scipy as spy
self.data["labels_mean"], self.data["labels_std"] = {}, {}
for label in self.label_list:
for label in self.data_dictionary["train_labels"].columns:
if self.data_dictionary["train_labels"][label].dtype == object:
continue
f = spy.stats.norm.fit(self.data_dictionary["train_labels"][label])
self.data["labels_mean"][label], self.data["labels_std"][label] = f[0], f[1]
# KEEPME incase we want to let user start to grab quantiles.
# upper_q = spy.stats.norm.ppf(self.freqai_config['feature_parameters'][
#     'target_quantile'], *f)
# lower_q = spy.stats.norm.ppf(1 - self.freqai_config['feature_parameters'][
#     'target_quantile'], *f)
# self.data["upper_quantile"] = upper_q
# self.data["lower_quantile"] = lower_q
return
def remove_features_from_df(self, dataframe: DataFrame) -> DataFrame:
@@ -945,168 +1003,3 @@ class FreqaiDataKitchen:
col for col in dataframe.columns if not col.startswith("%") or col.startswith("%%")
]
return dataframe[to_keep]
def np_encoder(self, object):
if isinstance(object, np.generic):
return object.item()
# Functions containing useful data manpulation examples. but not actively in use.
# Possibly phasing these outlier removal methods below out in favor of
# use_SVM_to_remove_outliers (computationally more efficient and apparently higher performance).
# But these have good data manipulation examples, so keep them commented here for now.
# def determine_statistical_distributions(self) -> None:
#     from fitter import Fitter
#     logger.info('Determining best model for all features, may take some time')
#     def compute_quantiles(ft):
#         f = Fitter(self.data_dictionary["train_features"][ft],
#                    distributions=['gamma', 'cauchy', 'laplace',
#                                   'beta', 'uniform', 'lognorm'])
#         f.fit()
#         # f.summary()
#         dist = list(f.get_best().items())[0][0]
#         params = f.get_best()[dist]
#         upper_q = getattr(spy.stats, list(f.get_best().items())[0][0]).ppf(0.999, **params)
#         lower_q = getattr(spy.stats, list(f.get_best().items())[0][0]).ppf(0.001, **params)
#         return ft, upper_q, lower_q, dist
#     quantiles_tuple = Parallel(n_jobs=-1)(
#         delayed(compute_quantiles)(ft) for ft in self.data_dictionary[
#             'train_features'].columns)
#     df = pd.DataFrame(quantiles_tuple, columns=['features', 'upper_quantiles',
#                                                 'lower_quantiles', 'dist'])
#     self.data_dictionary['upper_quantiles'] = df['upper_quantiles']
#     self.data_dictionary['lower_quantiles'] = df['lower_quantiles']
#     return
# def remove_outliers(self, predict: bool) -> None:
#     """
#     Remove data that looks like an outlier based on the distribution of each
#     variable.
#     :params:
#     :predict: boolean which tells the function if this is prediction data or
#     training data coming in.
#     """
#     lower_quantile = self.data_dictionary["lower_quantiles"].to_numpy()
#     upper_quantile = self.data_dictionary["upper_quantiles"].to_numpy()
#     if predict:
#         df = self.data_dictionary["prediction_features"][
#             (self.data_dictionary["prediction_features"] < upper_quantile)
#             & (self.data_dictionary["prediction_features"] > lower_quantile)
#         ]
#         drop_index = pd.isnull(df).any(1)
#         self.data_dictionary["prediction_features"].fillna(0, inplace=True)
#         drop_index = ~drop_index
#         do_predict = np.array(drop_index.replace(True, 1).replace(False, 0))
#         logger.info(
#             "remove_outliers() tossed %s predictions",
#             len(do_predict) - do_predict.sum(),
#         )
#         self.do_predict += do_predict
#         self.do_predict -= 1
#     else:
#         filter_train_df = self.data_dictionary["train_features"][
#             (self.data_dictionary["train_features"] < upper_quantile)
#             & (self.data_dictionary["train_features"] > lower_quantile)
#         ]
#         drop_index = pd.isnull(filter_train_df).any(1)
#         drop_index = drop_index.replace(True, 1).replace(False, 0)
#         self.data_dictionary["train_features"] = self.data_dictionary["train_features"][
#             (drop_index == 0)
#         ]
#         self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][
#             (drop_index == 0)
#         ]
#         self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][
#             (drop_index == 0)
#         ]
#         logger.info(
#             f'remove_outliers() tossed {drop_index.sum()}'
#             f' training points from {len(filter_train_df)}'
#         )
#         # do the same for the test data
#         filter_test_df = self.data_dictionary["test_features"][
#             (self.data_dictionary["test_features"] < upper_quantile)
#             & (self.data_dictionary["test_features"] > lower_quantile)
#         ]
#         drop_index = pd.isnull(filter_test_df).any(1)
#         drop_index = drop_index.replace(True, 1).replace(False, 0)
#         self.data_dictionary["test_labels"] = self.data_dictionary["test_labels"][
#             (drop_index == 0)
#         ]
#         self.data_dictionary["test_features"] = self.data_dictionary["test_features"][
#             (drop_index == 0)
#         ]
#         self.data_dictionary["test_weights"] = self.data_dictionary["test_weights"][
#             (drop_index == 0)
#         ]
#         logger.info(
#             f'remove_outliers() tossed {drop_index.sum()}'
#             f' test points from {len(filter_test_df)}'
#         )
#         return
# def standardize_data(self, data_dictionary: Dict) -> Dict[Any, Any]:
#     """
#     standardize all data in the data_dictionary according to the training dataset
#     :params:
#     :data_dictionary: dictionary containing the cleaned and split training/test data/labels
#     :returns:
#     :data_dictionary: updated dictionary with standardized values.
#     """
#     # standardize the data by training stats
# train_mean = data_dictionary["train_features"].mean()
|
||||
# train_std = data_dictionary["train_features"].std()
|
||||
# data_dictionary["train_features"] = (
|
||||
# data_dictionary["train_features"] - train_mean
|
||||
# ) / train_std
|
||||
# data_dictionary["test_features"] = (
|
||||
# data_dictionary["test_features"] - train_mean
|
||||
# ) / train_std
|
||||
|
||||
# train_labels_std = data_dictionary["train_labels"].std()
|
||||
# train_labels_mean = data_dictionary["train_labels"].mean()
|
||||
# data_dictionary["train_labels"] = (
|
||||
# data_dictionary["train_labels"] - train_labels_mean
|
||||
# ) / train_labels_std
|
||||
# data_dictionary["test_labels"] = (
|
||||
# data_dictionary["test_labels"] - train_labels_mean
|
||||
# ) / train_labels_std
|
||||
|
||||
# for item in train_std.keys():
|
||||
# self.data[item + "_std"] = train_std[item]
|
||||
# self.data[item + "_mean"] = train_mean[item]
|
||||
|
||||
# self.data["labels_std"] = train_labels_std
|
||||
# self.data["labels_mean"] = train_labels_mean
|
||||
|
||||
# return data_dictionary
|
||||
|
||||
# def standardize_data_from_metadata(self, df: DataFrame) -> DataFrame:
|
||||
# """
|
||||
# Normalizes a set of data using the mean and standard deviation from
|
||||
# the associated training data.
|
||||
# :params:
|
||||
# :df: Dataframe to be standardized
|
||||
# """
|
||||
|
||||
# for item in df.keys():
|
||||
# df[item] = (df[item] - self.data[item + "_mean"]) / self.data[item + "_std"]
|
||||
|
||||
# return df
|
||||
|
@@ -1,7 +1,5 @@
# import contextlib
import copy
import datetime
import gc
import logging
import shutil
import threading
@@ -12,7 +10,7 @@ from typing import Any, Dict, Tuple

import numpy as np
import pandas as pd
from numpy.typing import ArrayLike
from numpy.typing import NDArray
from pandas import DataFrame

from freqtrade.configuration import TimeRange
@@ -47,7 +45,7 @@ class IFreqaiModel(ABC):
Robert Caulk @robcaulk

Theoretical brainstorming:
Elin Törnquist @thorntwig
Elin Törnquist @th0rntwig

Code review, software architecture brainstorming:
@xmatthias
@@ -82,6 +80,8 @@ class IFreqaiModel(ABC):
self.CONV_WIDTH = self.freqai_info.get("conv_width", 2)
self.pair_it = 0
self.total_pairs = len(self.config.get("exchange", {}).get("pair_whitelist"))
self.last_trade_database_summary: DataFrame = {}
self.current_trade_database_summary: DataFrame = {}

def assert_config(self, config: Dict[str, Any]) -> None:

@ -123,7 +123,7 @@ class IFreqaiModel(ABC):
|
||||
|
||||
dataframe = dk.remove_features_from_df(dk.return_dataframe)
|
||||
del dk
|
||||
return self.return_values(dataframe)
|
||||
return dataframe
|
||||
|
||||
@threaded
|
||||
def start_scanning(self, strategy: IStrategy) -> None:
|
||||
@ -183,8 +183,6 @@ class IFreqaiModel(ABC):
|
||||
(_, _, _) = self.dd.get_pair_dict_info(metadata["pair"])
|
||||
train_it += 1
|
||||
total_trains = len(dk.backtesting_timeranges)
|
||||
gc.collect()
|
||||
dk.data = {} # clean the pair specific data between training window sliding
|
||||
self.training_timerange = tr_train
|
||||
dataframe_train = dk.slice_dataframe(tr_train, dataframe)
|
||||
dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe)
|
||||
@ -204,13 +202,8 @@ class IFreqaiModel(ABC):
|
||||
|
||||
dk.data_path = Path(
|
||||
dk.full_path
|
||||
/ str(
|
||||
"sub-train"
|
||||
+ "-"
|
||||
+ metadata["pair"].split("/")[0]
|
||||
+ "_"
|
||||
+ str(int(trained_timestamp.stopts))
|
||||
)
|
||||
/
|
||||
f"sub-train-{metadata['pair'].split('/')[0]}_{int(trained_timestamp.stopts)}"
|
||||
)
|
||||
if not self.model_exists(
|
||||
metadata["pair"], dk, trained_timestamp=int(trained_timestamp.stopts)
|
||||
@ -228,7 +221,7 @@ class IFreqaiModel(ABC):
|
||||
|
||||
pred_df, do_preds = self.predict(dataframe_backtest, dk)
|
||||
|
||||
dk.append_predictions(pred_df, do_preds, len(dataframe_backtest))
|
||||
dk.append_predictions(pred_df, do_preds)
|
||||
|
||||
dk.fill_predictions(dataframe)
|
||||
|
||||
@ -280,7 +273,7 @@ class IFreqaiModel(ABC):
|
||||
"corr_pairlist, this may take a while if you do not have the "
|
||||
"data saved"
|
||||
)
|
||||
dk.download_all_data_for_training(data_load_timerange)
|
||||
dk.download_all_data_for_training(data_load_timerange, strategy.dp)
|
||||
self.dd.load_all_pair_histories(data_load_timerange, dk)
|
||||
|
||||
if not self.scanning:
|
||||
@@ -331,7 +324,8 @@ class IFreqaiModel(ABC):
    return
elif self.dk.check_if_model_expired(trained_timestamp):
    pred_df = DataFrame(np.zeros((2, len(dk.label_list))), columns=dk.label_list)
    do_preds, dk.DI_values = np.ones(2) * 2, np.zeros(2)
    do_preds = np.ones(2, dtype=np.int_) * 2
    dk.DI_values = np.zeros(2)
    logger.warning(
        f"Model expired for {pair}, returning null values to strategy. Strategy "
        "construction should take care to consider this event with "
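Because an expired model hands back all-zero predictions with `do_predict` set to 2 (instead of the usual 0/1 described in the `predict()` docstring further down), a consuming strategy can gate its entries on `do_predict == 1`. A hedged sketch of such a gate; the `&-s_close` label name is an assumption:

```python
# Inside populate_entry_trend() of a FreqAI strategy (sketch only).
# do_predict: 1 = trusted prediction, 0 = dropped/uncertain data, 2 = model expired (see above).
df.loc[
    (df["do_predict"] == 1)
    & (df["&-s_close"] > 0),   # assumed label column
    "enter_long",
] = 1
```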
@@ -379,17 +373,25 @@ class IFreqaiModel(ABC):
example of how outlier data points are dropped from the dataframe used for training.
"""

if self.freqai_info.get("feature_parameters", {}).get(
if self.freqai_info["feature_parameters"].get(
    "principal_component_analysis", False
):
    dk.principal_component_analysis()

if self.freqai_info.get("feature_parameters", {}).get("use_SVM_to_remove_outliers", False):
if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False):
    dk.use_SVM_to_remove_outliers(predict=False)

if self.freqai_info.get("feature_parameters", {}).get("DI_threshold", 0):
if self.freqai_info["feature_parameters"].get("DI_threshold", 0):
    dk.data["avg_mean_dist"] = dk.compute_distances()

if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False):
    if dk.pair in self.dd.old_DBSCAN_eps:
        eps = self.dd.old_DBSCAN_eps[dk.pair]
    else:
        eps = None
    dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps)
    self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps']

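The toggles read here map directly onto the `feature_parameters` block of the freqai configuration. A sketch of that block as a Python dict; the values are illustrative, only the key names are taken from the checks above:

```python
# Illustrative values; key names come from the data_cleaning_train() checks above.
feature_parameters = {
    "principal_component_analysis": False,   # -> dk.principal_component_analysis()
    "use_SVM_to_remove_outliers": True,      # -> dk.use_SVM_to_remove_outliers(predict=False)
    "DI_threshold": 0.9,                     # truthy -> dk.compute_distances() / DI filtering
    "use_DBSCAN_to_remove_outliers": False,  # -> dk.use_DBSCAN_to_remove_outliers(), eps cached per pair
}
```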
def data_cleaning_predict(self, dk: FreqaiDataKitchen, dataframe: DataFrame) -> None:
    """
    Base data cleaning method for predict.
@@ -401,17 +403,20 @@ class IFreqaiModel(ABC):
    of how the do_predict vector is modified. do_predict is ultimately passed back to strategy
    for buy signals.
    """
    if self.freqai_info.get("feature_parameters", {}).get(
    if self.freqai_info["feature_parameters"].get(
        "principal_component_analysis", False
    ):
        dk.pca_transform(dataframe)

    if self.freqai_info.get("feature_parameters", {}).get("use_SVM_to_remove_outliers", False):
    if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False):
        dk.use_SVM_to_remove_outliers(predict=True)

    if self.freqai_info.get("feature_parameters", {}).get("DI_threshold", 0):
    if self.freqai_info["feature_parameters"].get("DI_threshold", 0):
        dk.check_if_pred_in_training_spaces()

    if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False):
        dk.use_DBSCAN_to_remove_outliers(predict=True)

def model_exists(
    self,
    pair: str,
@@ -430,9 +435,9 @@ class IFreqaiModel(ABC):
    coin, _ = pair.split("/")

    if not self.live:
        dk.model_filename = model_filename = "cb_" + coin.lower() + "_" + str(trained_timestamp)
        dk.model_filename = model_filename = f"cb_{coin.lower()}_{trained_timestamp}"

    path_to_modelfile = Path(dk.data_path / str(model_filename + "_model.joblib"))
    path_to_modelfile = Path(dk.data_path / f"{model_filename}_model.joblib")
    file_exists = path_to_modelfile.is_file()
    if file_exists and not scanning:
        logger.info("Found model at %s", dk.data_path / dk.model_filename)
@@ -442,7 +447,7 @@ class IFreqaiModel(ABC):

def set_full_path(self) -> None:
    self.full_path = Path(
        self.config["user_data_dir"] / "models" / str(self.freqai_info.get("identifier"))
        self.config["user_data_dir"] / "models" / f"{self.freqai_info['identifier']}"
    )
    self.full_path.mkdir(parents=True, exist_ok=True)
    shutil.copy(
@@ -500,13 +505,54 @@ class IFreqaiModel(ABC):
def set_initial_historic_predictions(
    self, df: DataFrame, model: Any, dk: FreqaiDataKitchen, pair: str
) -> None:
    trained_predictions = model.predict(df)
    """
    This function is called only if the datadrawer failed to load an
    existing set of historic predictions. In this case, it builds
    the structure and sets fake predictions off the first training
    data. After that, FreqAI will append new real predictions to the
    set of historic predictions.

    These values are used to generate live statistics which can be used
    in the strategy for adaptive values. E.g. &*_mean/std are quantities
    that can be computed based on live predictions from the set of historical
    predictions. Those values can be used in the user strategy to better
    assess prediction rarity, and thus wait for probabilistically favorable
    entries relative to the live historical predictions.

    If the user reuses an identifier on a subsequent instance,
    this function will not be called. In that case, "real" predictions
    will be appended to the loaded set of historic predictions.
    :param: df: DataFrame = the dataframe containing the training feature data
    :param: model: Any = A model which was `fit` using a common library such as
    catboost or lightgbm
    :param: dk: FreqaiDataKitchen = object containing methods for data analysis
    :param: pair: str = current pair
    """
    num_candles = self.freqai_info.get('fit_live_predictions_candles', 600)
    if not num_candles:
        num_candles = 600
    df_tail = df.tail(num_candles)
    trained_predictions = model.predict(df_tail)
    pred_df = DataFrame(trained_predictions, columns=dk.label_list)

    pred_df = dk.denormalize_labels_from_metadata(pred_df)

    self.dd.historic_predictions[pair] = pd.DataFrame()
    self.dd.historic_predictions[pair] = copy.deepcopy(pred_df)
    self.dd.historic_predictions[pair] = pred_df
    hist_preds_df = self.dd.historic_predictions[pair]

    for label in hist_preds_df.columns:
        if hist_preds_df[label].dtype == object:
            continue
        hist_preds_df[f'{label}_mean'] = 0
        hist_preds_df[f'{label}_std'] = 0

    hist_preds_df['do_predict'] = 0

    if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0:
        hist_preds_df['DI_values'] = 0

    for return_str in dk.data['extra_returns_per_train']:
        hist_preds_df[return_str] = 0

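As the docstring notes, the `{label}_mean` / `{label}_std` columns give the strategy a live baseline for judging how unusual the current prediction is. A small, hedged illustration of that "prediction rarity" idea (the label name is again an assumption):

```python
# Rarity of the latest prediction as a z-score against the rolling
# statistics FreqAI maintains for each label column.
label = "&-s_close"                              # assumed label name
z = (df[label] - df[f"{label}_mean"]) / df[f"{label}_std"]
df["prediction_is_rare"] = z.abs() > 2           # roughly 2 sigma under the fitted normal
```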
def fit_live_predictions(self, dk: FreqaiDataKitchen) -> None:
    """
@@ -517,13 +563,15 @@ class IFreqaiModel(ABC):
    num_candles = self.freqai_info.get("fit_live_predictions_candles", 100)
    dk.data["labels_mean"], dk.data["labels_std"] = {}, {}
    for label in dk.label_list:
        if self.dd.historic_predictions[dk.pair][label].dtype == object:
            continue
        f = spy.stats.norm.fit(self.dd.historic_predictions[dk.pair][label].tail(num_candles))
        dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1]

    return

# Following methods which are overridden by user made prediction models.
# See freqai/prediction_models/CatboostPredictionModlel.py for an example.
# See freqai/prediction_models/CatboostPredictionModel.py for an example.

@abstractmethod
def train(self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen) -> Any:
@@ -550,7 +598,7 @@ class IFreqaiModel(ABC):
@abstractmethod
def predict(
    self, dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = True
) -> Tuple[DataFrame, ArrayLike]:
) -> Tuple[DataFrame, NDArray[np.int_]]:
    """
    Filter the prediction features data and predict with it.
    :param unfiltered_dataframe: Full dataframe for the current backtest period.
@@ -561,14 +609,3 @@ class IFreqaiModel(ABC):
    :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
    data (NaNs) or felt uncertain about data (i.e. SVM and/or DI index)
    """

@abstractmethod
def return_values(self, dataframe: DataFrame) -> DataFrame:
    """
    User defines the dataframe to be returned to strategy here.
    :param dataframe: DataFrame = the full dataframe for the current prediction (live)
    or --timerange (backtesting)
    :return: dataframe: DataFrame = dataframe filled with user defined data
    """

    return

@ -1,6 +1,7 @@
|
||||
import logging
|
||||
from typing import Any, Tuple
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
from pandas import DataFrame
|
||||
|
||||
@ -18,15 +19,6 @@ class BaseRegressionModel(IFreqaiModel):
|
||||
such as prediction_models/CatboostPredictionModel.py for guidance.
|
||||
"""
|
||||
|
||||
def return_values(self, dataframe: DataFrame) -> DataFrame:
|
||||
"""
|
||||
User uses this function to add any additional return values to the dataframe.
|
||||
e.g.
|
||||
dataframe['volatility'] = dk.volatility_values
|
||||
"""
|
||||
|
||||
return dataframe
|
||||
|
||||
def train(
|
||||
self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
|
||||
) -> Any:
|
||||
@ -55,6 +47,8 @@ class BaseRegressionModel(IFreqaiModel):
|
||||
f"{end_date}--------------------")
|
||||
# split data into train/test data.
|
||||
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
|
||||
if not self.freqai_info.get('fit_live_predictions', 0) or not self.live:
|
||||
dk.fit_labels()
|
||||
# normalize all data based on train_dataset only
|
||||
data_dictionary = dk.normalize_data(data_dictionary)
|
||||
|
||||
@ -74,8 +68,6 @@ class BaseRegressionModel(IFreqaiModel):
|
||||
|
||||
if self.freqai_info.get('fit_live_predictions_candles', 0) and self.live:
|
||||
self.fit_live_predictions(dk)
|
||||
else:
|
||||
dk.fit_labels()
|
||||
|
||||
self.dd.save_historic_predictions_to_disk()
|
||||
|
||||
@ -85,7 +77,7 @@ class BaseRegressionModel(IFreqaiModel):
|
||||
|
||||
def predict(
|
||||
self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False
|
||||
) -> Tuple[DataFrame, npt.ArrayLike]:
|
||||
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
|
||||
"""
|
||||
Filter the prediction features data and predict with it.
|
||||
:param: unfiltered_dataframe: Full dataframe for the current backtest period.
|
||||
|
@ -16,15 +16,6 @@ class BaseTensorFlowModel(IFreqaiModel):
|
||||
User *must* inherit from this class and set fit() and predict().
|
||||
"""
|
||||
|
||||
def return_values(self, dataframe: DataFrame) -> DataFrame:
|
||||
"""
|
||||
User uses this function to add any additional return values to the dataframe.
|
||||
e.g.
|
||||
dataframe['volatility'] = dk.volatility_values
|
||||
"""
|
||||
|
||||
return dataframe
|
||||
|
||||
def train(
|
||||
self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
|
||||
) -> Any:
|
||||
|
freqtrade/freqai/prediction_models/CatboostClassifier.py (new file, 41 additions)
@@ -0,0 +1,41 @@
import logging
from typing import Any, Dict

from catboost import CatBoostClassifier, Pool

from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel


logger = logging.getLogger(__name__)


class CatboostClassifier(BaseRegressionModel):
    """
    User created prediction model. The class needs to override three necessary
    functions, predict(), train(), fit(). The class inherits ModelHandler which
    has its own DataHandler where data is held, saved, loaded, and managed.
    """

    def fit(self, data_dictionary: Dict) -> Any:
        """
        User sets up the training and test data to fit their desired model here
        :params:
        :data_dictionary: the dictionary constructed by DataHandler to hold
        all the training and test data/labels.
        """

        train_data = Pool(
            data=data_dictionary["train_features"],
            label=data_dictionary["train_labels"],
            weight=data_dictionary["train_weights"],
        )

        cbr = CatBoostClassifier(
            allow_writing_files=False,
            loss_function='MultiClass',
            **self.model_training_parameters,
        )

        cbr.fit(train_data)

        return cbr
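Because this model trains with `loss_function='MultiClass'`, its train labels are expected to be discrete classes rather than the float targets the regressors use. A hedged sketch of how such a label could be defined in the strategy; the column name and horizon are assumptions:

```python
import numpy as np

# Assumed classification label: direction of the close five candles ahead.
# String classes are valid CatBoost MultiClass targets and land in the
# object-dtype label columns that fit_labels() above deliberately skips.
df["&s-up_or_down"] = np.where(df["close"].shift(-5) > df["close"], "up", "down")
```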
@ -1,6 +1,7 @@
|
||||
import gc
|
||||
import logging
|
||||
from typing import Any, Dict
|
||||
import gc
|
||||
|
||||
from catboost import CatBoostRegressor, Pool
|
||||
|
||||
from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel
|
||||
@ -9,7 +10,7 @@ from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressio
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CatboostPredictionModel(BaseRegressionModel):
|
||||
class CatboostRegressor(BaseRegressionModel):
|
||||
"""
|
||||
User created prediction model. The class needs to override three necessary
|
||||
functions, predict(), train(), fit(). The class inherits ModelHandler which
|
@ -10,7 +10,7 @@ from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressio
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CatboostPredictionMultiModel(BaseRegressionModel):
|
||||
class CatboostRegressorMultiTarget(BaseRegressionModel):
|
||||
"""
|
||||
User created prediction model. The class needs to override three necessary
|
||||
functions, predict(), train(), fit(). The class inherits ModelHandler which
|
freqtrade/freqai/prediction_models/LightGBMClassifier.py (new file, 38 additions)
@@ -0,0 +1,38 @@
import logging
from typing import Any, Dict

from lightgbm import LGBMClassifier

from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel


logger = logging.getLogger(__name__)


class LightGBMClassifier(BaseRegressionModel):
    """
    User created prediction model. The class needs to override three necessary
    functions, predict(), train(), fit(). The class inherits ModelHandler which
    has its own DataHandler where data is held, saved, loaded, and managed.
    """

    def fit(self, data_dictionary: Dict) -> Any:
        """
        User sets up the training and test data to fit their desired model here
        :params:
        :data_dictionary: the dictionary constructed by DataHandler to hold
        all the training and test data/labels.
        """

        if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
            eval_set = None
        else:
            eval_set = (data_dictionary["test_features"], data_dictionary["test_labels"])
        X = data_dictionary["train_features"]
        y = data_dictionary["train_labels"]

        model = LGBMClassifier(**self.model_training_parameters)

        model.fit(X=X, y=y, eval_set=eval_set)

        return model
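The `eval_set` branch above is driven entirely by `data_split_parameters.test_size`: with `test_size` set to 0 there is no hold-out split, so nothing is passed to LightGBM for evaluation. A hedged config fragment showing the relevant knob (value illustrative, key names from the code above):

```python
# Illustrative only; "data_split_parameters" / "test_size" are the keys read above.
freqai_section = {
    "data_split_parameters": {
        "test_size": 0.33,   # fraction kept as test_features / test_labels
        # "test_size": 0     # would disable the eval_set entirely
    },
}
```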
@ -9,7 +9,7 @@ from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressio
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LightGBMPredictionModel(BaseRegressionModel):
|
||||
class LightGBMRegressor(BaseRegressionModel):
|
||||
"""
|
||||
User created prediction model. The class needs to override three necessary
|
||||
functions, predict(), train(), fit(). The class inherits ModelHandler which
|
@ -10,7 +10,7 @@ from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressio
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LightGBMPredictionMultiModel(BaseRegressionModel):
|
||||
class LightGBMRegressorMultiTarget(BaseRegressionModel):
|
||||
"""
|
||||
User created prediction model. The class needs to override three necessary
|
||||
functions, predict(), train(), fit(). The class inherits ModelHandler which
|
@ -5,6 +5,7 @@ import copy
|
||||
import logging
|
||||
import traceback
|
||||
from datetime import datetime, time, timedelta, timezone
|
||||
from decimal import Decimal
|
||||
from math import isclose
|
||||
from threading import Lock
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
@ -25,7 +26,7 @@ from freqtrade.exchange import timeframe_to_minutes, timeframe_to_seconds
|
||||
from freqtrade.exchange.exchange import timeframe_to_next_date
|
||||
from freqtrade.misc import safe_value_fallback, safe_value_fallback2
|
||||
from freqtrade.mixins import LoggingMixin
|
||||
from freqtrade.persistence import Order, PairLocks, Trade, cleanup_db, init_db
|
||||
from freqtrade.persistence import Order, PairLocks, Trade, init_db
|
||||
from freqtrade.plugins.pairlistmanager import PairListManager
|
||||
from freqtrade.plugins.protectionmanager import ProtectionManager
|
||||
from freqtrade.resolvers import ExchangeResolver, StrategyResolver
|
||||
@ -149,7 +150,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
self.check_for_open_trades()
|
||||
|
||||
self.rpc.cleanup()
|
||||
cleanup_db()
|
||||
Trade.commit()
|
||||
self.exchange.close()
|
||||
|
||||
def startup(self) -> None:
|
||||
@ -214,6 +215,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
if self.trading_mode == TradingMode.FUTURES:
|
||||
self._schedule.run_pending()
|
||||
Trade.commit()
|
||||
self.rpc.process_msg_queue(self.dataprovider._msg_queue)
|
||||
self.last_process = datetime.now(timezone.utc)
|
||||
|
||||
def process_stopped(self) -> None:
|
||||
@ -524,39 +526,61 @@ class FreqtradeBot(LoggingMixin):
|
||||
If the strategy triggers the adjustment, a new order gets issued.
|
||||
Once that completes, the existing trade is modified to match new data.
|
||||
"""
|
||||
if self.strategy.max_entry_position_adjustment > -1:
|
||||
count_of_buys = trade.nr_of_successful_entries
|
||||
if count_of_buys > self.strategy.max_entry_position_adjustment:
|
||||
logger.debug(f"Max adjustment entries for {trade.pair} has been reached.")
|
||||
return
|
||||
else:
|
||||
logger.debug("Max adjustment entries is set to unlimited.")
|
||||
current_rate = self.exchange.get_rate(
|
||||
trade.pair, side='entry', is_short=trade.is_short, refresh=True)
|
||||
current_profit = trade.calc_profit_ratio(current_rate)
|
||||
current_entry_rate, current_exit_rate = self.exchange.get_rates(
|
||||
trade.pair, True, trade.is_short)
|
||||
|
||||
min_stake_amount = self.exchange.get_min_pair_stake_amount(trade.pair,
|
||||
current_rate,
|
||||
current_entry_profit = trade.calc_profit_ratio(current_entry_rate)
|
||||
current_exit_profit = trade.calc_profit_ratio(current_exit_rate)
|
||||
|
||||
min_entry_stake = self.exchange.get_min_pair_stake_amount(trade.pair,
|
||||
current_entry_rate,
|
||||
self.strategy.stoploss)
|
||||
max_stake_amount = self.exchange.get_max_pair_stake_amount(trade.pair, current_rate)
|
||||
min_exit_stake = self.exchange.get_min_pair_stake_amount(trade.pair,
|
||||
current_exit_rate,
|
||||
self.strategy.stoploss)
|
||||
max_entry_stake = self.exchange.get_max_pair_stake_amount(trade.pair, current_entry_rate)
|
||||
stake_available = self.wallets.get_available_stake_amount()
|
||||
logger.debug(f"Calling adjust_trade_position for pair {trade.pair}")
|
||||
stake_amount = strategy_safe_wrapper(self.strategy.adjust_trade_position,
|
||||
default_retval=None)(
|
||||
trade=trade, current_time=datetime.now(timezone.utc), current_rate=current_rate,
|
||||
current_profit=current_profit, min_stake=min_stake_amount,
|
||||
max_stake=min(max_stake_amount, stake_available))
|
||||
trade=trade,
|
||||
current_time=datetime.now(timezone.utc), current_rate=current_entry_rate,
|
||||
current_profit=current_entry_profit, min_stake=min_entry_stake,
|
||||
max_stake=min(max_entry_stake, stake_available),
|
||||
current_entry_rate=current_entry_rate, current_exit_rate=current_exit_rate,
|
||||
current_entry_profit=current_entry_profit, current_exit_profit=current_exit_profit
|
||||
)
|
||||
|
||||
if stake_amount is not None and stake_amount > 0.0:
|
||||
# We should increase our position
|
||||
self.execute_entry(trade.pair, stake_amount, price=current_rate,
|
||||
if self.strategy.max_entry_position_adjustment > -1:
|
||||
count_of_entries = trade.nr_of_successful_entries
|
||||
if count_of_entries > self.strategy.max_entry_position_adjustment:
|
||||
logger.debug(f"Max adjustment entries for {trade.pair} has been reached.")
|
||||
return
|
||||
else:
|
||||
logger.debug("Max adjustment entries is set to unlimited.")
|
||||
self.execute_entry(trade.pair, stake_amount, price=current_entry_rate,
|
||||
trade=trade, is_short=trade.is_short)
|
||||
|
||||
if stake_amount is not None and stake_amount < 0.0:
    # We should decrease our position
    # TODO: Selling part of the trade not implemented yet.
    logger.error(f"Unable to decrease trade position / sell partially"
                 f" for pair {trade.pair}, feature not implemented.")
    amount = abs(float(Decimal(stake_amount) / Decimal(current_exit_rate)))
    if amount > trade.amount:
        # This is currently ineffective as remaining would become < min tradable
        # Fixing this would require checking for 0.0 there -
        # if we decide that this callback is allowed to "fully exit"
        logger.info(
            f"Adjusting amount to trade.amount as it is higher. {amount} > {trade.amount}")
        amount = trade.amount

    remaining = (trade.amount - amount) * current_exit_rate
    if remaining < min_exit_stake:
        logger.info(f'Remaining amount of {remaining} would be too small.')
        return

    self.execute_trade_exit(trade, current_exit_rate, exit_check=ExitCheckTuple(
        exit_type=ExitType.PARTIAL_EXIT), sub_trade_amt=amount)

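With this change a negative return value from `adjust_trade_position` is no longer just an error log; it is converted into an amount at `current_exit_rate` and executed as a partial exit. A hedged strategy-side sketch using the keyword arguments passed above; the thresholds and sizing are invented for illustration:

```python
def adjust_trade_position(self, trade, current_time, current_rate, current_profit,
                          min_stake, max_stake, current_entry_rate, current_exit_rate,
                          current_entry_profit, current_exit_profit, **kwargs):
    # Take partial profit: a negative stake asks the bot to sell that much,
    # converted to an amount at current_exit_rate and subject to the
    # min_exit_stake check shown above.
    if current_exit_profit > 0.05:
        return -(trade.stake_amount / 2)
    # Simple DCA: add the minimum allowed stake after a 5% drawdown, at most twice.
    if current_entry_profit < -0.05 and trade.nr_of_successful_entries < 3:
        return min_stake
    return None  # no adjustment
```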
def _check_depth_of_market(self, pair: str, conf: Dict, side: SignalDirection) -> bool:
|
||||
"""
|
||||
@ -600,7 +624,8 @@ class FreqtradeBot(LoggingMixin):
|
||||
ordertype: Optional[str] = None,
|
||||
enter_tag: Optional[str] = None,
|
||||
trade: Optional[Trade] = None,
|
||||
order_adjust: bool = False
|
||||
order_adjust: bool = False,
|
||||
leverage_: Optional[float] = None,
|
||||
) -> bool:
|
||||
"""
|
||||
Executes a limit buy for the given pair
|
||||
@ -616,7 +641,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
pos_adjust = trade is not None
|
||||
|
||||
enter_limit_requested, stake_amount, leverage = self.get_valid_enter_price_and_stake(
|
||||
pair, price, stake_amount, trade_side, enter_tag, trade, order_adjust)
|
||||
pair, price, stake_amount, trade_side, enter_tag, trade, order_adjust, leverage_)
|
||||
|
||||
if not stake_amount:
|
||||
return False
|
||||
@ -730,7 +755,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
# Updating wallets
|
||||
self.wallets.update()
|
||||
|
||||
self._notify_enter(trade, order, order_type)
|
||||
self._notify_enter(trade, order_obj, order_type, sub_trade=pos_adjust)
|
||||
|
||||
if pos_adjust:
|
||||
if order_status == 'closed':
|
||||
@ -739,8 +764,8 @@ class FreqtradeBot(LoggingMixin):
|
||||
else:
|
||||
logger.info(f"DCA order {order_status}, will wait for resolution: {trade}")
|
||||
|
||||
# Update fees if order is closed
|
||||
if order_status == 'closed':
|
||||
# Update fees if order is non-opened
|
||||
if order_status in constants.NON_OPEN_EXCHANGE_STATES:
|
||||
self.update_trade_state(trade, order_id, order)
|
||||
|
||||
return True
|
||||
@ -763,6 +788,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
entry_tag: Optional[str],
|
||||
trade: Optional[Trade],
|
||||
order_adjust: bool,
|
||||
leverage_: Optional[float],
|
||||
) -> Tuple[float, float, float]:
|
||||
|
||||
if price:
|
||||
@ -785,8 +811,11 @@ class FreqtradeBot(LoggingMixin):
|
||||
if not enter_limit_requested:
|
||||
raise PricingError('Could not determine entry price.')
|
||||
|
||||
if trade is None:
|
||||
if self.trading_mode != TradingMode.SPOT and trade is None:
|
||||
max_leverage = self.exchange.get_max_leverage(pair, stake_amount)
|
||||
if leverage_:
|
||||
leverage = leverage_
|
||||
else:
|
||||
leverage = strategy_safe_wrapper(self.strategy.leverage, default_retval=1.0)(
|
||||
pair=pair,
|
||||
current_time=datetime.now(timezone.utc),
|
||||
@ -794,7 +823,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
proposed_leverage=1.0,
|
||||
max_leverage=max_leverage,
|
||||
side=trade_side, entry_tag=entry_tag,
|
||||
) if self.trading_mode != TradingMode.SPOT else 1.0
|
||||
)
|
||||
# Cap leverage between 1.0 and max_leverage.
|
||||
leverage = min(max(leverage, 1.0), max_leverage)
|
||||
else:
|
||||
@ -829,13 +858,14 @@ class FreqtradeBot(LoggingMixin):
|
||||
|
||||
return enter_limit_requested, stake_amount, leverage
|
||||
|
||||
def _notify_enter(self, trade: Trade, order: Dict, order_type: Optional[str] = None,
|
||||
fill: bool = False) -> None:
|
||||
def _notify_enter(self, trade: Trade, order: Order, order_type: Optional[str] = None,
|
||||
fill: bool = False, sub_trade: bool = False) -> None:
|
||||
"""
|
||||
Sends rpc notification when a entry order occurred.
|
||||
"""
|
||||
msg_type = RPCMessageType.ENTRY_FILL if fill else RPCMessageType.ENTRY
|
||||
open_rate = safe_value_fallback(order, 'average', 'price')
|
||||
open_rate = order.safe_price
|
||||
|
||||
if open_rate is None:
|
||||
open_rate = trade.open_rate
|
||||
|
||||
@ -859,15 +889,17 @@ class FreqtradeBot(LoggingMixin):
|
||||
'stake_amount': trade.stake_amount,
|
||||
'stake_currency': self.config['stake_currency'],
|
||||
'fiat_currency': self.config.get('fiat_display_currency', None),
|
||||
'amount': safe_value_fallback(order, 'filled', 'amount') or trade.amount,
|
||||
'amount': order.safe_amount_after_fee,
|
||||
'open_date': trade.open_date or datetime.utcnow(),
|
||||
'current_rate': current_rate,
|
||||
'sub_trade': sub_trade,
|
||||
}
|
||||
|
||||
# Send the message
|
||||
self.rpc.send_msg(msg)
|
||||
|
||||
def _notify_enter_cancel(self, trade: Trade, order_type: str, reason: str) -> None:
|
||||
def _notify_enter_cancel(self, trade: Trade, order_type: str, reason: str,
|
||||
sub_trade: bool = False) -> None:
|
||||
"""
|
||||
Sends rpc notification when a entry order cancel occurred.
|
||||
"""
|
||||
@ -892,6 +924,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
'open_date': trade.open_date,
|
||||
'current_rate': current_rate,
|
||||
'reason': reason,
|
||||
'sub_trade': sub_trade,
|
||||
}
|
||||
|
||||
# Send the message
|
||||
@ -1015,7 +1048,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
trade.stoploss_order_id = None
|
||||
logger.error(f'Unable to place a stoploss order on exchange. {e}')
|
||||
logger.warning('Exiting the trade forcefully')
|
||||
self.execute_trade_exit(trade, trade.stop_loss, exit_check=ExitCheckTuple(
|
||||
self.execute_trade_exit(trade, stop_price, exit_check=ExitCheckTuple(
|
||||
exit_type=ExitType.EMERGENCY_EXIT))
|
||||
|
||||
except ExchangeError:
|
||||
@ -1085,7 +1118,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
if (trade.is_open
|
||||
and stoploss_order
|
||||
and stoploss_order['status'] in ('canceled', 'cancelled')):
|
||||
if self.create_stoploss_order(trade=trade, stop_price=trade.stop_loss):
|
||||
if self.create_stoploss_order(trade=trade, stop_price=trade.stoploss_or_liquidation):
|
||||
return False
|
||||
else:
|
||||
trade.stoploss_order_id = None
|
||||
@ -1114,7 +1147,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
:param order: Current on exchange stoploss order
|
||||
:return: None
|
||||
"""
|
||||
stoploss_norm = self.exchange.price_to_precision(trade.pair, trade.stop_loss)
|
||||
stoploss_norm = self.exchange.price_to_precision(trade.pair, trade.stoploss_or_liquidation)
|
||||
|
||||
if self.exchange.stoploss_adjust(stoploss_norm, order, side=trade.exit_side):
|
||||
# we check if the update is necessary
|
||||
@ -1132,7 +1165,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
f"for pair {trade.pair}")
|
||||
|
||||
# Create new stoploss order
|
||||
if not self.create_stoploss_order(trade=trade, stop_price=trade.stop_loss):
|
||||
if not self.create_stoploss_order(trade=trade, stop_price=stoploss_norm):
|
||||
logger.warning(f"Could not create trailing stoploss order "
|
||||
f"for pair {trade.pair}.")
|
||||
|
||||
@ -1365,16 +1398,22 @@ class FreqtradeBot(LoggingMixin):
|
||||
trade.open_order_id = None
|
||||
trade.exit_reason = None
|
||||
cancelled = True
|
||||
self.wallets.update()
|
||||
else:
|
||||
# TODO: figure out how to handle partially complete sell orders
|
||||
reason = constants.CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']
|
||||
cancelled = False
|
||||
|
||||
self.wallets.update()
|
||||
order_obj = trade.select_order_by_order_id(order['id'])
|
||||
if not order_obj:
|
||||
raise DependencyException(
|
||||
f"Order_obj not found for {order['id']}. This should not have happened.")
|
||||
|
||||
sub_trade = order_obj.amount != trade.amount
|
||||
self._notify_exit_cancel(
|
||||
trade,
|
||||
order_type=self.strategy.order_types['exit'],
|
||||
reason=reason
|
||||
reason=reason, order=order_obj, sub_trade=sub_trade
|
||||
)
|
||||
return cancelled
|
||||
|
||||
@ -1415,6 +1454,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
*,
|
||||
exit_tag: Optional[str] = None,
|
||||
ordertype: Optional[str] = None,
|
||||
sub_trade_amt: float = None,
|
||||
) -> bool:
|
||||
"""
|
||||
Executes a trade exit for the given trade and limit
|
||||
@ -1431,14 +1471,15 @@ class FreqtradeBot(LoggingMixin):
|
||||
)
|
||||
exit_type = 'exit'
|
||||
exit_reason = exit_tag or exit_check.exit_reason
|
||||
if exit_check.exit_type in (ExitType.STOP_LOSS, ExitType.TRAILING_STOP_LOSS):
|
||||
if exit_check.exit_type in (
|
||||
ExitType.STOP_LOSS, ExitType.TRAILING_STOP_LOSS, ExitType.LIQUIDATION):
|
||||
exit_type = 'stoploss'
|
||||
|
||||
# if stoploss is on exchange and we are on dry_run mode,
|
||||
# we consider the sell price stop price
|
||||
if (self.config['dry_run'] and exit_type == 'stoploss'
|
||||
and self.strategy.order_types['stoploss_on_exchange']):
|
||||
limit = trade.stop_loss
|
||||
limit = trade.stoploss_or_liquidation
|
||||
|
||||
# set custom_exit_price if available
|
||||
proposed_limit_rate = limit
|
||||
@ -1460,14 +1501,17 @@ class FreqtradeBot(LoggingMixin):
|
||||
# Emergency sells (default to market!)
|
||||
order_type = self.strategy.order_types.get("emergency_exit", "market")
|
||||
|
||||
amount = self._safe_exit_amount(trade.pair, trade.amount)
|
||||
amount = self._safe_exit_amount(trade.pair, sub_trade_amt or trade.amount)
|
||||
time_in_force = self.strategy.order_time_in_force['exit']
|
||||
|
||||
if not strategy_safe_wrapper(self.strategy.confirm_trade_exit, default_retval=True)(
|
||||
if (exit_check.exit_type != ExitType.LIQUIDATION
|
||||
and not sub_trade_amt
|
||||
and not strategy_safe_wrapper(
|
||||
self.strategy.confirm_trade_exit, default_retval=True)(
|
||||
pair=trade.pair, trade=trade, order_type=order_type, amount=amount, rate=limit,
|
||||
time_in_force=time_in_force, exit_reason=exit_reason,
|
||||
sell_reason=exit_reason, # sellreason -> compatibility
|
||||
current_time=datetime.now(timezone.utc)):
|
||||
current_time=datetime.now(timezone.utc))):
|
||||
logger.info(f"User denied exit for {trade.pair}.")
|
||||
return False
|
||||
|
||||
@ -1501,7 +1545,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
self.strategy.lock_pair(trade.pair, datetime.now(timezone.utc),
|
||||
reason='Auto lock')
|
||||
|
||||
self._notify_exit(trade, order_type)
|
||||
self._notify_exit(trade, order_type, sub_trade=bool(sub_trade_amt), order=order_obj)
|
||||
# In case of market sell orders the order can be closed immediately
|
||||
if order.get('status', 'unknown') in ('closed', 'expired'):
|
||||
self.update_trade_state(trade, trade.open_order_id, order)
|
||||
@ -1509,16 +1553,27 @@ class FreqtradeBot(LoggingMixin):
|
||||
|
||||
return True
|
||||
|
||||
def _notify_exit(self, trade: Trade, order_type: str, fill: bool = False) -> None:
|
||||
def _notify_exit(self, trade: Trade, order_type: str, fill: bool = False,
|
||||
sub_trade: bool = False, order: Order = None) -> None:
|
||||
"""
|
||||
Sends rpc notification when a sell occurred.
|
||||
"""
|
||||
profit_rate = trade.close_rate if trade.close_rate else trade.close_rate_requested
|
||||
profit_trade = trade.calc_profit(rate=profit_rate)
|
||||
# Use cached rates here - it was updated seconds ago.
|
||||
current_rate = self.exchange.get_rate(
|
||||
trade.pair, side='exit', is_short=trade.is_short, refresh=False) if not fill else None
|
||||
|
||||
# second condition is for mypy only; order will always be passed during sub trade
|
||||
if sub_trade and order is not None:
|
||||
amount = order.safe_filled if fill else order.amount
|
||||
profit_rate = order.safe_price
|
||||
|
||||
profit = trade.calc_profit(rate=profit_rate, amount=amount, open_rate=trade.open_rate)
|
||||
profit_ratio = trade.calc_profit_ratio(profit_rate, amount, trade.open_rate)
|
||||
else:
|
||||
profit_rate = trade.close_rate if trade.close_rate else trade.close_rate_requested
|
||||
profit = trade.calc_profit(rate=profit_rate) + (0.0 if fill else trade.realized_profit)
|
||||
profit_ratio = trade.calc_profit_ratio(profit_rate)
|
||||
amount = trade.amount
|
||||
gain = "profit" if profit_ratio > 0 else "loss"
|
||||
|
||||
msg = {
|
||||
@ -1532,11 +1587,11 @@ class FreqtradeBot(LoggingMixin):
|
||||
'gain': gain,
|
||||
'limit': profit_rate,
|
||||
'order_type': order_type,
|
||||
'amount': trade.amount,
|
||||
'amount': amount,
|
||||
'open_rate': trade.open_rate,
|
||||
'close_rate': trade.close_rate,
|
||||
'close_rate': profit_rate,
|
||||
'current_rate': current_rate,
|
||||
'profit_amount': profit_trade,
|
||||
'profit_amount': profit,
|
||||
'profit_ratio': profit_ratio,
|
||||
'buy_tag': trade.enter_tag,
|
||||
'enter_tag': trade.enter_tag,
|
||||
@ -1544,19 +1599,18 @@ class FreqtradeBot(LoggingMixin):
|
||||
'exit_reason': trade.exit_reason,
|
||||
'open_date': trade.open_date,
|
||||
'close_date': trade.close_date or datetime.utcnow(),
|
||||
'stake_amount': trade.stake_amount,
|
||||
'stake_currency': self.config['stake_currency'],
|
||||
'fiat_currency': self.config.get('fiat_display_currency'),
|
||||
'sub_trade': sub_trade,
|
||||
'cumulative_profit': trade.realized_profit,
|
||||
}
|
||||
|
||||
if 'fiat_display_currency' in self.config:
|
||||
msg.update({
|
||||
'fiat_currency': self.config['fiat_display_currency'],
|
||||
})
|
||||
|
||||
# Send the message
|
||||
self.rpc.send_msg(msg)
|
||||
|
||||
def _notify_exit_cancel(self, trade: Trade, order_type: str, reason: str) -> None:
|
||||
def _notify_exit_cancel(self, trade: Trade, order_type: str, reason: str,
|
||||
order: Order, sub_trade: bool = False) -> None:
|
||||
"""
|
||||
Sends rpc notification when a sell cancel occurred.
|
||||
"""
|
||||
@ -1582,7 +1636,7 @@ class FreqtradeBot(LoggingMixin):
|
||||
'gain': gain,
|
||||
'limit': profit_rate or 0,
|
||||
'order_type': order_type,
|
||||
'amount': trade.amount,
|
||||
'amount': order.safe_amount_after_fee,
|
||||
'open_rate': trade.open_rate,
|
||||
'current_rate': current_rate,
|
||||
'profit_amount': profit_trade,
|
||||
@ -1596,6 +1650,8 @@ class FreqtradeBot(LoggingMixin):
|
||||
'stake_currency': self.config['stake_currency'],
|
||||
'fiat_currency': self.config.get('fiat_display_currency', None),
|
||||
'reason': reason,
|
||||
'sub_trade': sub_trade,
|
||||
'stake_amount': trade.stake_amount,
|
||||
}
|
||||
|
||||
if 'fiat_display_currency' in self.config:
|
||||
@ -1650,40 +1706,50 @@ class FreqtradeBot(LoggingMixin):
|
||||
self.handle_order_fee(trade, order_obj, order)
|
||||
|
||||
trade.update_trade(order_obj)
|
||||
# TODO: is the below necessary? it's already done in update_trade for filled buys
|
||||
trade.recalc_trade_from_orders()
|
||||
Trade.commit()
|
||||
|
||||
if order['status'] in constants.NON_OPEN_EXCHANGE_STATES:
|
||||
if order.get('status') in constants.NON_OPEN_EXCHANGE_STATES:
|
||||
# If a entry order was closed, force update on stoploss on exchange
|
||||
if order.get('side') == trade.entry_side:
|
||||
trade = self.cancel_stoploss_on_exchange(trade)
|
||||
if not self.edge:
|
||||
# TODO: should shorting/leverage be supported by Edge,
|
||||
# then this will need to be fixed.
|
||||
trade.adjust_stop_loss(trade.open_rate, self.strategy.stoploss, initial=True)
|
||||
if order.get('side') == trade.entry_side or trade.amount > 0:
|
||||
# Must also run for partial exits
|
||||
# TODO: Margin will need to use interest_rate as well.
|
||||
# interest_rate = self.exchange.get_interest_rate()
|
||||
trade.set_isolated_liq(self.exchange.get_liquidation_price(
|
||||
trade.set_liquidation_price(self.exchange.get_liquidation_price(
|
||||
leverage=trade.leverage,
|
||||
pair=trade.pair,
|
||||
amount=trade.amount,
|
||||
open_rate=trade.open_rate,
|
||||
is_short=trade.is_short
|
||||
))
|
||||
if not self.edge:
|
||||
# TODO: should shorting/leverage be supported by Edge,
|
||||
# then this will need to be fixed.
|
||||
trade.adjust_stop_loss(trade.open_rate, self.strategy.stoploss, initial=True)
|
||||
|
||||
# Updating wallets when order is closed
|
||||
self.wallets.update()
|
||||
Trade.commit()
|
||||
|
||||
if not trade.is_open:
|
||||
self.order_close_notify(trade, order_obj, stoploss_order, send_msg)
|
||||
|
||||
return False
|
||||
|
||||
def order_close_notify(
|
||||
self, trade: Trade, order: Order, stoploss_order: bool, send_msg: bool):
|
||||
"""send "fill" notifications"""
|
||||
|
||||
sub_trade = not isclose(order.safe_amount_after_fee,
|
||||
trade.amount, abs_tol=constants.MATH_CLOSE_PREC)
|
||||
if order.ft_order_side == trade.exit_side:
|
||||
# Exit notification
|
||||
if send_msg and not stoploss_order and not trade.open_order_id:
|
||||
self._notify_exit(trade, '', True)
|
||||
self._notify_exit(trade, '', fill=True, sub_trade=sub_trade, order=order)
|
||||
if not trade.is_open:
|
||||
self.handle_protections(trade.pair, trade.trade_direction)
|
||||
elif send_msg and not trade.open_order_id and not stoploss_order:
|
||||
# Enter fill
|
||||
self._notify_enter(trade, order, fill=True)
|
||||
|
||||
return False
|
||||
self._notify_enter(trade, order, fill=True, sub_trade=sub_trade)
|
||||
|
||||
def handle_protections(self, pair: str, side: LongShort) -> None:
|
||||
prot_trig = self.protections.stop_per_pair(pair, side=side)
|
||||
|
freqtrade/optimize/backtesting.py (91 changes, Executable file → Normal file)
@@ -89,6 +89,9 @@ class Backtesting:
self.dataprovider = DataProvider(self.config, self.exchange)

if self.config.get('strategy_list'):
    if self.config.get('freqai', {}).get('enabled', False):
        raise OperationalException(
            "You can't use strategy_list and freqai at the same time.")
    for strat in list(self.config['strategy_list']):
        stratconf = deepcopy(self.config)
        stratconf['strategy'] = strat
@ -207,8 +210,12 @@ class Backtesting:
"""
self.progress.init_step(BacktestState.DATALOAD, 1)

if self.config.get('freqai') is not None:
self.required_startup += int(self.config.get('freqai', {}).get('startup_candles', 1000))
if self.config.get('freqai', {}).get('enabled', False):
    startup_candles = int(self.config.get('freqai', {}).get('startup_candles', 0))
    if not startup_candles:
        raise OperationalException('FreqAI backtesting module requires user set '
                                   'startup_candles in config.')
    self.required_startup += int(self.config.get('freqai', {}).get('startup_candles', 0))
logger.info(f'Increasing startup_candle_count for freqai to {self.required_startup}')
self.config['startup_candle_count'] = self.required_startup
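The net effect of this hunk is purely additive: the freqai `startup_candles` are stacked on top of the startup the strategy already requires, and the sum is written back as `startup_candle_count`. A quick numeric illustration (numbers invented):

```python
# Hypothetical numbers, only to show the arithmetic performed above.
strategy_required_startup = 400      # what the strategy itself needs
freqai_startup_candles = 10000       # "startup_candles" from the freqai config block

required_startup = strategy_required_startup + freqai_startup_candles
print(required_startup)              # 10400 -> config['startup_candle_count']
```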
@ -386,7 +393,8 @@ class Backtesting:
|
||||
Get close rate for backtesting result
|
||||
"""
|
||||
# Special handling if high or low hit STOP_LOSS or ROI
|
||||
if exit.exit_type in (ExitType.STOP_LOSS, ExitType.TRAILING_STOP_LOSS):
|
||||
if exit.exit_type in (
|
||||
ExitType.STOP_LOSS, ExitType.TRAILING_STOP_LOSS, ExitType.LIQUIDATION):
|
||||
return self._get_close_rate_for_stoploss(row, trade, exit, trade_dur)
|
||||
elif exit.exit_type == (ExitType.ROI):
|
||||
return self._get_close_rate_for_roi(row, trade, exit, trade_dur)
|
||||
@ -401,11 +409,16 @@ class Backtesting:
|
||||
is_short = trade.is_short or False
|
||||
leverage = trade.leverage or 1.0
|
||||
side_1 = -1 if is_short else 1
|
||||
if exit.exit_type == ExitType.LIQUIDATION and trade.liquidation_price:
|
||||
stoploss_value = trade.liquidation_price
|
||||
else:
|
||||
stoploss_value = trade.stop_loss
|
||||
|
||||
if is_short:
|
||||
if trade.stop_loss < row[LOW_IDX]:
|
||||
if stoploss_value < row[LOW_IDX]:
|
||||
return row[OPEN_IDX]
|
||||
else:
|
||||
if trade.stop_loss > row[HIGH_IDX]:
|
||||
if stoploss_value > row[HIGH_IDX]:
|
||||
return row[OPEN_IDX]
|
||||
|
||||
# Special case: trailing triggers within same candle as trade opened. Assume most
|
||||
@ -438,7 +451,7 @@ class Backtesting:
|
||||
return max(row[LOW_IDX], stop_rate)
|
||||
|
||||
# Set close_rate to stoploss
|
||||
return trade.stop_loss
|
||||
return stoploss_value
|
||||
|
||||
def _get_close_rate_for_roi(self, row: Tuple, trade: LocalTrade, exit: ExitCheckTuple,
|
||||
trade_dur: int) -> float:
|
||||
@ -502,16 +515,20 @@ class Backtesting:
|
||||
|
||||
def _get_adjust_trade_entry_for_candle(self, trade: LocalTrade, row: Tuple
|
||||
) -> LocalTrade:
|
||||
current_profit = trade.calc_profit_ratio(row[OPEN_IDX])
|
||||
min_stake = self.exchange.get_min_pair_stake_amount(trade.pair, row[OPEN_IDX], -0.1)
|
||||
max_stake = self.exchange.get_max_pair_stake_amount(trade.pair, row[OPEN_IDX])
|
||||
current_rate = row[OPEN_IDX]
|
||||
current_date = row[DATE_IDX].to_pydatetime()
|
||||
current_profit = trade.calc_profit_ratio(current_rate)
|
||||
min_stake = self.exchange.get_min_pair_stake_amount(trade.pair, current_rate, -0.1)
|
||||
max_stake = self.exchange.get_max_pair_stake_amount(trade.pair, current_rate)
|
||||
stake_available = self.wallets.get_available_stake_amount()
|
||||
stake_amount = strategy_safe_wrapper(self.strategy.adjust_trade_position,
|
||||
default_retval=None)(
|
||||
trade=trade, # type: ignore[arg-type]
|
||||
current_time=row[DATE_IDX].to_pydatetime(), current_rate=row[OPEN_IDX],
|
||||
current_time=current_date, current_rate=current_rate,
|
||||
current_profit=current_profit, min_stake=min_stake,
|
||||
max_stake=min(max_stake, stake_available))
|
||||
max_stake=min(max_stake, stake_available),
|
||||
current_entry_rate=current_rate, current_exit_rate=current_rate,
|
||||
current_entry_profit=current_profit, current_exit_profit=current_profit)
|
||||
|
||||
# Check if we should increase our position
|
||||
if stake_amount is not None and stake_amount > 0.0:
|
||||
@ -522,6 +539,24 @@ class Backtesting:
|
||||
self.wallets.update()
|
||||
return pos_trade
|
||||
|
||||
if stake_amount is not None and stake_amount < 0.0:
|
||||
amount = abs(stake_amount) / current_rate
|
||||
if amount > trade.amount:
|
||||
# This is currently ineffective as remaining would become < min tradable
|
||||
amount = trade.amount
|
||||
remaining = (trade.amount - amount) * current_rate
|
||||
if remaining < min_stake:
|
||||
# Remaining stake is too low to be sold.
|
||||
return trade
|
||||
pos_trade = self._exit_trade(trade, row, current_rate, amount)
|
||||
if pos_trade is not None:
|
||||
order = pos_trade.orders[-1]
|
||||
if self._get_order_filled(order.price, row):
|
||||
order.close_bt_order(current_date, trade)
|
||||
trade.recalc_trade_from_orders()
|
||||
self.wallets.update()
|
||||
return pos_trade
|
||||
|
||||
return trade
|
||||
|
||||
def _get_order_filled(self, rate: float, row: Tuple) -> bool:
|
||||
@ -597,21 +632,30 @@ class Backtesting:
|
||||
# Confirm trade exit:
|
||||
time_in_force = self.strategy.order_time_in_force['exit']
|
||||
|
||||
if not strategy_safe_wrapper(self.strategy.confirm_trade_exit, default_retval=True)(
|
||||
if (exit_.exit_type != ExitType.LIQUIDATION and not strategy_safe_wrapper(
|
||||
self.strategy.confirm_trade_exit, default_retval=True)(
|
||||
pair=trade.pair,
|
||||
trade=trade, # type: ignore[arg-type]
|
||||
order_type='limit',
|
||||
order_type=order_type,
|
||||
amount=trade.amount,
|
||||
rate=close_rate,
|
||||
time_in_force=time_in_force,
|
||||
sell_reason=exit_reason, # deprecated
|
||||
exit_reason=exit_reason,
|
||||
current_time=exit_candle_time):
|
||||
current_time=exit_candle_time)):
|
||||
return None
|
||||
|
||||
trade.exit_reason = exit_reason
|
||||
|
||||
return self._exit_trade(trade, row, close_rate, trade.amount)
|
||||
return None
|
||||
|
||||
def _exit_trade(self, trade: LocalTrade, sell_row: Tuple,
|
||||
close_rate: float, amount: float = None) -> Optional[LocalTrade]:
|
||||
self.order_id_counter += 1
|
||||
exit_candle_time = sell_row[DATE_IDX].to_pydatetime()
|
||||
order_type = self.strategy.order_types['exit']
|
||||
amount = amount or trade.amount
|
||||
order = Order(
|
||||
id=self.order_id_counter,
|
||||
ft_trade_id=trade.id,
|
||||
@ -627,16 +671,14 @@ class Backtesting:
|
||||
status="open",
|
||||
price=close_rate,
|
||||
average=close_rate,
|
||||
amount=trade.amount,
|
||||
amount=amount,
|
||||
filled=0,
|
||||
remaining=trade.amount,
|
||||
cost=trade.amount * close_rate,
|
||||
remaining=amount,
|
||||
cost=amount * close_rate,
|
||||
)
|
||||
trade.orders.append(order)
|
||||
return trade
|
||||
|
||||
return None
|
||||
|
||||
def _get_exit_trade_entry(self, trade: LocalTrade, row: Tuple) -> Optional[LocalTrade]:
|
||||
exit_candle_time: datetime = row[DATE_IDX].to_pydatetime()
|
||||
|
||||
@ -812,7 +854,7 @@ class Backtesting:
|
||||
|
||||
trade.adjust_stop_loss(trade.open_rate, self.strategy.stoploss, initial=True)
|
||||
|
||||
trade.set_isolated_liq(self.exchange.get_liquidation_price(
|
||||
trade.set_liquidation_price(self.exchange.get_liquidation_price(
|
||||
pair=pair,
|
||||
open_rate=propose_rate,
|
||||
amount=amount,
|
||||
@ -863,6 +905,8 @@ class Backtesting:
|
||||
# Ignore trade if entry-order did not fill yet
|
||||
continue
|
||||
exit_row = data[pair][-1]
|
||||
self._exit_trade(trade, exit_row, exit_row[OPEN_IDX], trade.amount)
|
||||
trade.orders[-1].close_bt_order(exit_row[DATE_IDX].to_pydatetime(), trade)
|
||||
|
||||
trade.close_date = exit_row[DATE_IDX].to_pydatetime()
|
||||
trade.exit_reason = ExitType.FORCE_EXIT.value
|
||||
@ -1004,7 +1048,7 @@ class Backtesting:
|
||||
return None
|
||||
return row
|
||||
|
||||
def backtest(self, processed: Dict,
|
||||
def backtest(self, processed: Dict, # noqa: max-complexity: 13
|
||||
start_date: datetime, end_date: datetime,
|
||||
max_open_trades: int = 0, position_stacking: bool = False,
|
||||
enable_protections: bool = False) -> Dict[str, Any]:
|
||||
@ -1106,6 +1150,11 @@ class Backtesting:
|
||||
if order and self._get_order_filled(order.price, row):
|
||||
order.close_bt_order(current_time, trade)
|
||||
trade.open_order_id = None
|
||||
sub_trade = order.safe_amount_after_fee != trade.amount
|
||||
if sub_trade:
|
||||
order.close_bt_order(current_time, trade)
|
||||
trade.recalc_trade_from_orders()
|
||||
else:
|
||||
trade.close_date = current_time
|
||||
trade.close(order.price, show_msg=False)
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
# flake8: noqa: F401
|
||||
|
||||
from freqtrade.persistence.models import cleanup_db, init_db
|
||||
from freqtrade.persistence.models import init_db
|
||||
from freqtrade.persistence.pairlock_middleware import PairLocks
|
||||
from freqtrade.persistence.trade_model import LocalTrade, Order, Trade
|
||||
|
@ -95,6 +95,7 @@ def migrate_trades_and_orders_table(
exit_reason = get_column_def(cols, 'sell_reason', get_column_def(cols, 'exit_reason', 'null'))
strategy = get_column_def(cols, 'strategy', 'null')
enter_tag = get_column_def(cols, 'buy_tag', get_column_def(cols, 'enter_tag', 'null'))
realized_profit = get_column_def(cols, 'realized_profit', '0.0')

trading_mode = get_column_def(cols, 'trading_mode', 'null')

@ -155,7 +156,7 @@ def migrate_trades_and_orders_table(
max_rate, min_rate, exit_reason, exit_order_status, strategy, enter_tag,
timeframe, open_trade_value, close_profit_abs,
trading_mode, leverage, liquidation_price, is_short,
interest_rate, funding_fees
interest_rate, funding_fees, realized_profit
)
select id, lower(exchange), pair, {base_currency} base_currency,
{stake_currency} stake_currency,
@ -181,7 +182,7 @@ def migrate_trades_and_orders_table(
{open_trade_value} open_trade_value, {close_profit_abs} close_profit_abs,
{trading_mode} trading_mode, {leverage} leverage, {liquidation_price} liquidation_price,
{is_short} is_short, {interest_rate} interest_rate,
{funding_fees} funding_fees
{funding_fees} funding_fees, {realized_profit} realized_profit
from {trade_back_name}
"""))

@ -297,8 +298,9 @@ def check_migrate(engine, decl_base, previous_tables) -> None:

# Check if migration necessary
# Migrates both trades and orders table!
if not has_column(cols_orders, 'stop_price'):
# if not has_column(cols_trades, 'base_currency'):
# if ('orders' not in previous_tables
# or not has_column(cols_orders, 'stop_price')):
if not has_column(cols_trades, 'realized_profit'):
logger.info(f"Running database migration for trades - "
f"backup: {table_back_name}, {order_table_bak_name}")
migrate_trades_and_orders_table(

@ -61,11 +61,3 @@ def init_db(db_url: str) -> None:
previous_tables = inspect(engine).get_table_names()
_DECL_BASE.metadata.create_all(engine)
check_migrate(engine, decl_base=_DECL_BASE, previous_tables=previous_tables)


def cleanup_db() -> None:
"""
Flushes all pending operations to disk.
:return: None
"""
Trade.commit()

@ -4,13 +4,15 @@ This module contains the class to persist trades into SQLite
import logging
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from math import isclose
from typing import Any, Dict, List, Optional

from sqlalchemy import (Boolean, Column, DateTime, Enum, Float, ForeignKey, Integer, String,
UniqueConstraint, desc, func)
from sqlalchemy.orm import Query, lazyload, relationship

from freqtrade.constants import DATETIME_PRINT_FORMAT, NON_OPEN_EXCHANGE_STATES, BuySell, LongShort
from freqtrade.constants import (DATETIME_PRINT_FORMAT, MATH_CLOSE_PREC, NON_OPEN_EXCHANGE_STATES,
BuySell, LongShort)
from freqtrade.enums import ExitType, TradingMode
from freqtrade.exceptions import DependencyException, OperationalException
from freqtrade.leverage import interest
@ -176,10 +178,9 @@ class Order(_DECL_BASE):
self.remaining = 0
self.status = 'closed'
self.ft_is_open = False
if (self.ft_order_side == trade.entry_side
and len(trade.select_filled_orders(trade.entry_side)) == 1):
if (self.ft_order_side == trade.entry_side):
trade.open_rate = self.price
trade.recalc_open_trade_value()
trade.recalc_trade_from_orders()
trade.adjust_stop_loss(trade.open_rate, trade.stop_loss_pct, refresh=True)

@staticmethod
@ -195,7 +196,7 @@ class Order(_DECL_BASE):
if filtered_orders:
oobj = filtered_orders[0]
oobj.update_from_ccxt_object(order)
Order.query.session.commit()
Trade.commit()
else:
logger.warning(f"Did not find order for {order}.")

@ -237,6 +238,7 @@ class LocalTrade():
trades: List['LocalTrade'] = []
trades_open: List['LocalTrade'] = []
total_profit: float = 0
realized_profit: float = 0

id: int = 0

@ -302,6 +304,16 @@ class LocalTrade():
# Futures properties
funding_fees: Optional[float] = None

@property
def stoploss_or_liquidation(self) -> float:
if self.liquidation_price:
if self.is_short:
return min(self.stop_loss, self.liquidation_price)
else:
return max(self.stop_loss, self.liquidation_price)

return self.stop_loss

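# [Editor's illustrative sketch - not part of the diff above.] The new
# `stoploss_or_liquidation` property picks whichever level is binding: for shorts
# the lower of stop and liquidation price, for longs the higher one; without a
# liquidation price the stop is used as-is.
def stoploss_or_liquidation(stop_loss, liquidation_price, is_short):
    if liquidation_price:
        return min(stop_loss, liquidation_price) if is_short else max(stop_loss, liquidation_price)
    return stop_loss

assert stoploss_or_liquidation(95.0, 90.0, is_short=False) == 95.0    # long: stop sits above liq
assert stoploss_or_liquidation(105.0, 110.0, is_short=True) == 105.0  # short: stop sits below liq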
@property
def buy_tag(self) -> Optional[str]:
"""
@ -437,6 +449,7 @@ class LocalTrade():
if self.close_date else None),
'close_timestamp': int(self.close_date.replace(
tzinfo=timezone.utc).timestamp() * 1000) if self.close_date else None,
'realized_profit': self.realized_profit or 0.0,
'close_rate': self.close_rate,
'close_rate_requested': self.close_rate_requested,
'close_profit': self.close_profit, # Deprecated
@ -497,7 +510,7 @@ class LocalTrade():
self.max_rate = max(current_price, self.max_rate or self.open_rate)
self.min_rate = min(current_price_low, self.min_rate or self.open_rate)

def set_isolated_liq(self, liquidation_price: Optional[float]):
def set_liquidation_price(self, liquidation_price: Optional[float]):
"""
Method you should use to set self.liquidation price.
Assures stop_loss is not passed the liquidation price
@ -506,22 +519,13 @@ class LocalTrade():
return
self.liquidation_price = liquidation_price

def _set_stop_loss(self, stop_loss: float, percent: float):
def __set_stop_loss(self, stop_loss: float, percent: float):
"""
Method you should use to set self.stop_loss.
Assures stop_loss is not passed the liquidation price
Method used internally to set self.stop_loss.
"""
if self.liquidation_price is not None:
if self.is_short:
sl = min(stop_loss, self.liquidation_price)
else:
sl = max(stop_loss, self.liquidation_price)
else:
sl = stop_loss

if not self.stop_loss:
self.initial_stop_loss = sl
self.stop_loss = sl
self.initial_stop_loss = stop_loss
self.stop_loss = stop_loss

self.stop_loss_pct = -1 * abs(percent)
self.stoploss_last_update = datetime.utcnow()
@ -543,18 +547,12 @@ class LocalTrade():
leverage = self.leverage or 1.0
if self.is_short:
new_loss = float(current_price * (1 + abs(stoploss / leverage)))
# If trading with leverage, don't set the stoploss below the liquidation price
if self.liquidation_price:
new_loss = min(self.liquidation_price, new_loss)
else:
new_loss = float(current_price * (1 - abs(stoploss / leverage)))
# If trading with leverage, don't set the stoploss below the liquidation price
if self.liquidation_price:
new_loss = max(self.liquidation_price, new_loss)

# no stop loss assigned yet
if self.initial_stop_loss_pct is None or refresh:
self._set_stop_loss(new_loss, stoploss)
self.__set_stop_loss(new_loss, stoploss)
self.initial_stop_loss = new_loss
self.initial_stop_loss_pct = -1 * abs(stoploss)

@ -569,7 +567,7 @@ class LocalTrade():
# ? decreasing the minimum stoploss
if (higher_stop and not self.is_short) or (lower_stop and self.is_short):
logger.debug(f"{self.pair} - Adjusting stoploss...")
self._set_stop_loss(new_loss, stoploss)
self.__set_stop_loss(new_loss, stoploss)
else:
logger.debug(f"{self.pair} - Keeping current stoploss...")

@ -601,14 +599,28 @@ class LocalTrade():
if self.is_open:
payment = "SELL" if self.is_short else "BUY"
logger.info(f'{order.order_type.upper()}_{payment} has been fulfilled for {self}.')
# condition to avoid reset value when updating fees
if self.open_order_id == order.order_id:
self.open_order_id = None
else:
logger.warning(
f'Got different open_order_id {self.open_order_id} != {order.order_id}')
self.recalc_trade_from_orders()
elif order.ft_order_side == self.exit_side:
if self.is_open:
payment = "BUY" if self.is_short else "SELL"
# * On margin shorts, you buy a little bit more than the amount (amount + interest)
logger.info(f'{order.order_type.upper()}_{payment} has been fulfilled for {self}.')
# condition to avoid reset value when updating fees
if self.open_order_id == order.order_id:
self.open_order_id = None
else:
logger.warning(
f'Got different open_order_id {self.open_order_id} != {order.order_id}')
if isclose(order.safe_amount_after_fee, self.amount, abs_tol=MATH_CLOSE_PREC):
self.close(order.safe_price)
else:
self.recalc_trade_from_orders()
elif order.ft_order_side == 'stoploss':
self.stoploss_order_id = None
self.close_rate_requested = self.stop_loss
@ -627,11 +639,11 @@ class LocalTrade():
"""
self.close_rate = rate
self.close_date = self.close_date or datetime.utcnow()
self.close_profit = self.calc_profit_ratio(rate)
self.close_profit_abs = self.calc_profit(rate)
self.close_profit_abs = self.calc_profit(rate) + self.realized_profit
self.is_open = False
self.exit_order_status = 'closed'
self.open_order_id = None
self.recalc_trade_from_orders(is_closing=True)
if show_msg:
logger.info(
'Marking %s as closed as the trade is fulfilled and found no open orders for it.',
@ -677,12 +689,12 @@ class LocalTrade():
"""
return len([o for o in self.orders if o.ft_order_side == self.exit_side])

def _calc_open_trade_value(self) -> float:
def _calc_open_trade_value(self, amount: float, open_rate: float) -> float:
"""
Calculate the open_rate including open_fee.
:return: Price in of the open trade incl. Fees
"""
open_trade = Decimal(self.amount) * Decimal(self.open_rate)
open_trade = Decimal(amount) * Decimal(open_rate)
fees = open_trade * Decimal(self.fee_open)
if self.is_short:
return float(open_trade - fees)
@ -694,7 +706,7 @@ class LocalTrade():
Recalculate open_trade_value.
Must be called whenever open_rate, fee_open is changed.
"""
self.open_trade_value = self._calc_open_trade_value()
self.open_trade_value = self._calc_open_trade_value(self.amount, self.open_rate)

def calculate_interest(self) -> Decimal:
"""
@ -726,7 +738,7 @@ class LocalTrade():
else:
return close_trade - fees

def calc_close_trade_value(self, rate: float) -> float:
def calc_close_trade_value(self, rate: float, amount: float = None) -> float:
"""
Calculate the Trade's close value including fees
:param rate: rate to compare with.
@ -735,96 +747,143 @@ class LocalTrade():
if rate is None and not self.close_rate:
return 0.0

amount = Decimal(self.amount)
amount1 = Decimal(amount or self.amount)
trading_mode = self.trading_mode or TradingMode.SPOT

if trading_mode == TradingMode.SPOT:
return float(self._calc_base_close(amount, rate, self.fee_close))
return float(self._calc_base_close(amount1, rate, self.fee_close))

elif (trading_mode == TradingMode.MARGIN):

total_interest = self.calculate_interest()

if self.is_short:
amount = amount + total_interest
return float(self._calc_base_close(amount, rate, self.fee_close))
amount1 = amount1 + total_interest
return float(self._calc_base_close(amount1, rate, self.fee_close))
else:
# Currency already owned for longs, no need to purchase
return float(self._calc_base_close(amount, rate, self.fee_close) - total_interest)
return float(self._calc_base_close(amount1, rate, self.fee_close) - total_interest)

elif (trading_mode == TradingMode.FUTURES):
funding_fees = self.funding_fees or 0.0
# Positive funding_fees -> Trade has gained from fees.
# Negative funding_fees -> Trade had to pay the fees.
if self.is_short:
return float(self._calc_base_close(amount, rate, self.fee_close)) - funding_fees
return float(self._calc_base_close(amount1, rate, self.fee_close)) - funding_fees
else:
return float(self._calc_base_close(amount, rate, self.fee_close)) + funding_fees
return float(self._calc_base_close(amount1, rate, self.fee_close)) + funding_fees
else:
raise OperationalException(
f"{self.trading_mode.value} trading is not yet available using freqtrade")

def calc_profit(self, rate: float) -> float:
def calc_profit(self, rate: float, amount: float = None, open_rate: float = None) -> float:
"""
Calculate the absolute profit in stake currency between Close and Open trade
:param rate: close rate to compare with.
:param amount: Amount to use for the calculation. Falls back to trade.amount if not set.
:param open_rate: open_rate to use. Defaults to self.open_rate if not provided.
:return: profit in stake currency as float
"""
close_trade_value = self.calc_close_trade_value(rate)
close_trade_value = self.calc_close_trade_value(rate, amount)
if amount is None or open_rate is None:
open_trade_value = self.open_trade_value
else:
open_trade_value = self._calc_open_trade_value(amount, open_rate)

if self.is_short:
profit = self.open_trade_value - close_trade_value
profit = open_trade_value - close_trade_value
else:
profit = close_trade_value - self.open_trade_value
profit = close_trade_value - open_trade_value
return float(f"{profit:.8f}")

def calc_profit_ratio(self, rate: float) -> float:
def calc_profit_ratio(
self, rate: float, amount: float = None, open_rate: float = None) -> float:
"""
Calculates the profit as ratio (including fee).
:param rate: rate to compare with.
:param amount: Amount to use for the calculation. Falls back to trade.amount if not set.
:param open_rate: open_rate to use. Defaults to self.open_rate if not provided.
:return: profit ratio as float
"""
close_trade_value = self.calc_close_trade_value(rate)
close_trade_value = self.calc_close_trade_value(rate, amount)

if amount is None or open_rate is None:
open_trade_value = self.open_trade_value
else:
open_trade_value = self._calc_open_trade_value(amount, open_rate)

short_close_zero = (self.is_short and close_trade_value == 0.0)
long_close_zero = (not self.is_short and self.open_trade_value == 0.0)
long_close_zero = (not self.is_short and open_trade_value == 0.0)
leverage = self.leverage or 1.0

if (short_close_zero or long_close_zero):
return 0.0
else:
if self.is_short:
profit_ratio = (1 - (close_trade_value / self.open_trade_value)) * leverage
profit_ratio = (1 - (close_trade_value / open_trade_value)) * leverage
else:
profit_ratio = ((close_trade_value / self.open_trade_value) - 1) * leverage
profit_ratio = ((close_trade_value / open_trade_value) - 1) * leverage

return float(f"{profit_ratio:.8f}")

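# [Editor's illustrative sketch - not part of the diff above.] With the new optional
# `amount` and `open_rate` arguments, profit for a partial exit of a long spot trade
# reduces to roughly the arithmetic below (fee handling mirrors _calc_open_trade_value
# and _calc_base_close; margin/futures/short cases differ as shown in the hunks).
def partial_profit(amount, open_rate, close_rate, fee_open=0.001, fee_close=0.001):
    open_value = amount * open_rate * (1 + fee_open)
    close_value = amount * close_rate * (1 - fee_close)
    return close_value - open_value

# Exiting 0.5 units of a position opened at 100 and sold at 110:
print(round(partial_profit(0.5, 100.0, 110.0), 4))  # ~4.895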
def recalc_trade_from_orders(self):
def recalc_trade_from_orders(self, is_closing: bool = False):

current_amount = 0.0
current_stake = 0.0
total_stake = 0.0 # Total stake after all buy orders (does not subtract!)
avg_price = 0.0
close_profit = 0.0
close_profit_abs = 0.0

total_amount = 0.0
total_stake = 0.0
for o in self.orders:
if (o.ft_is_open or
(o.ft_order_side != self.entry_side) or
(o.status not in NON_OPEN_EXCHANGE_STATES)):
if o.ft_is_open or not o.filled:
continue

tmp_amount = o.safe_amount_after_fee
tmp_price = o.average or o.price
if tmp_amount > 0.0 and tmp_price is not None:
total_amount += tmp_amount
total_stake += tmp_price * tmp_amount
tmp_price = o.safe_price

if total_amount > 0:
is_exit = o.ft_order_side != self.entry_side
side = -1 if is_exit else 1
if tmp_amount > 0.0 and tmp_price is not None:
current_amount += tmp_amount * side
price = avg_price if is_exit else tmp_price
current_stake += price * tmp_amount * side

if current_amount > 0:
avg_price = current_stake / current_amount

if is_exit:
# Process partial exits
exit_rate = o.safe_price
exit_amount = o.safe_amount_after_fee
profit = self.calc_profit(rate=exit_rate, amount=exit_amount, open_rate=avg_price)
close_profit_abs += profit
close_profit = self.calc_profit_ratio(
exit_rate, amount=exit_amount, open_rate=avg_price)
if current_amount <= 0:
profit = close_profit_abs
else:
total_stake = total_stake + self._calc_open_trade_value(tmp_amount, price)

if close_profit:
self.close_profit = close_profit
self.realized_profit = close_profit_abs
self.close_profit_abs = profit

if current_amount > 0:
# Trade is still open
# Leverage not updated, as we don't allow changing leverage through DCA at the moment.
self.open_rate = total_stake / total_amount
self.stake_amount = total_stake / (self.leverage or 1.0)
self.amount = total_amount
self.fee_open_cost = self.fee_open * total_stake
self.open_rate = current_stake / current_amount
self.stake_amount = current_stake / (self.leverage or 1.0)
self.amount = current_amount
self.fee_open_cost = self.fee_open * current_stake
self.recalc_open_trade_value()
if self.stop_loss_pct is not None and self.open_rate is not None:
self.adjust_stop_loss(self.open_rate, self.stop_loss_pct)
elif is_closing and total_stake > 0:
# Close profit abs / maximum owned
# Fees are considered as they are part of close_profit_abs
self.close_profit = (close_profit_abs / total_stake) * self.leverage

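# [Editor's illustrative sketch - not part of the diff above, fees ignored.] The
# reworked recalc_trade_from_orders() nets entries and exits: entries add to the
# position at their fill price, exits reduce it at the running average price, and
# the gap between exit price and that average becomes realized profit.
orders = [("buy", 1.0, 100.0), ("buy", 1.0, 110.0), ("sell", 0.5, 120.0)]
current_amount = current_stake = realized = avg_price = 0.0
for side, amount, price in orders:
    sign = 1 if side == "buy" else -1
    use_price = price if side == "buy" else avg_price
    current_amount += amount * sign
    current_stake += use_price * amount * sign
    if side == "sell":
        realized += (price - avg_price) * amount
    if current_amount > 0:
        avg_price = current_stake / current_amount
print(avg_price, realized)  # 105.0 7.5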
def select_order_by_order_id(self, order_id: str) -> Optional[Order]:
"""
@ -846,7 +905,7 @@ class LocalTrade():
"""
orders = self.orders
if order_side:
orders = [o for o in self.orders if o.ft_order_side == order_side]
orders = [o for o in orders if o.ft_order_side == order_side]
if is_open is not None:
orders = [o for o in orders if o.ft_is_open == is_open]
if len(orders) > 0:
@ -861,9 +920,9 @@ class LocalTrade():
:return: array of Order objects
"""
return [o for o in self.orders if ((o.ft_order_side == order_side) or (order_side is None))
and o.ft_is_open is False and
(o.filled or 0) > 0 and
o.status in NON_OPEN_EXCHANGE_STATES]
and o.ft_is_open is False
and o.filled
and o.status in NON_OPEN_EXCHANGE_STATES]

def select_filled_or_open_orders(self) -> List['Order']:
"""
@ -1028,6 +1087,7 @@ class Trade(_DECL_BASE, LocalTrade):
open_trade_value = Column(Float)
close_rate: Optional[float] = Column(Float)
close_rate_requested = Column(Float)
realized_profit = Column(Float, default=0.0)
close_profit = Column(Float)
close_profit_abs = Column(Float)
stake_amount = Column(Float, nullable=False)
@ -1073,6 +1133,7 @@ class Trade(_DECL_BASE, LocalTrade):

def __init__(self, **kwargs):
super().__init__(**kwargs)
self.realized_profit = 0
self.recalc_open_trade_value()

def delete(self) -> None:
@ -1087,6 +1148,10 @@ class Trade(_DECL_BASE, LocalTrade):
def commit():
Trade.query.session.commit()

@staticmethod
def rollback():
Trade.query.session.rollback()

@staticmethod
def get_trades_proxy(*, pair: str = None, is_open: bool = None,
open_date: datetime = None, close_date: datetime = None,

@ -8,11 +8,11 @@ from typing import Any, Dict, List, Optional
import arrow
from pandas import DataFrame

from freqtrade.configuration import PeriodicCache
from freqtrade.constants import ListPairsWithTimeframes
from freqtrade.exceptions import OperationalException
from freqtrade.misc import plural
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.util import PeriodicCache


logger = logging.getLogger(__name__)

@ -43,12 +43,10 @@ def expand_pairlist(wildcardpl: List[str], available_pairs: List[str],


def dynamic_expand_pairlist(config: dict, markets: list) -> List[str]:
if config.get('freqai', {}):
corr_pairlist = config['freqai']['feature_parameters']['include_corr_pairlist']
full_pairs = config['pairs'] + [pair for pair in corr_pairlist
if pair not in config['pairs']]
expanded_pairs = expand_pairlist(full_pairs, markets)
else:
expanded_pairs = expand_pairlist(config['pairs'], markets)
if config.get('freqai', {}).get('enabled', False):
corr_pairlist = config['freqai']['feature_parameters']['include_corr_pairlist']
expanded_pairs += [pair for pair in corr_pairlist
if pair not in config['pairs']]

return expanded_pairs

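# [Editor's illustrative sketch - not part of the diff above; wildcard expansion via
# expand_pairlist() is omitted.] The change makes correlation pairs get appended only
# when the freqai section is actually enabled, not merely present in the config.
config = {
    "pairs": ["BTC/USDT", "ETH/USDT"],
    "freqai": {"enabled": False,
               "feature_parameters": {"include_corr_pairlist": ["BTC/USDT", "DOT/USDT"]}},
}
expanded = list(config["pairs"])
if config.get("freqai", {}).get("enabled", False):
    expanded += [p for p in config["freqai"]["feature_parameters"]["include_corr_pairlist"]
                 if p not in config["pairs"]]
print(expanded)  # ['BTC/USDT', 'ETH/USDT'] - DOT/USDT is only added once enabled is True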
@ -49,7 +49,7 @@ class StoplossGuard(IProtection):
trades1 = Trade.get_trades_proxy(pair=pair, is_open=False, close_date=look_back_until)
trades = [trade for trade in trades1 if (str(trade.exit_reason) in (
ExitType.TRAILING_STOP_LOSS.value, ExitType.STOP_LOSS.value,
ExitType.STOPLOSS_ON_EXCHANGE.value)
ExitType.STOPLOSS_ON_EXCHANGE.value, ExitType.LIQUIDATION.value)
and trade.close_profit and trade.close_profit < self._profit_limit)]

if self._only_per_side:

@ -194,11 +194,11 @@ class OrderSchema(BaseModel):
pair: str
order_id: str
status: str
remaining: float
remaining: Optional[float]
amount: float
safe_price: float
cost: float
filled: float
filled: Optional[float]
ft_order_side: str
order_type: str
is_open: bool
@ -325,11 +325,13 @@ class ForceEnterPayload(BaseModel):
ordertype: Optional[OrderTypeValues]
stakeamount: Optional[float]
entry_tag: Optional[str]
leverage: Optional[float]


class ForceExitPayload(BaseModel):
tradeid: str
ordertype: Optional[OrderTypeValues]
amount: Optional[float]


class BlacklistPayload(BaseModel):

@ -37,7 +37,8 @@ logger = logging.getLogger(__name__)
# 2.14: Add entry/exit orders to trade response
# 2.15: Add backtest history endpoints
# 2.16: Additional daily metrics
API_VERSION = 2.16
# 2.17: Forceentry - leverage, partial force_exit
API_VERSION = 2.17

# Public API, requires no auth.
router_public = APIRouter()
@ -142,12 +143,11 @@ def show_config(rpc: Optional[RPC] = Depends(get_rpc_optional), config=Depends(g
@router.post('/forcebuy', response_model=ForceEnterResponse, tags=['trading'])
def force_entry(payload: ForceEnterPayload, rpc: RPC = Depends(get_rpc)):
ordertype = payload.ordertype.value if payload.ordertype else None
stake_amount = payload.stakeamount if payload.stakeamount else None
entry_tag = payload.entry_tag if payload.entry_tag else 'force_entry'

trade = rpc._rpc_force_entry(payload.pair, payload.price, order_side=payload.side,
order_type=ordertype, stake_amount=stake_amount,
enter_tag=entry_tag)
order_type=ordertype, stake_amount=payload.stakeamount,
enter_tag=payload.entry_tag or 'force_entry',
leverage=payload.leverage)

if trade:
return ForceEnterResponse.parse_obj(trade.to_json())
@ -161,7 +161,7 @@ def force_entry(payload: ForceEnterPayload, rpc: RPC = Depends(get_rpc)):
@router.post('/forcesell', response_model=ResultMsg, tags=['trading'])
def forceexit(payload: ForceExitPayload, rpc: RPC = Depends(get_rpc)):
ordertype = payload.ordertype.value if payload.ordertype else None
return rpc._rpc_force_exit(payload.tradeid, ordertype)
return rpc._rpc_force_exit(payload.tradeid, ordertype, amount=payload.amount)


@router.get('/blacklist', response_model=BlacklistResponse, tags=['info', 'pairlist'])

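# [Editor's example - not part of the diff above.] With the new optional `amount`
# field on ForceExitPayload, a partial exit can be requested over the REST API.
# The base URL, the /api/v1 prefix and the basic-auth credentials below are
# assumptions for a locally running bot and must match your own configuration.
import requests

resp = requests.post(
    "http://127.0.0.1:8080/api/v1/forcesell",
    json={"tradeid": "1", "ordertype": "limit", "amount": 0.005},  # amount is optional
    auth=("freqtrader", "password"),
    timeout=10,
)
print(resp.status_code, resp.json())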
@ -18,9 +18,9 @@ def get_rpc_optional() -> Optional[RPC]:
def get_rpc() -> Optional[Iterator[RPC]]:
_rpc = get_rpc_optional()
if _rpc:
Trade.query.session.rollback()
Trade.rollback()
yield _rpc
Trade.query.session.rollback()
Trade.rollback()
else:
raise RPCException('Bot is not in the correct state')

@ -1,4 +1,5 @@
from pathlib import Path
from typing import Optional

from fastapi import APIRouter
from fastapi.exceptions import HTTPException
@ -50,8 +51,12 @@ async def index_html(rest_of_path: str):
filename = uibase / rest_of_path
# It's security relevant to check "relative_to".
# Without this, Directory-traversal is possible.
media_type: Optional[str] = None
if filename.suffix == '.js':
# Force text/javascript for .js files - Circumvent faulty system configuration
media_type = 'application/javascript'
if filename.is_file() and is_relative_to(filename, uibase):
return FileResponse(str(filename))
return FileResponse(str(filename), media_type=media_type)

index_file = uibase / 'index.html'
if not index_file.is_file():

@ -12,6 +12,7 @@ from pycoingecko import CoinGeckoAPI
from requests.exceptions import RequestException

from freqtrade.constants import SUPPORTED_FIAT
from freqtrade.mixins.logging_mixin import LoggingMixin


logger = logging.getLogger(__name__)
@ -27,7 +28,7 @@ coingecko_mapping = {
}


class CryptoToFiatConverter:
class CryptoToFiatConverter(LoggingMixin):
"""
Main class to initiate Crypto to FIAT.
This object contains a list of pair Crypto, FIAT
@ -54,6 +55,7 @@ class CryptoToFiatConverter:
# Timeout: 6h
self._pair_price: TTLCache = TTLCache(maxsize=500, ttl=6 * 60 * 60)

LoggingMixin.__init__(self, logger, 3600)
self._load_cryptomap()

def _load_cryptomap(self) -> None:
@ -177,7 +179,9 @@ class CryptoToFiatConverter:

if not _gekko_id:
# return 0 for unsupported stake currencies (fiat-convert should not break the bot)
logger.warning("unsupported crypto-symbol %s - returning 0.0", crypto_symbol)
self.log_once(
f"unsupported crypto-symbol {crypto_symbol.upper()} - returning 0.0",
logger.warning)
return 0.0

try:

@ -179,8 +179,10 @@ class RPC:
else:
current_rate = trade.close_rate
if len(trade.select_filled_orders(trade.entry_side)) > 0:
current_profit = trade.calc_profit_ratio(current_rate)
current_profit_abs = trade.calc_profit(current_rate)
current_profit = trade.calc_profit_ratio(
current_rate) if not isnan(current_rate) else NAN
current_profit_abs = trade.calc_profit(
current_rate) if not isnan(current_rate) else NAN
current_profit_fiat: Optional[float] = None
# Calculate fiat profit
if self._fiat_converter:
@ -201,7 +203,7 @@ class RPC:

trade_dict = trade.to_json()
trade_dict.update(dict(
close_profit=trade.close_profit if trade.close_profit is not None else None,
close_profit=trade.close_profit if not trade.is_open else None,
current_rate=current_rate,
current_profit=current_profit, # Deprecated
current_profit_pct=round(current_profit * 100, 2), # Deprecated
@ -239,7 +241,10 @@ class RPC:
trade.pair, side='exit', is_short=trade.is_short, refresh=False)
except (PricingError, ExchangeError):
current_rate = NAN
if len(trade.select_filled_orders(trade.entry_side)) > 0:
trade_profit = NAN
profit_str = f'{NAN:.2%}'
else:
if trade.nr_of_successful_entries > 0:
trade_profit = trade.calc_profit(current_rate)
profit_str = f'{trade.calc_profit_ratio(current_rate):.2%}'
else:
@ -424,21 +429,20 @@ class RPC:
for trade in trades:
current_rate: float = 0.0

if not trade.open_rate:
continue
if trade.close_date:
durations.append((trade.close_date - trade.open_date).total_seconds())

if not trade.is_open:
profit_ratio = trade.close_profit
profit_closed_coin.append(trade.close_profit_abs)
profit_abs = trade.close_profit_abs
profit_closed_coin.append(profit_abs)
profit_closed_ratio.append(profit_ratio)
if trade.close_profit >= 0:
winning_trades += 1
winning_profit += trade.close_profit_abs
winning_profit += profit_abs
else:
losing_trades += 1
losing_profit += trade.close_profit_abs
losing_profit += profit_abs
else:
# Get current rate
try:
@ -446,11 +450,15 @@ class RPC:
trade.pair, side='exit', is_short=trade.is_short, refresh=False)
except (PricingError, ExchangeError):
current_rate = NAN
if isnan(current_rate):
profit_ratio = NAN
profit_abs = NAN
else:
profit_ratio = trade.calc_profit_ratio(rate=current_rate)
profit_abs = trade.calc_profit(
rate=trade.close_rate or current_rate) + trade.realized_profit

profit_all_coin.append(
trade.calc_profit(rate=trade.close_rate or current_rate)
)
profit_all_coin.append(profit_abs)
profit_all_ratio.append(profit_ratio)

best_pair = Trade.get_best_pair(start_date)
@ -659,12 +667,8 @@ class RPC:

return {'status': 'No more buy will occur from now. Run /reload_config to reset.'}

def _rpc_force_exit(self, trade_id: str, ordertype: Optional[str] = None) -> Dict[str, str]:
"""
Handler for forceexit <id>.
Sells the given trade at current price
"""
def _exec_force_exit(trade: Trade) -> None:
def __exec_force_exit(self, trade: Trade, ordertype: Optional[str],
amount: Optional[float] = None) -> None:
# Check if there is an open order
fully_canceled = False
if trade.open_order_id:
@ -685,10 +689,26 @@ class RPC:
exit_check = ExitCheckTuple(exit_type=ExitType.FORCE_EXIT)
order_type = ordertype or self._freqtrade.strategy.order_types.get(
"force_exit", self._freqtrade.strategy.order_types["exit"])
sub_amount: Optional[float] = None
if amount and amount < trade.amount:
# Partial exit ...
min_exit_stake = self._freqtrade.exchange.get_min_pair_stake_amount(
trade.pair, current_rate, trade.stop_loss_pct)
remaining = (trade.amount - amount) * current_rate
if remaining < min_exit_stake:
raise RPCException(f'Remaining amount of {remaining} would be too small.')
sub_amount = amount

self._freqtrade.execute_trade_exit(
trade, current_rate, exit_check, ordertype=order_type)
# ---- EOF def _exec_forcesell ----
trade, current_rate, exit_check, ordertype=order_type,
sub_trade_amt=sub_amount)

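# [Editor's illustrative sketch - not part of the diff above; numbers are invented.]
# The guard added in __exec_force_exit rejects a partial force-exit when the value
# left in the trade would drop below the exchange's minimum stake.
trade_amount = 0.010
requested_amount = 0.009
current_rate = 20_000.0
min_exit_stake = 25.0  # illustrative exchange minimum in stake currency

remaining = (trade_amount - requested_amount) * current_rate  # 20.0
if remaining < min_exit_stake:
    print(f"Remaining amount of {remaining} would be too small.")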
def _rpc_force_exit(self, trade_id: str, ordertype: Optional[str] = None, *,
amount: Optional[float] = None) -> Dict[str, str]:
"""
Handler for forceexit <id>.
Sells the given trade at current price
"""

if self._freqtrade.state != State.RUNNING:
raise RPCException('trader is not running')
@ -697,7 +717,7 @@ class RPC:
if trade_id == 'all':
# Execute sell for all open orders
for trade in Trade.get_open_trades():
_exec_force_exit(trade)
self.__exec_force_exit(trade, ordertype)
Trade.commit()
self._freqtrade.wallets.update()
return {'result': 'Created sell orders for all open trades.'}
@ -710,7 +730,7 @@ class RPC:
logger.warning('force_exit: Invalid argument received')
raise RPCException('invalid argument')

_exec_force_exit(trade)
self.__exec_force_exit(trade, ordertype, amount)
Trade.commit()
self._freqtrade.wallets.update()
return {'result': f'Created sell order for trade {trade_id}.'}
@ -719,7 +739,8 @@ class RPC:
order_type: Optional[str] = None,
order_side: SignalDirection = SignalDirection.LONG,
stake_amount: Optional[float] = None,
enter_tag: Optional[str] = 'force_entry') -> Optional[Trade]:
enter_tag: Optional[str] = 'force_entry',
leverage: Optional[float] = None) -> Optional[Trade]:
"""
Handler for forcebuy <asset> <price>
Buys a pair trade at the given or current price
@ -761,6 +782,7 @@ class RPC:
ordertype=order_type, trade=trade,
is_short=is_short,
enter_tag=enter_tag,
leverage_=leverage,
):
Trade.commit()
trade = Trade.get_trades([Trade.is_open.is_(True), Trade.pair == pair]).first()
@ -875,7 +897,7 @@ class RPC:
lock.active = False
lock.lock_end_time = datetime.now(timezone.utc)

PairLock.query.session.commit()
Trade.commit()

return self._rpc_locks()

@ -2,6 +2,7 @@
This module contains class to manage RPC communications (Telegram, API, ...)
"""
import logging
from collections import deque
from typing import Any, Dict, List

from freqtrade.enums import RPCMessageType
@ -77,6 +78,17 @@ class RPCManager:
except NotImplementedError:
logger.error(f"Message type '{msg['type']}' not implemented by handler {mod.name}.")

def process_msg_queue(self, queue: deque) -> None:
"""
Process all messages in the queue.
"""
while queue:
msg = queue.popleft()
self.send_msg({
'type': RPCMessageType.STRATEGY_MSG,
'msg': msg,
})

def startup_messages(self, config: Dict[str, Any], pairlist, protections) -> None:
if config['dry_run']:
self.send_msg({

@ -16,8 +16,8 @@ from typing import Any, Callable, Dict, List, Optional, Union

import arrow
from tabulate import tabulate
from telegram import (CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, KeyboardButton,
ParseMode, ReplyKeyboardMarkup, Update)
from telegram import (MAX_MESSAGE_LENGTH, CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup,
KeyboardButton, ParseMode, ReplyKeyboardMarkup, Update)
from telegram.error import BadRequest, NetworkError, TelegramError
from telegram.ext import CallbackContext, CallbackQueryHandler, CommandHandler, Updater
from telegram.utils.helpers import escape_markdown
@ -35,8 +35,6 @@ logger = logging.getLogger(__name__)

logger.debug('Included module rpc.telegram ...')

MAX_TELEGRAM_MESSAGE_LENGTH = 4096


@dataclass
class TimeunitMappings:
@ -72,7 +70,7 @@ def authorized_only(command_handler: Callable[..., None]) -> Callable[..., Any]:
)
return wrapper
# Rollback session to avoid getting data stored in a transaction.
Trade.query.session.rollback()
Trade.rollback()
logger.debug(
'Executing handler: %s for chat_id: %s',
command_handler.__name__,
@ -315,20 +313,36 @@ class Telegram(RPCHandler):
msg['profit_fiat'] = self._rpc._fiat_converter.convert_amount(
msg['profit_amount'], msg['stake_currency'], msg['fiat_currency'])
msg['profit_extra'] = (
f" ({msg['gain']}: {msg['profit_amount']:.8f} {msg['stake_currency']}"
f" / {msg['profit_fiat']:.3f} {msg['fiat_currency']})")
f" / {msg['profit_fiat']:.3f} {msg['fiat_currency']}")
else:
msg['profit_extra'] = ''
msg['profit_extra'] = (
f" ({msg['gain']}: {msg['profit_amount']:.8f} {msg['stake_currency']}"
f"{msg['profit_extra']})")
is_fill = msg['type'] == RPCMessageType.EXIT_FILL
is_sub_trade = msg.get('sub_trade')
is_sub_profit = msg['profit_amount'] != msg.get('cumulative_profit')
profit_prefix = ('Sub ' if is_sub_profit
else 'Cumulative ') if is_sub_trade else ''
cp_extra = ''
if is_sub_profit and is_sub_trade:
if self._rpc._fiat_converter:
cp_fiat = self._rpc._fiat_converter.convert_amount(
msg['cumulative_profit'], msg['stake_currency'], msg['fiat_currency'])
cp_extra = f" / {cp_fiat:.3f} {msg['fiat_currency']}"
else:
cp_extra = ''
cp_extra = f"*Cumulative Profit:* (`{msg['cumulative_profit']:.8f} " \
f"{msg['stake_currency']}{cp_extra}`)\n"
message = (
f"{msg['emoji']} *{self._exchange_from_msg(msg)}:* "
f"{'Exited' if is_fill else 'Exiting'} {msg['pair']} (#{msg['trade_id']})\n"
f"{self._add_analyzed_candle(msg['pair'])}"
f"*{'Profit' if is_fill else 'Unrealized Profit'}:* "
f"*{f'{profit_prefix}Profit' if is_fill else f'Unrealized {profit_prefix}Profit'}:* "
f"`{msg['profit_ratio']:.2%}{msg['profit_extra']}`\n"
f"{cp_extra}"
f"*Enter Tag:* `{msg['enter_tag']}`\n"
f"*Exit Reason:* `{msg['exit_reason']}`\n"
f"*Duration:* `{msg['duration']} ({msg['duration_min']:.1f} min)`\n"
f"*Direction:* `{msg['direction']}`\n"
f"{msg['leverage_text']}"
f"*Amount:* `{msg['amount']:.8f}`\n"
@ -336,11 +350,25 @@ class Telegram(RPCHandler):
)
if msg['type'] == RPCMessageType.EXIT:
message += (f"*Current Rate:* `{msg['current_rate']:.8f}`\n"
f"*Close Rate:* `{msg['limit']:.8f}`")
f"*Exit Rate:* `{msg['limit']:.8f}`")

elif msg['type'] == RPCMessageType.EXIT_FILL:
message += f"*Close Rate:* `{msg['close_rate']:.8f}`"
message += f"*Exit Rate:* `{msg['close_rate']:.8f}`"
if msg.get('sub_trade'):
if self._rpc._fiat_converter:
msg['stake_amount_fiat'] = self._rpc._fiat_converter.convert_amount(
msg['stake_amount'], msg['stake_currency'], msg['fiat_currency'])
else:
msg['stake_amount_fiat'] = 0
rem = round_coin_value(msg['stake_amount'], msg['stake_currency'])
message += f"\n*Remaining:* `({rem}"

if msg.get('fiat_currency', None):
message += f", {round_coin_value(msg['stake_amount_fiat'], msg['fiat_currency'])}"

message += ")`"
else:
message += f"\n*Duration:* `{msg['duration']} ({msg['duration_min']:.1f} min)`"
return message

def compose_message(self, msg: Dict[str, Any], msg_type: RPCMessageType) -> str:
@ -353,7 +381,8 @@ class Telegram(RPCHandler):
elif msg_type in (RPCMessageType.ENTRY_CANCEL, RPCMessageType.EXIT_CANCEL):
msg['message_side'] = 'enter' if msg_type in [RPCMessageType.ENTRY_CANCEL] else 'exit'
message = (f"\N{WARNING SIGN} *{self._exchange_from_msg(msg)}:* "
f"Cancelling {msg['message_side']} Order for {msg['pair']} "
f"Cancelling {'partial ' if msg.get('sub_trade') else ''}"
f"{msg['message_side']} Order for {msg['pair']} "
f"(#{msg['trade_id']}). Reason: {msg['reason']}.")

elif msg_type == RPCMessageType.PROTECTION_TRIGGER:
@ -376,7 +405,8 @@ class Telegram(RPCHandler):

elif msg_type == RPCMessageType.STARTUP:
message = f"{msg['status']}"

elif msg_type == RPCMessageType.STRATEGY_MSG:
message = f"{msg['msg']}"
else:
raise NotImplementedError(f"Unknown message type: {msg_type}")
return message
@ -423,54 +453,63 @@ class Telegram(RPCHandler):
else:
return "\N{CROSS MARK}"

def _prepare_entry_details(self, filled_orders: List, quote_currency: str, is_open: bool):
def _prepare_order_details(self, filled_orders: List, quote_currency: str, is_open: bool):
"""
Prepare details of trade with entry adjustment enabled
"""
lines: List[str] = []
lines_detail: List[str] = []
if len(filled_orders) > 0:
first_avg = filled_orders[0]["safe_price"]

for x, order in enumerate(filled_orders):
if not order['ft_is_entry'] or order['is_open'] is True:
lines: List[str] = []
if order['is_open'] is True:
continue
wording = 'Entry' if order['ft_is_entry'] else 'Exit'

cur_entry_datetime = arrow.get(order["order_filled_date"])
cur_entry_amount = order["amount"]
cur_entry_amount = order["filled"] or order["amount"]
cur_entry_average = order["safe_price"]
lines.append(" ")
if x == 0:
lines.append(f"*Entry #{x+1}:*")
lines.append(f"*{wording} #{x+1}:*")
lines.append(
f"*Entry Amount:* {cur_entry_amount} ({order['cost']:.8f} {quote_currency})")
lines.append(f"*Average Entry Price:* {cur_entry_average}")
f"*Amount:* {cur_entry_amount} ({order['cost']:.8f} {quote_currency})")
lines.append(f"*Average Price:* {cur_entry_average}")
else:
sumA = 0
sumB = 0
for y in range(x):
sumA += (filled_orders[y]["amount"] * filled_orders[y]["safe_price"])
sumB += filled_orders[y]["amount"]
amount = filled_orders[y]["filled"] or filled_orders[y]["amount"]
sumA += amount * filled_orders[y]["safe_price"]
sumB += amount
prev_avg_price = sumA / sumB
# TODO: This calculation ignores fees.
price_to_1st_entry = ((cur_entry_average - first_avg) / first_avg)
minus_on_entry = 0
if prev_avg_price:
minus_on_entry = (cur_entry_average - prev_avg_price) / prev_avg_price

dur_entry = cur_entry_datetime - arrow.get(
filled_orders[x - 1]["order_filled_date"])
days = dur_entry.days
hours, remainder = divmod(dur_entry.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
lines.append(f"*Entry #{x+1}:* at {minus_on_entry:.2%} avg profit")
lines.append(f"*{wording} #{x+1}:* at {minus_on_entry:.2%} avg profit")
if is_open:
lines.append("({})".format(cur_entry_datetime
.humanize(granularity=["day", "hour", "minute"])))
lines.append(
f"*Entry Amount:* {cur_entry_amount} ({order['cost']:.8f} {quote_currency})")
lines.append(f"*Average Entry Price:* {cur_entry_average} "
f"*Amount:* {cur_entry_amount} ({order['cost']:.8f} {quote_currency})")
lines.append(f"*Average {wording} Price:* {cur_entry_average} "
f"({price_to_1st_entry:.2%} from 1st entry rate)")
lines.append(f"*Order filled at:* {order['order_filled_date']}")
lines.append(f"({days}d {hours}h {minutes}m {seconds}s from previous entry)")
return lines
lines.append(f"*Order filled:* {order['order_filled_date']}")

# TODO: is this really useful?
# dur_entry = cur_entry_datetime - arrow.get(
# filled_orders[x - 1]["order_filled_date"])
# days = dur_entry.days
# hours, remainder = divmod(dur_entry.seconds, 3600)
# minutes, seconds = divmod(remainder, 60)
# lines.append(
# f"({days}d {hours}h {minutes}m {seconds}s from previous {wording.lower()})")
lines_detail.append("\n".join(lines))
return lines_detail

@authorized_only
def _status(self, update: Update, context: CallbackContext) -> None:
@ -485,7 +524,14 @@ class Telegram(RPCHandler):
if context.args and 'table' in context.args:
self._status_table(update, context)
return
else:
self._status_msg(update, context)

def _status_msg(self, update: Update, context: CallbackContext) -> None:
"""
handler for `/status` and `/status <id>`.

"""
try:

# Check if there's at least one numerical ID provided.
@ -497,7 +543,6 @@ class Telegram(RPCHandler):
results = self._rpc._rpc_trade_status(trade_ids=trade_ids)
position_adjust = self._config.get('position_adjustment_enable', False)
max_entries = self._config.get('max_entry_position_adjustment', -1)
messages = []
for r in results:
r['open_date_hum'] = arrow.get(r['open_date']).humanize()
r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']])
@ -528,6 +573,8 @@ class Telegram(RPCHandler):
])

if r['is_open']:
if r.get('realized_profit'):
lines.append("*Realized Profit:* `{realized_profit:.8f}`")
if (r['stop_loss_abs'] != r['initial_stop_loss_abs']
and r['initial_stop_loss_ratio'] is not None):
# Adding initial stoploss only if it is different from stoploss
@ -540,24 +587,34 @@ class Telegram(RPCHandler):
lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` "
"`({stoploss_current_dist_ratio:.2%})`")
if r['open_order']:
if r['exit_order_status']:
lines.append("*Open Order:* `{open_order}` - `{exit_order_status}`")
else:
lines.append("*Open Order:* `{open_order}`")
lines.append(
"*Open Order:* `{open_order}`"
+ "- `{exit_order_status}`" if r['exit_order_status'] else "")

lines_detail = self._prepare_entry_details(
lines_detail = self._prepare_order_details(
r['orders'], r['quote_currency'], r['is_open'])
lines.extend(lines_detail if lines_detail else "")

# Filter empty lines using list-comprehension
messages.append("\n".join([line for line in lines if line]).format(**r))

for msg in messages:
self._send_msg(msg)
self.__send_status_msg(lines, r)

except RPCException as e:
self._send_msg(str(e))

def __send_status_msg(self, lines: List[str], r: Dict[str, Any]) -> None:
"""
Send status message.
"""
msg = ''

for line in lines:
if line:
if (len(msg) + len(line) + 1) < MAX_MESSAGE_LENGTH:
msg += line + '\n'
else:
self._send_msg(msg.format(**r))
msg = "*Trade ID:* `{trade_id}` - continued\n" + line + '\n'

self._send_msg(msg.format(**r))

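# [Editor's illustrative sketch - not part of the diff above.] The splitting idea in
# __send_status_msg: lines are packed into messages that stay below Telegram's maximum
# message length, and each continuation is prefixed so it can still be attributed.
def chunk_lines(lines, limit, cont_prefix=""):
    msg = ""
    for line in lines:
        if len(msg) + len(line) + 1 < limit:
            msg += line + "\n"
        else:
            yield msg
            msg = cont_prefix + line + "\n"
    yield msg

for part in chunk_lines([f"line {i}" for i in range(5)], limit=25, cont_prefix="(cont) "):
    print(repr(part))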
@authorized_only
def _status_table(self, update: Update, context: CallbackContext) -> None:
"""
@ -860,7 +917,7 @@ class Telegram(RPCHandler):
total_dust_currencies += 1

# Handle overflowing message length
if len(output + curr_output) >= MAX_TELEGRAM_MESSAGE_LENGTH:
if len(output + curr_output) >= MAX_MESSAGE_LENGTH:
self._send_msg(output)
output = curr_output
else:
@ -1123,7 +1180,7 @@ class Telegram(RPCHandler):
f"({trade['profit_ratio']:.2%}) "
f"({trade['count']})</code>\n")

if len(output + stat_line) >= MAX_TELEGRAM_MESSAGE_LENGTH:
if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
self._send_msg(output, parse_mode=ParseMode.HTML)
output = stat_line
else:
@ -1158,7 +1215,7 @@ class Telegram(RPCHandler):
f"({trade['profit_ratio']:.2%}) "
f"({trade['count']})</code>\n")

if len(output + stat_line) >= MAX_TELEGRAM_MESSAGE_LENGTH:
if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
self._send_msg(output, parse_mode=ParseMode.HTML)
output = stat_line
else:
@ -1193,7 +1250,7 @@ class Telegram(RPCHandler):
f"({trade['profit_ratio']:.2%}) "
f"({trade['count']})</code>\n")

if len(output + stat_line) >= MAX_TELEGRAM_MESSAGE_LENGTH:
if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
self._send_msg(output, parse_mode=ParseMode.HTML)
output = stat_line
else:
@ -1228,7 +1285,7 @@ class Telegram(RPCHandler):
f"({trade['profit']:.2%}) "
f"({trade['count']})</code>\n")

if len(output + stat_line) >= MAX_TELEGRAM_MESSAGE_LENGTH:
if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
self._send_msg(output, parse_mode=ParseMode.HTML)
output = stat_line
else:
@ -1367,7 +1424,7 @@ class Telegram(RPCHandler):
escape_markdown(logrec[2], version=2),
escape_markdown(logrec[3], version=2),
escape_markdown(logrec[4], version=2))
if len(msgs + msg) + 10 >= MAX_TELEGRAM_MESSAGE_LENGTH:
if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH:
# Send message immediately if it would become too long
self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
msgs = msg + '\n'

@ -146,11 +146,19 @@ class IStrategy(ABC, HyperStrategyMixin):
self._ft_informative.append((informative_data, cls_method))

def load_freqAI_model(self) -> None:
if self.config.get('freqai', None):
if self.config.get('freqai', {}).get('enabled', False):
# Import here to avoid importing this if freqAI is disabled
from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver

self.freqai = FreqaiModelResolver.load_freqaimodel(self.config)
self.freqai_info = self.config["freqai"]
else:
# Gracious failures if freqAI is disabled but "start" is called.
class DummyClass():
def start(self, *args, **kwargs):
raise OperationalException(
'freqAI is not enabled. Please enable it in your config to use this strategy.')
self.freqai = DummyClass() # type: ignore

def ft_bot_start(self, **kwargs) -> None:
"""
@ -472,10 +480,13 @@ class IStrategy(ABC, HyperStrategyMixin):
def adjust_trade_position(self, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float,
min_stake: Optional[float], max_stake: float,
current_entry_rate: float, current_exit_rate: float,
current_entry_profit: float, current_exit_profit: float,
**kwargs) -> Optional[float]:
"""
Custom trade adjustment logic, returning the stake amount that a trade should be increased.
This means extra buy orders with additional fees.
Custom trade adjustment logic, returning the stake amount that a trade should be
increased or decreased.
This means extra buy or sell orders with additional fees.
Only called when `position_adjustment_enable` is set to True.

For full documentation please go to https://www.freqtrade.io/en/latest/strategy-advanced/
@ -486,10 +497,16 @@ class IStrategy(ABC, HyperStrategyMixin):
:param current_time: datetime object, containing the current datetime
:param current_rate: Current buy rate.
:param current_profit: Current profit (as ratio), calculated based on current_rate.
:param min_stake: Minimal stake size allowed by exchange.
:param max_stake: Balance available for trading.
:param min_stake: Minimal stake size allowed by exchange (for both entries and exits)
:param max_stake: Maximum stake allowed (either through balance, or by exchange limits).
:param current_entry_rate: Current rate using entry pricing.
:param current_exit_rate: Current rate using exit pricing.
:param current_entry_profit: Current profit using entry pricing.
:param current_exit_profit: Current profit using exit pricing.
:param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
:return float: Stake amount to adjust your trade
:return float: Stake amount to adjust your trade,
Positive values to increase position, Negative values to decrease position.
Return None for no action.
"""
return None

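# [Editor's illustrative sketch - not part of the diff above.] A strategy-method
# sketch based on the updated docstring: adjust_trade_position() may now return a
# negative stake amount to reduce an open position, e.g. selling half of the stake
# once the trade shows a 5% profit. A real strategy would also guard against
# repeatedly reducing the same trade.
def adjust_trade_position(self, trade, current_time, current_rate, current_profit,
                          min_stake, max_stake, current_entry_rate, current_exit_rate,
                          current_entry_profit, current_exit_profit, **kwargs):
    if current_profit > 0.05:
        # Negative value -> decrease position by half of the current stake.
        return -(trade.stake_amount / 2)
    return None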
@ -557,8 +574,8 @@ class IStrategy(ABC, HyperStrategyMixin):
"""
return None

def populate_any_indicators(self, basepair: str, pair: str, df: DataFrame, tf: str,
informative: DataFrame = None, coin: str = "",
def populate_any_indicators(self, pair: str, df: DataFrame, tf: str,
informative: DataFrame = None,
set_generalized_indicators: bool = False) -> DataFrame:
"""
Function designed to automatically generate, name and merge features
@ -570,7 +587,6 @@ class IStrategy(ABC, HyperStrategyMixin):
:param df: strategy dataframe which will receive merges from informatives
:param tf: timeframe of the dataframe which will modify the feature names
:param informative: the dataframe associated with the informative pair
:param coin: the name of the coin which will modify the feature names.
"""
return df

@ -989,7 +1005,7 @@ class IStrategy(ABC, HyperStrategyMixin):
# ROI
# Trailing stoploss

if stoplossflag.exit_type == ExitType.STOP_LOSS:
if stoplossflag.exit_type in (ExitType.STOP_LOSS, ExitType.LIQUIDATION):

logger.debug(f"{trade.pair} - Stoploss hit. exit_type={stoplossflag.exit_type}")
exits.append(stoplossflag)
@ -1061,6 +1077,17 @@ class IStrategy(ABC, HyperStrategyMixin):

sl_higher_long = (trade.stop_loss >= (low or current_rate) and not trade.is_short)
sl_lower_short = (trade.stop_loss <= (high or current_rate) and trade.is_short)
liq_higher_long = (trade.liquidation_price
and trade.liquidation_price >= (low or current_rate)
and not trade.is_short)
liq_lower_short = (trade.liquidation_price
and trade.liquidation_price <= (high or current_rate)
and trade.is_short)

if (liq_higher_long or liq_lower_short):
logger.debug(f"{trade.pair} - Liquidation price hit. exit_type=ExitType.LIQUIDATION")
return ExitCheckTuple(exit_type=ExitType.LIQUIDATION)

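# [Editor's illustrative sketch - not part of the diff above.] The new liquidation
# check: for longs the trade counts as liquidated when the candle low reaches the
# liquidation price, for shorts when the high does.
def liquidation_hit(liquidation_price, is_short, current_rate, low=None, high=None):
    if not liquidation_price:
        return False
    if is_short:
        return liquidation_price <= (high or current_rate)
    return liquidation_price >= (low or current_rate)

assert liquidation_hit(90.0, False, current_rate=95.0, low=89.0) is True
assert liquidation_hit(120.0, True, current_rate=110.0, high=121.0) is True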
# evaluate if the stoploss was hit if stoploss is not on exchange
# in Dry-Run, this handles stoploss logic as well, as the logic will not be different to
# regular stoploss handling.
@ -1078,13 +1105,6 @@ class IStrategy(ABC, HyperStrategyMixin):
f"stoploss is {trade.stop_loss:.6f}, "
f"initial stoploss was at {trade.initial_stop_loss:.6f}, "
f"trade opened at {trade.open_rate:.6f}")
new_stoploss = (
trade.stop_loss + trade.initial_stop_loss
if trade.is_short else
trade.stop_loss - trade.initial_stop_loss
)
logger.debug(f"{trade.pair} - Trailing stop saved "
f"{new_stoploss:.6f}")

return ExitCheckTuple(exit_type=exit_type)

@ -65,7 +65,7 @@ class FreqaiExampleStrategy(IStrategy):
|
||||
return informative_pairs
|
||||
|
||||
def populate_any_indicators(
|
||||
self, metadata, pair, df, tf, informative=None, coin="", set_generalized_indicators=False
|
||||
self, pair, df, tf, informative=None, set_generalized_indicators=False
|
||||
):
|
||||
"""
|
||||
Function designed to automatically generate, name and merge features
|
||||
@ -78,9 +78,10 @@ class FreqaiExampleStrategy(IStrategy):
|
||||
:param df: strategy dataframe which will receive merges from informatives
|
||||
:param tf: timeframe of the dataframe which will modify the feature names
|
||||
:param informative: the dataframe associated with the informative pair
|
||||
:param coin: the name of the coin which will modify the feature names.
|
||||
"""
|
||||
|
||||
coin = pair.split('/')[0]
|
||||
|
||||
with self.freqai.lock:
|
||||
if informative is None:
|
||||
informative = self.dp.get_pair_dataframe(pair, tf)
|
||||
@ -92,11 +93,8 @@ class FreqaiExampleStrategy(IStrategy):
informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
informative[f"{coin}20sma-period_{t}"] = ta.SMA(informative, timeperiod=t)
informative[f"{coin}21ema-period_{t}"] = ta.EMA(informative, timeperiod=t)
informative[f"%-{coin}close_over_20sma-period_{t}"] = (
informative["close"] / informative[f"{coin}20sma-period_{t}"]
)
informative[f"%-{coin}sma-period_{t}"] = ta.SMA(informative, timeperiod=t)
informative[f"%-{coin}ema-period_{t}"] = ta.EMA(informative, timeperiod=t)

informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
@ -148,8 +146,6 @@ class FreqaiExampleStrategy(IStrategy):
df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25

# user adds targets here by prepending them with &- (see convention below)
# If user wishes to use multiple targets, a multioutput prediction model
# needs to be used such as templates/CatboostPredictionMultiModel.py
df["&-s_close"] = (
df["close"]
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
@ -159,12 +155,31 @@ class FreqaiExampleStrategy(IStrategy):
- 1
)

# Classifiers are typically set up with strings as targets:
# df['&s-up_or_down'] = np.where( df["close"].shift(-100) >
# df["close"], 'up', 'down')

# If user wishes to use multiple targets, they can add more by
# appending more columns with '&'. User should keep in mind that multi targets
# requires a multioutput prediction model such as
# templates/CatboostPredictionMultiModel.py,

# df["&-s_range"] = (
# df["close"]
# .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
# .rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
# .max()
# -
# df["close"]
# .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
# .rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
# .min()
# )

return df

def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:

self.freqai_info = self.config["freqai"]

# All indicators must be populated by populate_any_indicators() for live functionality
# to work correctly.
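The comments in the hunk above spell out the target convention: prediction targets are prefixed with '&', classifiers use string labels, and multiple targets require a multi-output model. A hedged sketch of both target styles on a hypothetical dataframe; the rolling-mean middle of the '&-s_close' expression is an assumption, since it falls between the two hunks shown here:

import numpy as np
import pandas as pd

df = pd.DataFrame({"close": [100.0, 101.0, 99.5, 102.0, 103.0, 101.5]})
label_period_candles = 2  # in the strategy this comes from freqai_info["feature_parameters"]

# Regression-style target in the spirit of "&-s_close": future close relative to current close.
df["&-s_close"] = (
    df["close"]
    .shift(-label_period_candles)
    .rolling(label_period_candles)
    .mean()
    / df["close"]
    - 1
)

# Classifier-style target, as sketched in the commented example above: a string label.
df["&s-up_or_down"] = np.where(df["close"].shift(-label_period_candles) > df["close"], "up", "down")
print(df)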
@ -235,16 +250,16 @@ class FreqaiExampleStrategy(IStrategy):
|
||||
|
||||
if (
|
||||
"prediction" + entry_tag not in pair_dict[pair]
|
||||
or pair_dict[pair]["prediction" + entry_tag] > 0
|
||||
or pair_dict[pair]['extras']["prediction" + entry_tag] == 0
|
||||
):
|
||||
with self.freqai.lock:
|
||||
pair_dict[pair]["prediction" + entry_tag] = abs(trade_candle["&-s_close"])
|
||||
pair_dict[pair]['extras']["prediction" + entry_tag] = abs(trade_candle["&-s_close"])
|
||||
if not follow_mode:
|
||||
self.freqai.dd.save_drawer_to_disk()
|
||||
else:
|
||||
self.freqai.dd.save_follower_dict_to_disk()
|
||||
|
||||
roi_price = pair_dict[pair]["prediction" + entry_tag]
|
||||
roi_price = pair_dict[pair]['extras']["prediction" + entry_tag]
|
||||
roi_time = self.max_roi_time_long.value
|
||||
|
||||
roi_decay = roi_price * (
|
||||
@ -282,7 +297,7 @@ class FreqaiExampleStrategy(IStrategy):
|
||||
pair_dict = self.freqai.dd.follower_dict
|
||||
|
||||
with self.freqai.lock:
|
||||
pair_dict[pair]["prediction" + entry_tag] = 0
|
||||
pair_dict[pair]['extras']["prediction" + entry_tag] = 0
|
||||
if not follow_mode:
|
||||
self.freqai.dd.save_drawer_to_disk()
|
||||
else:
|
||||
|
@ -12,6 +12,7 @@
|
||||
"tradable_balance_ratio": 0.99,
|
||||
"fiat_display_currency": "{{ fiat_display_currency }}",{{ ('\n "timeframe": "' + timeframe + '",') if timeframe else '' }}
|
||||
"dry_run": {{ dry_run | lower }},
|
||||
"dry_run_wallet": 1000,
|
||||
"cancel_open_orders_on_exit": false,
|
||||
"trading_mode": "{{ trading_mode }}",
|
||||
"margin_mode": "{{ margin_mode }}",
|
||||
|
@ -247,12 +247,16 @@ def check_exit_timeout(self, pair: str, trade: 'Trade', order: 'Order',
"""
return False

def adjust_trade_position(self, trade: 'Trade', current_time: 'datetime',
current_rate: float, current_profit: float, min_stake: Optional[float],
max_stake: float, **kwargs) -> 'Optional[float]':
def adjust_trade_position(self, trade: 'Trade', current_time: datetime,
current_rate: float, current_profit: float,
min_stake: Optional[float], max_stake: float,
current_entry_rate: float, current_exit_rate: float,
current_entry_profit: float, current_exit_profit: float,
**kwargs) -> Optional[float]:
"""
Custom trade adjustment logic, returning the stake amount that a trade should be increased.
This means extra buy orders with additional fees.
Custom trade adjustment logic, returning the stake amount that a trade should be
increased or decreased.
This means extra buy or sell orders with additional fees.
Only called when `position_adjustment_enable` is set to True.

For full documentation please go to https://www.freqtrade.io/en/latest/strategy-advanced/
@ -263,10 +267,16 @@ def adjust_trade_position(self, trade: 'Trade', current_time: 'datetime',
:param current_time: datetime object, containing the current datetime
:param current_rate: Current buy rate.
:param current_profit: Current profit (as ratio), calculated based on current_rate.
:param min_stake: Minimal stake size allowed by exchange.
:param max_stake: Balance available for trading.
:param min_stake: Minimal stake size allowed by exchange (for both entries and exits)
:param max_stake: Maximum stake allowed (either through balance, or by exchange limits).
:param current_entry_rate: Current rate using entry pricing.
:param current_exit_rate: Current rate using exit pricing.
:param current_entry_profit: Current profit using entry pricing.
:param current_exit_profit: Current profit using exit pricing.
:param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
:return float: Stake amount to adjust your trade
:return float: Stake amount to adjust your trade,
Positive values to increase position, Negative values to decrease position.
Return None for no action.
"""
return None
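The expanded docstring above defines the new adjust_trade_position contract: extra entry/exit pricing and profit arguments, and a signed return value (positive to increase the position, negative to decrease it, None for no action). A minimal sketch of a callback following that contract; the thresholds are purely illustrative and not taken from the commit:

from datetime import datetime
from typing import Optional


def adjust_trade_position(self, trade: 'Trade', current_time: datetime,
                          current_rate: float, current_profit: float,
                          min_stake: Optional[float], max_stake: float,
                          current_entry_rate: float, current_exit_rate: float,
                          current_entry_profit: float, current_exit_profit: float,
                          **kwargs) -> Optional[float]:
    # Illustrative logic only: average down on a 5% drawdown, take partial profit at +10%.
    if current_profit < -0.05:
        extra = trade.stake_amount / 2
        if min_stake is not None and extra < min_stake:
            return None
        return min(extra, max_stake)
    if current_profit > 0.10:
        # A negative value reduces the position (partial exit).
        return -(trade.stake_amount / 2)
    return None  # No adjustment.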
3
freqtrade/util/__init__.py
Normal file
@ -0,0 +1,3 @@
# flake8: noqa: F401
from freqtrade.util.ft_precise import FtPrecise
from freqtrade.util.periodic_cache import PeriodicCache
12
freqtrade/util/ft_precise.py
Normal file
@ -0,0 +1,12 @@
"""
Slim wrapper around ccxt's Precise (string math)
To have imports from freqtrade - and support float initializers
"""
from ccxt import Precise


class FtPrecise(Precise):
    def __init__(self, number, decimals=None):
        if not isinstance(number, str):
            number = str(number)
        super().__init__(number, decimals)
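FtPrecise only adds the freqtrade-local import path and float/int initializers on top of ccxt's string-math Precise. A short usage sketch with arbitrary values:

from freqtrade.util import FtPrecise

# Float and int initializers are stringified before being handed to ccxt's Precise.
fee = FtPrecise(0.001)
amount = FtPrecise("123.456")

cost = amount * FtPrecise("2.5")
print(cost)               # exact string-math result, no float rounding error
print(cost - cost * fee)  # net of fee, still exact string arithmetic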
@ -6,7 +6,7 @@
|
||||
-r docs/requirements-docs.txt
|
||||
|
||||
coveralls==3.3.1
|
||||
flake8==4.0.1
|
||||
flake8==5.0.4
|
||||
flake8-tidy-imports==4.8.0
|
||||
mypy==0.971
|
||||
pre-commit==2.20.0
|
||||
@ -25,6 +25,6 @@ nbconvert==6.5.0
|
||||
# mypy types
|
||||
types-cachetools==5.2.1
|
||||
types-filelock==3.2.7
|
||||
types-requests==2.28.3
|
||||
types-requests==2.28.8
|
||||
types-tabulate==0.8.11
|
||||
types-python-dateutil==2.8.19
|
||||
|
@ -2,8 +2,7 @@
|
||||
-r requirements.txt
|
||||
|
||||
# Required for freqai
|
||||
scikit-learn==1.1.1
|
||||
scikit-optimize==0.9.0
|
||||
scikit-learn==1.1.2
|
||||
joblib==1.1.0
|
||||
catboost==1.0.4
|
||||
catboost==1.0.6; platform_machine != 'aarch64'
|
||||
lightgbm==3.3.2
|
||||
|
@ -2,8 +2,8 @@
|
||||
-r requirements.txt
|
||||
|
||||
# Required for hyperopt
|
||||
scipy==1.8.1
|
||||
scikit-learn==1.1.1
|
||||
scipy==1.9.0
|
||||
scikit-learn==1.1.2
|
||||
scikit-optimize==0.9.0
|
||||
filelock==3.7.1
|
||||
progressbar2==4.0.0
|
||||
|
@ -2,7 +2,7 @@ numpy==1.23.1
|
||||
pandas==1.4.3
|
||||
pandas-ta==0.3.14b
|
||||
|
||||
ccxt==1.91.29
|
||||
ccxt==1.91.93
|
||||
# Pin cryptography for now due to rust build errors with piwheels
|
||||
cryptography==37.0.4
|
||||
aiohttp==3.8.1
|
||||
@ -11,8 +11,8 @@ python-telegram-bot==13.13
|
||||
arrow==1.2.2
|
||||
cachetools==4.2.2
|
||||
requests==2.28.1
|
||||
urllib3==1.26.10
|
||||
jsonschema==4.7.2
|
||||
urllib3==1.26.11
|
||||
jsonschema==4.9.1
|
||||
TA-Lib==0.4.24
|
||||
technical==1.3.0
|
||||
tabulate==0.8.10
|
||||
@ -28,7 +28,7 @@ py_find_1st==1.1.5
|
||||
# Load ticker files 30% faster
|
||||
python-rapidjson==1.8
|
||||
# Properly format api responses
|
||||
orjson==3.7.8
|
||||
orjson==3.7.11
|
||||
|
||||
# Notify systemd
|
||||
sdnotify==0.3.2
|
||||
|
@ -275,14 +275,20 @@ class FtRestClient():
}
return self._post("forceenter", data=data)

def forceexit(self, tradeid):
def forceexit(self, tradeid, ordertype=None, amount=None):
"""Force-exit a trade.

:param tradeid: Id of the trade (can be received via status command)
:param ordertype: Order type to use (must be market or limit)
:param amount: Amount to sell. Full sell if not given
:return: json object
"""

return self._post("forceexit", data={"tradeid": tradeid})
return self._post("forceexit", data={
"tradeid": tradeid,
"ordertype": ordertype,
"amount": amount,
})

def strategies(self):
"""Lists available strategies
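With the extended signature, the REST client can request a partial exit with a specific order type in a single call. A usage sketch; the URL, credentials and trade id are placeholders:

# FtRestClient lives in scripts/rest_client.py and wraps the bot's REST API.
from rest_client import FtRestClient

client = FtRestClient("http://127.0.0.1:8080", "freqtrader", "password")

# Exit the full position of trade 5 (previous behaviour).
client.forceexit(5)

# New in this change: exit only part of the position, using a market order.
client.forceexit(5, ordertype="market", amount=23)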
10
setup.py
@ -12,6 +12,13 @@ hyperopt = [
|
||||
'progressbar2',
|
||||
]
|
||||
|
||||
freqai = [
|
||||
'scikit-learn',
|
||||
'joblib',
|
||||
'catboost; platform_machine != "aarch64"',
|
||||
'lightgbm',
|
||||
]
|
||||
|
||||
develop = [
|
||||
'coveralls',
|
||||
'flake8',
|
||||
@ -31,7 +38,7 @@ jupyter = [
|
||||
'nbconvert',
|
||||
]
|
||||
|
||||
all_extra = plot + develop + jupyter + hyperopt
|
||||
all_extra = plot + develop + jupyter + hyperopt + freqai
|
||||
|
||||
setup(
|
||||
tests_require=[
|
||||
@ -79,6 +86,7 @@ setup(
|
||||
'plot': plot,
|
||||
'jupyter': jupyter,
|
||||
'hyperopt': hyperopt,
|
||||
'freqai': freqai,
|
||||
'all': all_extra,
|
||||
},
|
||||
)
|
||||
|
@ -1627,8 +1627,8 @@ def limit_buy_order_open():
|
||||
'timestamp': arrow.utcnow().int_timestamp * 1000,
|
||||
'datetime': arrow.utcnow().isoformat(),
|
||||
'price': 0.00001099,
|
||||
'average': 0.00001099,
|
||||
'amount': 90.99181073,
|
||||
'average': None,
|
||||
'filled': 0.0,
|
||||
'cost': 0.0009999,
|
||||
'remaining': 90.99181073,
|
||||
@ -2817,6 +2817,7 @@ def limit_buy_order_usdt_open():
|
||||
'datetime': arrow.utcnow().isoformat(),
|
||||
'timestamp': arrow.utcnow().int_timestamp * 1000,
|
||||
'price': 2.00,
|
||||
'average': 2.00,
|
||||
'amount': 30.0,
|
||||
'filled': 0.0,
|
||||
'cost': 60.0,
|
||||
|
@ -214,7 +214,8 @@ def mock_trade_4(fee, is_short: bool):
|
||||
open_order_id=f'prod_buy_{direc(is_short)}_12345',
|
||||
strategy='StrategyTestV3',
|
||||
timeframe=5,
|
||||
is_short=is_short
|
||||
is_short=is_short,
|
||||
stop_loss_pct=0.10
|
||||
)
|
||||
o = Order.parse_from_ccxt_object(mock_order_4(is_short), 'ETC/BTC', entry_side(is_short))
|
||||
trade.orders.append(o)
|
||||
@ -270,7 +271,8 @@ def mock_trade_5(fee, is_short: bool):
|
||||
enter_tag='TEST1',
|
||||
stoploss_order_id=f'prod_stoploss_{direc(is_short)}_3455',
|
||||
timeframe=5,
|
||||
is_short=is_short
|
||||
is_short=is_short,
|
||||
stop_loss_pct=0.10,
|
||||
)
|
||||
o = Order.parse_from_ccxt_object(mock_order_5(is_short), 'XRP/BTC', entry_side(is_short))
|
||||
trade.orders.append(o)
|
||||
|
@ -63,7 +63,7 @@ def mock_trade_usdt_1(fee, is_short: bool):
|
||||
open_rate=10.0,
|
||||
close_rate=8.0,
|
||||
close_profit=-0.2,
|
||||
close_profit_abs=-4.0,
|
||||
close_profit_abs=-4.09,
|
||||
exchange='binance',
|
||||
strategy='SampleStrategy',
|
||||
open_order_id=f'prod_exit_1_{direc(is_short)}',
|
||||
@ -183,7 +183,7 @@ def mock_trade_usdt_3(fee, is_short: bool):
|
||||
open_rate=1.0,
|
||||
close_rate=1.1,
|
||||
close_profit=0.1,
|
||||
close_profit_abs=9.8425,
|
||||
close_profit_abs=2.8425,
|
||||
exchange='binance',
|
||||
is_open=False,
|
||||
strategy='StrategyTestV2',
|
||||
|
@ -311,3 +311,27 @@ def test_no_exchange_mode(default_conf):

with pytest.raises(OperationalException, match=message):
dp.available_pairs()


def test_dp_send_msg(default_conf):

default_conf["runmode"] = RunMode.DRY_RUN

default_conf["timeframe"] = '1h'
dp = DataProvider(default_conf, None)
msg = 'Test message'
dp.send_msg(msg)

assert msg in dp._msg_queue
dp._msg_queue.pop()
assert msg not in dp._msg_queue
# Message is not resent due to caching
dp.send_msg(msg)
assert msg not in dp._msg_queue
dp.send_msg(msg, always_send=True)
assert msg in dp._msg_queue

default_conf["runmode"] = RunMode.BACKTEST
dp = DataProvider(default_conf, None)
dp.send_msg(msg, always_send=True)
assert msg not in dp._msg_queue
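The new test above pins down DataProvider.send_msg behaviour: messages are queued in dry-run/live, repeated identical messages are suppressed by a cache unless always_send=True, and backtesting drops them entirely. A hedged sketch of calling it from a strategy callback (pair, message text and callback choice are illustrative):

# Inside a strategy, self.dp is the DataProvider instance.
def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float,
                        time_in_force: str, current_time, entry_tag, side: str,
                        **kwargs) -> bool:
    # Sent once; an identical follow-up message would be swallowed by the cache.
    self.dp.send_msg(f"About to enter {side} {pair} at {rate}")
    # Force delivery even if the same text was sent recently.
    self.dp.send_msg(f"About to enter {side} {pair} at {rate}", always_send=True)
    return True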
@ -1,14 +1,14 @@
|
||||
from ccxt import Precise
|
||||
from freqtrade.util import FtPrecise
|
||||
|
||||
|
||||
ws = Precise('-1.123e-6')
|
||||
ws = Precise('-1.123e-6')
|
||||
xs = Precise('0.00000002')
|
||||
ys = Precise('69696900000')
|
||||
zs = Precise('0')
|
||||
ws = FtPrecise('-1.123e-6')
|
||||
ws = FtPrecise('-1.123e-6')
|
||||
xs = FtPrecise('0.00000002')
|
||||
ys = FtPrecise('69696900000')
|
||||
zs = FtPrecise('0')
|
||||
|
||||
|
||||
def test_precise():
|
||||
def test_FtPrecise():
|
||||
assert ys * xs == '1393.938'
|
||||
assert xs * ys == '1393.938'
|
||||
|
||||
@ -45,31 +45,36 @@ def test_precise():
|
||||
assert xs + zs == '0.00000002'
|
||||
assert ys + zs == '69696900000'
|
||||
|
||||
assert abs(Precise('-500.1')) == '500.1'
|
||||
assert abs(Precise('213')) == '213'
|
||||
assert abs(FtPrecise('-500.1')) == '500.1'
|
||||
assert abs(FtPrecise('213')) == '213'
|
||||
|
||||
assert abs(Precise('-500.1')) == '500.1'
|
||||
assert -Precise('213') == '-213'
|
||||
assert abs(FtPrecise('-500.1')) == '500.1'
|
||||
assert -FtPrecise('213') == '-213'
|
||||
|
||||
assert Precise('10.1') % Precise('0.5') == '0.1'
|
||||
assert Precise('5550') % Precise('120') == '30'
|
||||
assert FtPrecise('10.1') % FtPrecise('0.5') == '0.1'
|
||||
assert FtPrecise('5550') % FtPrecise('120') == '30'
|
||||
|
||||
assert Precise('-0.0') == Precise('0')
|
||||
assert Precise('5.534000') == Precise('5.5340')
|
||||
assert FtPrecise('-0.0') == FtPrecise('0')
|
||||
assert FtPrecise('5.534000') == FtPrecise('5.5340')
|
||||
|
||||
assert min(Precise('-3.1415'), Precise('-2')) == '-3.1415'
|
||||
assert min(FtPrecise('-3.1415'), FtPrecise('-2')) == '-3.1415'
|
||||
|
||||
assert max(Precise('3.1415'), Precise('-2')) == '3.1415'
|
||||
assert max(FtPrecise('3.1415'), FtPrecise('-2')) == '3.1415'
|
||||
|
||||
assert Precise('2') > Precise('1.2345')
|
||||
assert not Precise('-3.1415') > Precise('-2')
|
||||
assert not Precise('3.1415') > Precise('3.1415')
|
||||
assert Precise.string_gt('3.14150000000000000000001', '3.1415')
|
||||
assert FtPrecise('2') > FtPrecise('1.2345')
|
||||
assert not FtPrecise('-3.1415') > FtPrecise('-2')
|
||||
assert not FtPrecise('3.1415') > FtPrecise('3.1415')
|
||||
assert FtPrecise.string_gt('3.14150000000000000000001', '3.1415')
|
||||
|
||||
assert Precise('3.1415') >= Precise('3.1415')
|
||||
assert Precise('3.14150000000000000000001') >= Precise('3.1415')
|
||||
assert FtPrecise('3.1415') >= FtPrecise('3.1415')
|
||||
assert FtPrecise('3.14150000000000000000001') >= FtPrecise('3.1415')
|
||||
|
||||
assert not Precise('3.1415') < Precise('3.1415')
|
||||
assert not FtPrecise('3.1415') < FtPrecise('3.1415')
|
||||
|
||||
assert Precise('3.1415') <= Precise('3.1415')
|
||||
assert Precise('3.1415') <= Precise('3.14150000000000000000001')
|
||||
assert FtPrecise('3.1415') <= FtPrecise('3.1415')
|
||||
assert FtPrecise('3.1415') <= FtPrecise('3.14150000000000000000001')
|
||||
|
||||
assert FtPrecise(213) == '213'
|
||||
assert FtPrecise(-213) == '-213'
|
||||
assert str(FtPrecise(-213)) == '-213'
|
||||
assert FtPrecise(213.2) == '213.2'
|
||||
|
@ -27,6 +27,57 @@ from tests.conftest import get_mock_coro, get_patched_exchange, log_has, log_has
|
||||
# Make sure to always keep one exchange here which is NOT subclassed!!
|
||||
EXCHANGES = ['bittrex', 'binance', 'kraken', 'ftx', 'gateio']
|
||||
|
||||
get_entry_rate_data = [
|
||||
('other', 20, 19, 10, 0.0, 20), # Full ask side
|
||||
('ask', 20, 19, 10, 0.0, 20), # Full ask side
|
||||
('ask', 20, 19, 10, 1.0, 10), # Full last side
|
||||
('ask', 20, 19, 10, 0.5, 15), # Between ask and last
|
||||
('ask', 20, 19, 10, 0.7, 13), # Between ask and last
|
||||
('ask', 20, 19, 10, 0.3, 17), # Between ask and last
|
||||
('ask', 5, 6, 10, 1.0, 5), # last bigger than ask
|
||||
('ask', 5, 6, 10, 0.5, 5), # last bigger than ask
|
||||
('ask', 20, 19, 10, None, 20), # price_last_balance missing
|
||||
('ask', 10, 20, None, 0.5, 10), # last not available - uses ask
|
||||
('ask', 4, 5, None, 0.5, 4), # last not available - uses ask
|
||||
('ask', 4, 5, None, 1, 4), # last not available - uses ask
|
||||
('ask', 4, 5, None, 0, 4), # last not available - uses ask
|
||||
('same', 21, 20, 10, 0.0, 20), # Full bid side
|
||||
('bid', 21, 20, 10, 0.0, 20), # Full bid side
|
||||
('bid', 21, 20, 10, 1.0, 10), # Full last side
|
||||
('bid', 21, 20, 10, 0.5, 15), # Between bid and last
|
||||
('bid', 21, 20, 10, 0.7, 13), # Between bid and last
|
||||
('bid', 21, 20, 10, 0.3, 17), # Between bid and last
|
||||
('bid', 6, 5, 10, 1.0, 5), # last bigger than bid
|
||||
('bid', 21, 20, 10, None, 20), # price_last_balance missing
|
||||
('bid', 6, 5, 10, 0.5, 5), # last bigger than bid
|
||||
('bid', 21, 20, None, 0.5, 20), # last not available - uses bid
|
||||
('bid', 6, 5, None, 0.5, 5), # last not available - uses bid
|
||||
('bid', 6, 5, None, 1, 5), # last not available - uses bid
|
||||
('bid', 6, 5, None, 0, 5), # last not available - uses bid
|
||||
]
|
||||
|
||||
get_sell_rate_data = [
|
||||
('bid', 12.0, 11.0, 11.5, 0.0, 11.0), # full bid side
|
||||
('bid', 12.0, 11.0, 11.5, 1.0, 11.5), # full last side
|
||||
('bid', 12.0, 11.0, 11.5, 0.5, 11.25), # between bid and lat
|
||||
('bid', 12.0, 11.2, 10.5, 0.0, 11.2), # Last smaller than bid
|
||||
('bid', 12.0, 11.2, 10.5, 1.0, 11.2), # Last smaller than bid - uses bid
|
||||
('bid', 12.0, 11.2, 10.5, 0.5, 11.2), # Last smaller than bid - uses bid
|
||||
('bid', 0.003, 0.002, 0.005, 0.0, 0.002),
|
||||
('bid', 0.003, 0.002, 0.005, None, 0.002),
|
||||
('ask', 12.0, 11.0, 12.5, 0.0, 12.0), # full ask side
|
||||
('ask', 12.0, 11.0, 12.5, 1.0, 12.5), # full last side
|
||||
('ask', 12.0, 11.0, 12.5, 0.5, 12.25), # between bid and lat
|
||||
('ask', 12.2, 11.2, 10.5, 0.0, 12.2), # Last smaller than ask
|
||||
('ask', 12.0, 11.0, 10.5, 1.0, 12.0), # Last smaller than ask - uses ask
|
||||
('ask', 12.0, 11.2, 10.5, 0.5, 12.0), # Last smaller than ask - uses ask
|
||||
('ask', 10.0, 11.0, 11.0, 0.0, 10.0),
|
||||
('ask', 10.11, 11.2, 11.0, 0.0, 10.11),
|
||||
('ask', 0.001, 0.002, 11.0, 0.0, 0.001),
|
||||
('ask', 0.006, 1.0, 11.0, 0.0, 0.006),
|
||||
('ask', 0.006, 1.0, 11.0, None, 0.006),
|
||||
]
|
||||
|
||||
|
||||
def ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name,
|
||||
fun, mock_ccxt_fun, retries=API_RETRY_COUNT + 1, **kwargs):
|
||||
@ -2360,34 +2411,7 @@ def test_fetch_l2_order_book_exception(default_conf, mocker, exchange_name):
|
||||
exchange.fetch_l2_order_book(pair='ETH/BTC', limit=50)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("side,ask,bid,last,last_ab,expected", [
|
||||
('other', 20, 19, 10, 0.0, 20), # Full ask side
|
||||
('ask', 20, 19, 10, 0.0, 20), # Full ask side
|
||||
('ask', 20, 19, 10, 1.0, 10), # Full last side
|
||||
('ask', 20, 19, 10, 0.5, 15), # Between ask and last
|
||||
('ask', 20, 19, 10, 0.7, 13), # Between ask and last
|
||||
('ask', 20, 19, 10, 0.3, 17), # Between ask and last
|
||||
('ask', 5, 6, 10, 1.0, 5), # last bigger than ask
|
||||
('ask', 5, 6, 10, 0.5, 5), # last bigger than ask
|
||||
('ask', 20, 19, 10, None, 20), # price_last_balance missing
|
||||
('ask', 10, 20, None, 0.5, 10), # last not available - uses ask
|
||||
('ask', 4, 5, None, 0.5, 4), # last not available - uses ask
|
||||
('ask', 4, 5, None, 1, 4), # last not available - uses ask
|
||||
('ask', 4, 5, None, 0, 4), # last not available - uses ask
|
||||
('same', 21, 20, 10, 0.0, 20), # Full bid side
|
||||
('bid', 21, 20, 10, 0.0, 20), # Full bid side
|
||||
('bid', 21, 20, 10, 1.0, 10), # Full last side
|
||||
('bid', 21, 20, 10, 0.5, 15), # Between bid and last
|
||||
('bid', 21, 20, 10, 0.7, 13), # Between bid and last
|
||||
('bid', 21, 20, 10, 0.3, 17), # Between bid and last
|
||||
('bid', 6, 5, 10, 1.0, 5), # last bigger than bid
|
||||
('bid', 21, 20, 10, None, 20), # price_last_balance missing
|
||||
('bid', 6, 5, 10, 0.5, 5), # last bigger than bid
|
||||
('bid', 21, 20, None, 0.5, 20), # last not available - uses bid
|
||||
('bid', 6, 5, None, 0.5, 5), # last not available - uses bid
|
||||
('bid', 6, 5, None, 1, 5), # last not available - uses bid
|
||||
('bid', 6, 5, None, 0, 5), # last not available - uses bid
|
||||
])
|
||||
@pytest.mark.parametrize("side,ask,bid,last,last_ab,expected", get_entry_rate_data)
|
||||
def test_get_entry_rate(mocker, default_conf, caplog, side, ask, bid,
|
||||
last, last_ab, expected) -> None:
|
||||
caplog.set_level(logging.DEBUG)
|
||||
@ -2411,27 +2435,7 @@ def test_get_entry_rate(mocker, default_conf, caplog, side, ask, bid,
|
||||
assert not log_has("Using cached entry rate for ETH/BTC.", caplog)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('side,ask,bid,last,last_ab,expected', [
|
||||
('bid', 12.0, 11.0, 11.5, 0.0, 11.0), # full bid side
|
||||
('bid', 12.0, 11.0, 11.5, 1.0, 11.5), # full last side
|
||||
('bid', 12.0, 11.0, 11.5, 0.5, 11.25), # between bid and lat
|
||||
('bid', 12.0, 11.2, 10.5, 0.0, 11.2), # Last smaller than bid
|
||||
('bid', 12.0, 11.2, 10.5, 1.0, 11.2), # Last smaller than bid - uses bid
|
||||
('bid', 12.0, 11.2, 10.5, 0.5, 11.2), # Last smaller than bid - uses bid
|
||||
('bid', 0.003, 0.002, 0.005, 0.0, 0.002),
|
||||
('bid', 0.003, 0.002, 0.005, None, 0.002),
|
||||
('ask', 12.0, 11.0, 12.5, 0.0, 12.0), # full ask side
|
||||
('ask', 12.0, 11.0, 12.5, 1.0, 12.5), # full last side
|
||||
('ask', 12.0, 11.0, 12.5, 0.5, 12.25), # between bid and lat
|
||||
('ask', 12.2, 11.2, 10.5, 0.0, 12.2), # Last smaller than ask
|
||||
('ask', 12.0, 11.0, 10.5, 1.0, 12.0), # Last smaller than ask - uses ask
|
||||
('ask', 12.0, 11.2, 10.5, 0.5, 12.0), # Last smaller than ask - uses ask
|
||||
('ask', 10.0, 11.0, 11.0, 0.0, 10.0),
|
||||
('ask', 10.11, 11.2, 11.0, 0.0, 10.11),
|
||||
('ask', 0.001, 0.002, 11.0, 0.0, 0.001),
|
||||
('ask', 0.006, 1.0, 11.0, 0.0, 0.006),
|
||||
('ask', 0.006, 1.0, 11.0, None, 0.006),
|
||||
])
|
||||
@pytest.mark.parametrize('side,ask,bid,last,last_ab,expected', get_sell_rate_data)
|
||||
def test_get_exit_rate(default_conf, mocker, caplog, side, bid, ask,
|
||||
last, last_ab, expected) -> None:
|
||||
caplog.set_level(logging.DEBUG)
|
||||
@ -2481,14 +2485,14 @@ def test_get_ticker_rate_error(mocker, entry, default_conf, caplog, side, is_sho
|
||||
|
||||
|
||||
@pytest.mark.parametrize('is_short,side,expected', [
|
||||
(False, 'bid', 0.043936), # Value from order_book_l2 fitxure - bids side
|
||||
(False, 'ask', 0.043949), # Value from order_book_l2 fitxure - asks side
|
||||
(False, 'other', 0.043936), # Value from order_book_l2 fitxure - bids side
|
||||
(False, 'same', 0.043949), # Value from order_book_l2 fitxure - asks side
|
||||
(True, 'bid', 0.043936), # Value from order_book_l2 fitxure - bids side
|
||||
(True, 'ask', 0.043949), # Value from order_book_l2 fitxure - asks side
|
||||
(True, 'other', 0.043949), # Value from order_book_l2 fitxure - asks side
|
||||
(True, 'same', 0.043936), # Value from order_book_l2 fitxure - bids side
|
||||
(False, 'bid', 0.043936), # Value from order_book_l2 fixture - bids side
|
||||
(False, 'ask', 0.043949), # Value from order_book_l2 fixture - asks side
|
||||
(False, 'other', 0.043936), # Value from order_book_l2 fixture - bids side
|
||||
(False, 'same', 0.043949), # Value from order_book_l2 fixture - asks side
|
||||
(True, 'bid', 0.043936), # Value from order_book_l2 fixture - bids side
|
||||
(True, 'ask', 0.043949), # Value from order_book_l2 fixture - asks side
|
||||
(True, 'other', 0.043949), # Value from order_book_l2 fixture - asks side
|
||||
(True, 'same', 0.043936), # Value from order_book_l2 fixture - bids side
|
||||
])
|
||||
def test_get_exit_rate_orderbook(
|
||||
default_conf, mocker, caplog, is_short, side, expected, order_book_l2):
|
||||
@ -2521,7 +2525,8 @@ def test_get_exit_rate_orderbook_exception(default_conf, mocker, caplog):
|
||||
exchange = get_patched_exchange(mocker, default_conf)
|
||||
with pytest.raises(PricingError):
|
||||
exchange.get_rate(pair, refresh=True, side="exit", is_short=False)
|
||||
assert log_has_re(r"Exit Price at location 1 from orderbook could not be determined\..*",
|
||||
assert log_has_re(rf"{pair} - Exit Price at location 1 from orderbook "
|
||||
rf"could not be determined\..*",
|
||||
caplog)
|
||||
|
||||
|
||||
@ -2548,6 +2553,84 @@ def test_get_exit_rate_exception(default_conf, mocker, is_short):
|
||||
assert exchange.get_rate(pair, refresh=True, side="exit", is_short=is_short) == 0.13
|
||||
|
||||
|
||||
@pytest.mark.parametrize("side,ask,bid,last,last_ab,expected", get_entry_rate_data)
|
||||
@pytest.mark.parametrize("side2", ['bid', 'ask'])
|
||||
@pytest.mark.parametrize("use_order_book", [True, False])
|
||||
def test_get_rates_testing_buy(mocker, default_conf, caplog, side, ask, bid,
|
||||
last, last_ab, expected,
|
||||
side2, use_order_book, order_book_l2) -> None:
|
||||
caplog.set_level(logging.DEBUG)
|
||||
if last_ab is None:
|
||||
del default_conf['entry_pricing']['price_last_balance']
|
||||
else:
|
||||
default_conf['entry_pricing']['price_last_balance'] = last_ab
|
||||
default_conf['entry_pricing']['price_side'] = side
|
||||
default_conf['exit_pricing']['price_side'] = side2
|
||||
default_conf['exit_pricing']['use_order_book'] = use_order_book
|
||||
api_mock = MagicMock()
|
||||
api_mock.fetch_l2_order_book = order_book_l2
|
||||
api_mock.fetch_ticker = MagicMock(
|
||||
return_value={'ask': ask, 'last': last, 'bid': bid})
|
||||
exchange = get_patched_exchange(mocker, default_conf, api_mock)
|
||||
|
||||
assert exchange.get_rates('ETH/BTC', refresh=True, is_short=False)[0] == expected
|
||||
assert not log_has("Using cached buy rate for ETH/BTC.", caplog)
|
||||
|
||||
api_mock.fetch_l2_order_book.reset_mock()
|
||||
api_mock.fetch_ticker.reset_mock()
|
||||
assert exchange.get_rates('ETH/BTC', refresh=False, is_short=False)[0] == expected
|
||||
assert log_has("Using cached buy rate for ETH/BTC.", caplog)
|
||||
assert api_mock.fetch_l2_order_book.call_count == 0
|
||||
assert api_mock.fetch_ticker.call_count == 0
|
||||
# Running a 2nd time with Refresh on!
|
||||
caplog.clear()
|
||||
|
||||
assert exchange.get_rates('ETH/BTC', refresh=True, is_short=False)[0] == expected
|
||||
assert not log_has("Using cached buy rate for ETH/BTC.", caplog)
|
||||
|
||||
assert api_mock.fetch_l2_order_book.call_count == int(use_order_book)
|
||||
assert api_mock.fetch_ticker.call_count == 1
|
||||
|
||||
|
||||
@pytest.mark.parametrize('side,ask,bid,last,last_ab,expected', get_sell_rate_data)
|
||||
@pytest.mark.parametrize("side2", ['bid', 'ask'])
|
||||
@pytest.mark.parametrize("use_order_book", [True, False])
|
||||
def test_get_rates_testing_sell(default_conf, mocker, caplog, side, bid, ask,
|
||||
last, last_ab, expected,
|
||||
side2, use_order_book, order_book_l2) -> None:
|
||||
caplog.set_level(logging.DEBUG)
|
||||
|
||||
default_conf['exit_pricing']['price_side'] = side
|
||||
if last_ab is not None:
|
||||
default_conf['exit_pricing']['price_last_balance'] = last_ab
|
||||
|
||||
default_conf['entry_pricing']['price_side'] = side2
|
||||
default_conf['entry_pricing']['use_order_book'] = use_order_book
|
||||
api_mock = MagicMock()
|
||||
api_mock.fetch_l2_order_book = order_book_l2
|
||||
api_mock.fetch_ticker = MagicMock(
|
||||
return_value={'ask': ask, 'last': last, 'bid': bid})
|
||||
exchange = get_patched_exchange(mocker, default_conf, api_mock)
|
||||
|
||||
pair = "ETH/BTC"
|
||||
|
||||
# Test regular mode
|
||||
rate = exchange.get_rates(pair, refresh=True, is_short=False)[1]
|
||||
assert not log_has("Using cached sell rate for ETH/BTC.", caplog)
|
||||
assert isinstance(rate, float)
|
||||
assert rate == expected
|
||||
# Use caching
|
||||
api_mock.fetch_l2_order_book.reset_mock()
|
||||
api_mock.fetch_ticker.reset_mock()
|
||||
|
||||
rate = exchange.get_rates(pair, refresh=False, is_short=False)[1]
|
||||
assert rate == expected
|
||||
assert log_has("Using cached sell rate for ETH/BTC.", caplog)
|
||||
|
||||
assert api_mock.fetch_l2_order_book.call_count == 0
|
||||
assert api_mock.fetch_ticker.call_count == 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize("exchange_name", EXCHANGES)
|
||||
@pytest.mark.asyncio
|
||||
async def test___async_get_candle_history_sort(default_conf, mocker, exchange_name):
|
||||
@ -4099,20 +4182,6 @@ def test_get_or_calculate_liquidation_price(mocker, default_conf):
|
||||
)
|
||||
assert liq_price == 17.540699999999998
|
||||
|
||||
ccxt_exceptionhandlers(
|
||||
mocker,
|
||||
default_conf,
|
||||
api_mock,
|
||||
"binance",
|
||||
"get_or_calculate_liquidation_price",
|
||||
"fetch_positions",
|
||||
pair="XRP/USDT",
|
||||
open_rate=0.0,
|
||||
is_short=False,
|
||||
position=0.0,
|
||||
wallet_balance=0.0,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('exchange,rate_start,rate_end,d1,d2,amount,expected_fees', [
|
||||
('binance', 0, 2, "2021-09-01 01:00:00", "2021-09-01 04:00:00", 30.0, 0.0),
|
||||
|
@ -203,7 +203,7 @@ def test_fetch_stoploss_order_ftx(default_conf, mocker, limit_sell_order, limit_
|
||||
'info': {
|
||||
'orderId': 'mocked_limit_sell',
|
||||
}}])
|
||||
api_mock.fetch_order = MagicMock(return_value=limit_sell_order)
|
||||
api_mock.fetch_order = MagicMock(return_value=limit_sell_order.copy())
|
||||
|
||||
# No orderId field - no call to fetch_order
|
||||
resp = exchange.fetch_stoploss_order('X', 'TKN/BTC')
|
||||
@ -219,11 +219,23 @@ def test_fetch_stoploss_order_ftx(default_conf, mocker, limit_sell_order, limit_
|
||||
order = {'id': 'X', 'status': 'closed', 'info': {'orderId': None}, 'average': 0.254}
|
||||
api_mock.fetch_orders = MagicMock(return_value=[order])
|
||||
api_mock.fetch_order.reset_mock()
|
||||
api_mock.privateGetConditionalOrdersConditionalOrderIdTriggers = MagicMock(
|
||||
return_value={'result': [
|
||||
{'orderId': 'mocked_market_sell', 'type': 'market', 'side': 'sell', 'price': 0.254}
|
||||
]})
|
||||
resp = exchange.fetch_stoploss_order('X', 'TKN/BTC')
|
||||
assert resp
|
||||
# fetch_order not called (no regular order ID)
|
||||
assert api_mock.fetch_order.call_count == 0
|
||||
assert order == order
|
||||
assert api_mock.fetch_order.call_count == 1
|
||||
api_mock.privateGetConditionalOrdersConditionalOrderIdTriggers.call_count == 1
|
||||
expected_resp = limit_sell_order.copy()
|
||||
expected_resp.update({
|
||||
'id_stop': 'X',
|
||||
'id': 'X',
|
||||
'type': 'stop',
|
||||
'status_stop': 'triggered',
|
||||
})
|
||||
assert expected_resp == resp
|
||||
|
||||
with pytest.raises(InvalidOrderException):
|
||||
api_mock.fetch_orders = MagicMock(side_effect=ccxt.InvalidOrder("Order not found"))
|
||||
|
@ -21,10 +21,11 @@ def freqai_conf(default_conf, tmpdir):
|
||||
"strategy": "freqai_test_strat",
|
||||
"user_data_dir": Path(tmpdir),
|
||||
"strategy-path": "freqtrade/tests/strategy/strats",
|
||||
"freqaimodel": "LightGBMPredictionModel",
|
||||
"freqaimodel": "LightGBMRegressor",
|
||||
"freqaimodel_path": "freqai/prediction_models",
|
||||
"timerange": "20180110-20180115",
|
||||
"freqai": {
|
||||
"enabled": True,
|
||||
"startup_candles": 10000,
|
||||
"purge_old_models": True,
|
||||
"train_period_days": 5,
|
||||
@ -47,9 +48,9 @@ def freqai_conf(default_conf, tmpdir):
|
||||
"indicator_periods_candles": [10],
|
||||
},
|
||||
"data_split_parameters": {"test_size": 0.33, "random_state": 1},
|
||||
"model_training_parameters": {"n_estimators": 100, "verbosity": 0},
|
||||
"model_training_parameters": {"n_estimators": 100},
|
||||
},
|
||||
"config_files": [Path('config_examples', 'config_freqai_futures.example.json')]
|
||||
"config_files": [Path('config_examples', 'config_freqai.example.json')]
|
||||
}
|
||||
)
|
||||
freqaiconf['exchange'].update({'pair_whitelist': ['ADA/BTC', 'DASH/BTC', 'ETH/BTC', 'LTC/BTC']})
|
||||
@ -57,7 +58,6 @@ def freqai_conf(default_conf, tmpdir):
|
||||
|
||||
|
||||
def get_patched_data_kitchen(mocker, freqaiconf):
|
||||
# dd = mocker.patch('freqtrade.freqai.data_drawer', MagicMock())
|
||||
dk = FreqaiDataKitchen(freqaiconf)
|
||||
return dk
|
||||
|
||||
|
33
tests/freqai/test_freqai_backtsting.py
Normal file
@ -0,0 +1,33 @@
|
||||
from pathlib import Path
|
||||
from unittest.mock import PropertyMock
|
||||
|
||||
import pytest
|
||||
|
||||
from freqtrade.commands.optimize_commands import start_backtesting
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from tests.conftest import (CURRENT_TEST_STRATEGY, get_args, patch_exchange,
|
||||
patched_configuration_load_config_file)
|
||||
|
||||
|
||||
def test_backtest_start_backtest_list_freqai(freqai_conf, mocker, testdatadir):
|
||||
# Tests detail-data loading
|
||||
patch_exchange(mocker)
|
||||
|
||||
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
|
||||
PropertyMock(return_value=['HULUMULU/USDT', 'XRP/USDT']))
|
||||
# mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock)
|
||||
|
||||
patched_configuration_load_config_file(mocker, freqai_conf)
|
||||
|
||||
args = [
|
||||
'backtesting',
|
||||
'--config', 'config.json',
|
||||
'--datadir', str(testdatadir),
|
||||
'--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'),
|
||||
'--timeframe', '1h',
|
||||
'--strategy-list', CURRENT_TEST_STRATEGY
|
||||
]
|
||||
args = get_args(args)
|
||||
with pytest.raises(OperationalException,
|
||||
match=r"You can't use strategy_list and freqai at the same time\."):
|
||||
start_backtesting(args)
|
@ -12,6 +12,11 @@ from tests.conftest import get_patched_exchange, log_has_re
|
||||
from tests.freqai.conftest import get_patched_freqai_strategy
|
||||
|
||||
|
||||
def is_arm() -> bool:
|
||||
machine = platform.machine()
|
||||
return "arm" in machine or "aarch64" in machine
|
||||
|
||||
|
||||
def test_train_model_in_series_LightGBM(mocker, freqai_conf):
|
||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||
|
||||
@ -43,7 +48,7 @@ def test_train_model_in_series_LightGBM(mocker, freqai_conf):
|
||||
def test_train_model_in_series_LightGBMMultiModel(mocker, freqai_conf):
|
||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||
freqai_conf.update({"strategy": "freqai_test_multimodel_strat"})
|
||||
freqai_conf.update({"freqaimodel": "LightGBMPredictionMultiModel"})
|
||||
freqai_conf.update({"freqaimodel": "LightGBMRegressorMultiTarget"})
|
||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||
strategy.dp = DataProvider(freqai_conf, exchange)
|
||||
@ -70,11 +75,75 @@ def test_train_model_in_series_LightGBMMultiModel(mocker, freqai_conf):
|
||||
shutil.rmtree(Path(freqai.dk.full_path))
|
||||
|
||||
|
||||
@pytest.mark.skipif("arm" in platform.uname()[-1], reason="no ARM for Catboost ...")
|
||||
@pytest.mark.skipif(is_arm(), reason="no ARM for Catboost ...")
|
||||
def test_train_model_in_series_Catboost(mocker, freqai_conf):
|
||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||
freqai_conf.update({"freqaimodel": "CatboostPredictionModel"})
|
||||
del freqai_conf['freqai']['model_training_parameters']['verbosity']
|
||||
freqai_conf.update({"freqaimodel": "CatboostRegressor"})
|
||||
# freqai_conf.get('freqai', {}).update(
|
||||
# {'model_training_parameters': {"n_estimators": 100, "verbose": 0}})
|
||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||
strategy.dp = DataProvider(freqai_conf, exchange)
|
||||
|
||||
strategy.freqai_info = freqai_conf.get("freqai", {})
|
||||
freqai = strategy.freqai
|
||||
freqai.live = True
|
||||
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
||||
timerange = TimeRange.parse_timerange("20180110-20180130")
|
||||
freqai.dd.load_all_pair_histories(timerange, freqai.dk)
|
||||
|
||||
freqai.dd.pair_dict = MagicMock()
|
||||
|
||||
data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
|
||||
new_timerange = TimeRange.parse_timerange("20180120-20180130")
|
||||
|
||||
freqai.train_model_in_series(new_timerange, "ADA/BTC",
|
||||
strategy, freqai.dk, data_load_timerange)
|
||||
|
||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").exists()
|
||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists()
|
||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").exists()
|
||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").exists()
|
||||
|
||||
shutil.rmtree(Path(freqai.dk.full_path))
|
||||
|
||||
|
||||
@pytest.mark.skipif(is_arm(), reason="no ARM for Catboost ...")
|
||||
def test_train_model_in_series_CatboostClassifier(mocker, freqai_conf):
|
||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||
freqai_conf.update({"freqaimodel": "CatboostClassifier"})
|
||||
freqai_conf.update({"strategy": "freqai_test_classifier"})
|
||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||
strategy.dp = DataProvider(freqai_conf, exchange)
|
||||
|
||||
strategy.freqai_info = freqai_conf.get("freqai", {})
|
||||
freqai = strategy.freqai
|
||||
freqai.live = True
|
||||
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
||||
timerange = TimeRange.parse_timerange("20180110-20180130")
|
||||
freqai.dd.load_all_pair_histories(timerange, freqai.dk)
|
||||
|
||||
freqai.dd.pair_dict = MagicMock()
|
||||
|
||||
data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
|
||||
new_timerange = TimeRange.parse_timerange("20180120-20180130")
|
||||
|
||||
freqai.train_model_in_series(new_timerange, "ADA/BTC",
|
||||
strategy, freqai.dk, data_load_timerange)
|
||||
|
||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").exists()
|
||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists()
|
||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").exists()
|
||||
assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").exists()
|
||||
|
||||
shutil.rmtree(Path(freqai.dk.full_path))
|
||||
|
||||
|
||||
def test_train_model_in_series_LightGBMClassifier(mocker, freqai_conf):
|
||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||
freqai_conf.update({"freqaimodel": "LightGBMClassifier"})
|
||||
freqai_conf.update({"strategy": "freqai_test_classifier"})
|
||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||
strategy.dp = DataProvider(freqai_conf, exchange)
|
||||
|
@ -1,8 +1,10 @@
|
||||
# pragma pylint: disable=missing-docstring, W0212, line-too-long, C0103, unused-argument
|
||||
|
||||
from copy import deepcopy
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pandas as pd
|
||||
import pytest
|
||||
from arrow import Arrow
|
||||
|
||||
from freqtrade.configuration import TimeRange
|
||||
@ -87,3 +89,87 @@ def test_backtest_position_adjustment(default_conf, fee, mocker, testdatadir) ->
|
||||
assert (round(ln.iloc[0]["open"], 6) == round(t["close_rate"], 6) or
|
||||
round(ln.iloc[0]["low"], 6) < round(
|
||||
t["close_rate"], 6) < round(ln.iloc[0]["high"], 6))
|
||||
|
||||
|
||||
def test_backtest_position_adjustment_detailed(default_conf, fee, mocker) -> None:
|
||||
default_conf['use_exit_signal'] = False
|
||||
mocker.patch('freqtrade.exchange.Exchange.get_fee', fee)
|
||||
mocker.patch("freqtrade.exchange.Exchange.get_min_pair_stake_amount", return_value=10)
|
||||
mocker.patch("freqtrade.exchange.Exchange.get_max_pair_stake_amount", return_value=float('inf'))
|
||||
patch_exchange(mocker)
|
||||
default_conf.update({
|
||||
"stake_amount": 100.0,
|
||||
"dry_run_wallet": 1000.0,
|
||||
"strategy": "StrategyTestV3"
|
||||
})
|
||||
backtesting = Backtesting(default_conf)
|
||||
backtesting._set_strategy(backtesting.strategylist[0])
|
||||
pair = 'XRP/USDT'
|
||||
row = [
|
||||
pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=0),
|
||||
2.1, # Open
|
||||
2.2, # High
|
||||
1.9, # Low
|
||||
2.1, # Close
|
||||
1, # enter_long
|
||||
0, # exit_long
|
||||
0, # enter_short
|
||||
0, # exit_short
|
||||
'', # enter_tag
|
||||
'', # exit_tag
|
||||
]
|
||||
trade = backtesting._enter_trade(pair, row=row, direction='long')
|
||||
trade.orders[0].close_bt_order(row[0], trade)
|
||||
assert trade
|
||||
assert pytest.approx(trade.stake_amount) == 100.0
|
||||
assert pytest.approx(trade.amount) == 47.61904762
|
||||
assert len(trade.orders) == 1
|
||||
backtesting.strategy.adjust_trade_position = MagicMock(return_value=None)
|
||||
|
||||
trade = backtesting._get_adjust_trade_entry_for_candle(trade, row)
|
||||
assert trade
|
||||
assert pytest.approx(trade.stake_amount) == 100.0
|
||||
assert pytest.approx(trade.amount) == 47.61904762
|
||||
assert len(trade.orders) == 1
|
||||
# Increase position by 100
|
||||
backtesting.strategy.adjust_trade_position = MagicMock(return_value=100)
|
||||
|
||||
trade = backtesting._get_adjust_trade_entry_for_candle(trade, row)
|
||||
|
||||
assert trade
|
||||
assert pytest.approx(trade.stake_amount) == 200.0
|
||||
assert pytest.approx(trade.amount) == 95.23809524
|
||||
assert len(trade.orders) == 2
|
||||
|
||||
# Reduce by more than amount - no change to trade.
|
||||
backtesting.strategy.adjust_trade_position = MagicMock(return_value=-500)
|
||||
|
||||
trade = backtesting._get_adjust_trade_entry_for_candle(trade, row)
|
||||
|
||||
assert trade
|
||||
assert pytest.approx(trade.stake_amount) == 200.0
|
||||
assert pytest.approx(trade.amount) == 95.23809524
|
||||
assert len(trade.orders) == 2
|
||||
assert trade.nr_of_successful_entries == 2
|
||||
|
||||
# Reduce position by 50
|
||||
backtesting.strategy.adjust_trade_position = MagicMock(return_value=-100)
|
||||
trade = backtesting._get_adjust_trade_entry_for_candle(trade, row)
|
||||
|
||||
assert trade
|
||||
assert pytest.approx(trade.stake_amount) == 100.0
|
||||
assert pytest.approx(trade.amount) == 47.61904762
|
||||
assert len(trade.orders) == 3
|
||||
assert trade.nr_of_successful_entries == 2
|
||||
assert trade.nr_of_successful_exits == 1
|
||||
|
||||
# Adjust below minimum
|
||||
backtesting.strategy.adjust_trade_position = MagicMock(return_value=-99)
|
||||
trade = backtesting._get_adjust_trade_entry_for_candle(trade, row)
|
||||
|
||||
assert trade
|
||||
assert pytest.approx(trade.stake_amount) == 100.0
|
||||
assert pytest.approx(trade.amount) == 47.61904762
|
||||
assert len(trade.orders) == 3
|
||||
assert trade.nr_of_successful_entries == 2
|
||||
assert trade.nr_of_successful_exits == 1
|
||||
|
@ -305,6 +305,7 @@ def test_LowProfitPairs(mocker, default_conf, fee, caplog, only_per_side):
|
||||
min_ago_open=800, min_ago_close=450, profit_rate=0.9,
|
||||
))
|
||||
|
||||
Trade.commit()
|
||||
# Not locked with 1 trade
|
||||
assert not freqtrade.protections.global_stop()
|
||||
assert not freqtrade.protections.stop_per_pair('XRP/BTC')
|
||||
@ -316,6 +317,7 @@ def test_LowProfitPairs(mocker, default_conf, fee, caplog, only_per_side):
|
||||
min_ago_open=200, min_ago_close=120, profit_rate=0.9,
|
||||
))
|
||||
|
||||
Trade.commit()
|
||||
# Not locked with 1 trade (first trade is outside of lookback_period)
|
||||
assert not freqtrade.protections.global_stop()
|
||||
assert not freqtrade.protections.stop_per_pair('XRP/BTC')
|
||||
@ -327,14 +329,16 @@ def test_LowProfitPairs(mocker, default_conf, fee, caplog, only_per_side):
|
||||
'XRP/BTC', fee.return_value, False, exit_reason=ExitType.ROI.value,
|
||||
min_ago_open=20, min_ago_close=10, profit_rate=1.15, is_short=True
|
||||
))
|
||||
Trade.commit()
|
||||
assert freqtrade.protections.stop_per_pair('XRP/BTC') != only_per_side
|
||||
assert not PairLocks.is_pair_locked('XRP/BTC', side='*')
|
||||
assert PairLocks.is_pair_locked('XRP/BTC', side='long') == only_per_side
|
||||
|
||||
Trade.query.session.add(generate_mock_trade(
|
||||
'XRP/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value,
|
||||
min_ago_open=110, min_ago_close=20, profit_rate=0.8,
|
||||
min_ago_open=110, min_ago_close=21, profit_rate=0.8,
|
||||
))
|
||||
Trade.commit()
|
||||
|
||||
# Locks due to 2nd trade
|
||||
assert freqtrade.protections.global_stop() != only_per_side
|
||||
@ -342,6 +346,7 @@ def test_LowProfitPairs(mocker, default_conf, fee, caplog, only_per_side):
|
||||
assert PairLocks.is_pair_locked('XRP/BTC', side='long')
|
||||
assert PairLocks.is_pair_locked('XRP/BTC', side='*') != only_per_side
|
||||
assert not PairLocks.is_global_lock()
|
||||
Trade.commit()
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("init_persistence")
|
||||
|
@ -111,6 +111,7 @@ def test_rpc_trade_status(default_conf, ticker, fee, mocker) -> None:
|
||||
'stoploss_entry_dist': -0.00010475,
|
||||
'stoploss_entry_dist_ratio': -0.10448878,
|
||||
'open_order': None,
|
||||
'realized_profit': 0.0,
|
||||
'exchange': 'binance',
|
||||
'leverage': 1.0,
|
||||
'interest_rate': 0.0,
|
||||
@ -196,6 +197,7 @@ def test_rpc_trade_status(default_conf, ticker, fee, mocker) -> None:
|
||||
'stoploss_entry_dist_ratio': -0.10448878,
|
||||
'open_order': None,
|
||||
'exchange': 'binance',
|
||||
'realized_profit': 0.0,
|
||||
'leverage': 1.0,
|
||||
'interest_rate': 0.0,
|
||||
'liquidation_price': None,
|
||||
@ -312,10 +314,10 @@ def test__rpc_timeunit_profit(default_conf_usdt, ticker, fee,
|
||||
# {'date': datetime.date(2022, 6, 11), 'abs_profit': 13.8299999,
|
||||
# 'starting_balance': 1055.37, 'rel_profit': 0.0131044,
|
||||
# 'fiat_value': 0.0, 'trade_count': 2}
|
||||
assert day['abs_profit'] in (0.0, pytest.approx(13.8299999), pytest.approx(-4.0))
|
||||
assert day['rel_profit'] in (0.0, pytest.approx(0.01310441), pytest.approx(-0.00377583))
|
||||
assert day['abs_profit'] in (0.0, pytest.approx(6.83), pytest.approx(-4.09))
|
||||
assert day['rel_profit'] in (0.0, pytest.approx(0.00642902), pytest.approx(-0.00383512))
|
||||
assert day['trade_count'] in (0, 1, 2)
|
||||
assert day['starting_balance'] in (pytest.approx(1059.37), pytest.approx(1055.37))
|
||||
assert day['starting_balance'] in (pytest.approx(1062.37), pytest.approx(1066.46))
|
||||
assert day['fiat_value'] in (0.0, )
|
||||
# ensure first day is current date
|
||||
assert str(days['data'][0]['date']) == str(datetime.utcnow().date())
|
||||
@ -433,9 +435,9 @@ def test_rpc_trade_statistics(default_conf_usdt, ticker, fee, mocker) -> None:
|
||||
create_mock_trades_usdt(fee)
|
||||
|
||||
stats = rpc._rpc_trade_statistics(stake_currency, fiat_display_currency)
|
||||
assert pytest.approx(stats['profit_closed_coin']) == 9.83
|
||||
assert pytest.approx(stats['profit_closed_coin']) == 2.74
|
||||
assert pytest.approx(stats['profit_closed_percent_mean']) == -1.67
|
||||
assert pytest.approx(stats['profit_closed_fiat']) == 10.813
|
||||
assert pytest.approx(stats['profit_closed_fiat']) == 3.014
|
||||
assert pytest.approx(stats['profit_all_coin']) == -77.45964918
|
||||
assert pytest.approx(stats['profit_all_percent_mean']) == -57.86
|
||||
assert pytest.approx(stats['profit_all_fiat']) == -85.205614098
|
||||
@ -459,46 +461,6 @@ def test_rpc_trade_statistics(default_conf_usdt, ticker, fee, mocker) -> None:
|
||||
assert isnan(stats['profit_all_coin'])
|
||||
|
||||
|
||||
# Test that rpc_trade_statistics can handle trades that lacks
|
||||
# trade.open_rate (it is set to None)
|
||||
def test_rpc_trade_statistics_closed(mocker, default_conf_usdt, ticker, fee):
|
||||
mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price',
|
||||
return_value=1.1)
|
||||
mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock())
|
||||
mocker.patch.multiple(
|
||||
'freqtrade.exchange.Exchange',
|
||||
fetch_ticker=ticker,
|
||||
get_fee=fee,
|
||||
)
|
||||
|
||||
freqtradebot = get_patched_freqtradebot(mocker, default_conf_usdt)
|
||||
patch_get_signal(freqtradebot)
|
||||
stake_currency = default_conf_usdt['stake_currency']
|
||||
fiat_display_currency = default_conf_usdt['fiat_display_currency']
|
||||
|
||||
rpc = RPC(freqtradebot)
|
||||
|
||||
# Create some test data
|
||||
create_mock_trades_usdt(fee)
|
||||
|
||||
for trade in Trade.query.order_by(Trade.id).all():
|
||||
trade.open_rate = None
|
||||
|
||||
stats = rpc._rpc_trade_statistics(stake_currency, fiat_display_currency)
|
||||
assert stats['profit_closed_coin'] == 0
|
||||
assert stats['profit_closed_percent_mean'] == 0
|
||||
assert stats['profit_closed_fiat'] == 0
|
||||
assert stats['profit_all_coin'] == 0
|
||||
assert stats['profit_all_percent_mean'] == 0
|
||||
assert stats['profit_all_fiat'] == 0
|
||||
assert stats['trade_count'] == 7
|
||||
assert stats['first_trade_date'] == '2 days ago'
|
||||
assert stats['latest_trade_date'] == '17 minutes ago'
|
||||
assert stats['avg_duration'] == '0:00:00'
|
||||
assert stats['best_pair'] == 'XRP/USDT'
|
||||
assert stats['best_rate'] == 10.0
|
||||
|
||||
|
||||
def test_rpc_balance_handle_error(default_conf, mocker):
|
||||
mock_balance = {
|
||||
'BTC': {
|
||||
@ -841,7 +803,8 @@ def test_rpc_force_exit(default_conf, ticker, fee, mocker) -> None:
|
||||
'side': 'sell',
|
||||
'amount': amount,
|
||||
'remaining': amount,
|
||||
'filled': 0.0
|
||||
'filled': 0.0,
|
||||
'id': trade.orders[0].order_id,
|
||||
}
|
||||
)
|
||||
msg = rpc._rpc_force_exit('3')
|
||||
@ -867,9 +830,9 @@ def test_performance_handle(default_conf_usdt, ticker, fee, mocker) -> None:
|
||||
|
||||
res = rpc._rpc_performance()
|
||||
assert len(res) == 3
|
||||
assert res[0]['pair'] == 'XRP/USDT'
|
||||
assert res[0]['pair'] == 'ETC/USDT'
|
||||
assert res[0]['count'] == 1
|
||||
assert res[0]['profit_pct'] == 10.0
|
||||
assert res[0]['profit_pct'] == 5.0
|
||||
|
||||
|
||||
def test_enter_tag_performance_handle(default_conf, ticker, fee, mocker) -> None:
|
||||
@ -893,16 +856,16 @@ def test_enter_tag_performance_handle(default_conf, ticker, fee, mocker) -> None
|
||||
res = rpc._rpc_enter_tag_performance(None)
|
||||
|
||||
assert len(res) == 3
|
||||
assert res[0]['enter_tag'] == 'TEST3'
|
||||
assert res[0]['enter_tag'] == 'TEST1'
|
||||
assert res[0]['count'] == 1
|
||||
assert res[0]['profit_pct'] == 10.0
|
||||
assert res[0]['profit_pct'] == 5.0
|
||||
|
||||
res = rpc._rpc_enter_tag_performance(None)
|
||||
|
||||
assert len(res) == 3
|
||||
assert res[0]['enter_tag'] == 'TEST3'
|
||||
assert res[0]['enter_tag'] == 'TEST1'
|
||||
assert res[0]['count'] == 1
|
||||
assert res[0]['profit_pct'] == 10.0
|
||||
assert res[0]['profit_pct'] == 5.0
|
||||
|
||||
|
||||
def test_enter_tag_performance_handle_2(mocker, default_conf, markets, fee):
|
||||
@ -953,11 +916,11 @@ def test_exit_reason_performance_handle(default_conf_usdt, ticker, fee, mocker)
|
||||
res = rpc._rpc_exit_reason_performance(None)
|
||||
|
||||
assert len(res) == 3
|
||||
assert res[0]['exit_reason'] == 'roi'
|
||||
assert res[0]['exit_reason'] == 'exit_signal'
|
||||
assert res[0]['count'] == 1
|
||||
assert res[0]['profit_pct'] == 10.0
|
||||
assert res[0]['profit_pct'] == 5.0
|
||||
|
||||
assert res[1]['exit_reason'] == 'exit_signal'
|
||||
assert res[1]['exit_reason'] == 'roi'
|
||||
assert res[2]['exit_reason'] == 'Other'
|
||||
|
||||
|
||||
@ -1009,9 +972,9 @@ def test_mix_tag_performance_handle(default_conf, ticker, fee, mocker) -> None:
|
||||
res = rpc._rpc_mix_tag_performance(None)
|
||||
|
||||
assert len(res) == 3
|
||||
assert res[0]['mix_tag'] == 'TEST3 roi'
|
||||
assert res[0]['mix_tag'] == 'TEST1 exit_signal'
|
||||
assert res[0]['count'] == 1
|
||||
assert res[0]['profit_pct'] == 10.0
|
||||
assert res[0]['profit_pct'] == 5.0
|
||||
|
||||
|
||||
def test_mix_tag_performance_handle_2(mocker, default_conf, markets, fee):
|
||||
|
@ -109,6 +109,9 @@ def test_api_ui_fallback(botclient, mocker):
|
||||
rc = client_get(client, "/something")
|
||||
assert rc.status_code == 200
|
||||
|
||||
rc = client_get(client, "/something.js")
|
||||
assert rc.status_code == 200
|
||||
|
||||
# Test directory traversal without mock
|
||||
rc = client_get(client, '%2F%2F%2Fetc/passwd')
|
||||
assert rc.status_code == 200
|
||||
@ -717,11 +720,11 @@ def test_api_edge_disabled(botclient, mocker, ticker, fee, markets):
|
||||
(
|
||||
True,
|
||||
{'best_pair': 'ETC/BTC', 'best_rate': -0.5, 'best_pair_profit_ratio': -0.005,
|
||||
'profit_all_coin': 43.61269123,
|
||||
'profit_all_fiat': 538398.67323435, 'profit_all_percent_mean': 66.41,
|
||||
'profit_all_coin': 45.561959,
|
||||
'profit_all_fiat': 562462.39126200, 'profit_all_percent_mean': 66.41,
|
||||
'profit_all_ratio_mean': 0.664109545, 'profit_all_percent_sum': 398.47,
|
||||
'profit_all_ratio_sum': 3.98465727, 'profit_all_percent': 4.36,
|
||||
'profit_all_ratio': 0.043612222872799825, 'profit_closed_coin': -0.00673913,
|
||||
'profit_all_ratio_sum': 3.98465727, 'profit_all_percent': 4.56,
|
||||
'profit_all_ratio': 0.04556147, 'profit_closed_coin': -0.00673913,
|
||||
'profit_closed_fiat': -83.19455985, 'profit_closed_ratio_mean': -0.0075,
|
||||
'profit_closed_percent_mean': -0.75, 'profit_closed_ratio_sum': -0.015,
|
||||
'profit_closed_percent_sum': -1.5, 'profit_closed_ratio': -6.739057628404269e-06,
|
||||
@ -732,11 +735,11 @@ def test_api_edge_disabled(botclient, mocker, ticker, fee, markets):
|
||||
(
|
||||
False,
|
||||
{'best_pair': 'XRP/BTC', 'best_rate': 1.0, 'best_pair_profit_ratio': 0.01,
|
||||
'profit_all_coin': -44.0631579,
|
||||
'profit_all_fiat': -543959.6842755, 'profit_all_percent_mean': -66.41,
|
||||
'profit_all_coin': -45.79641127,
|
||||
'profit_all_fiat': -565356.69712815, 'profit_all_percent_mean': -66.41,
|
||||
'profit_all_ratio_mean': -0.6641100666666667, 'profit_all_percent_sum': -398.47,
|
||||
'profit_all_ratio_sum': -3.9846604, 'profit_all_percent': -4.41,
|
||||
'profit_all_ratio': -0.044063014216106644, 'profit_closed_coin': 0.00073913,
|
||||
'profit_all_ratio_sum': -3.9846604, 'profit_all_percent': -4.58,
|
||||
'profit_all_ratio': -0.045796261934205953, 'profit_closed_coin': 0.00073913,
|
||||
'profit_closed_fiat': 9.124559849999999, 'profit_closed_ratio_mean': 0.0075,
|
||||
'profit_closed_percent_mean': 0.75, 'profit_closed_ratio_sum': 0.015,
|
||||
'profit_closed_percent_sum': 1.5, 'profit_closed_ratio': 7.391275897987988e-07,
|
||||
@ -747,11 +750,11 @@ def test_api_edge_disabled(botclient, mocker, ticker, fee, markets):
|
||||
(
|
||||
None,
|
||||
{'best_pair': 'XRP/BTC', 'best_rate': 1.0, 'best_pair_profit_ratio': 0.01,
|
||||
'profit_all_coin': -14.43790415,
|
||||
'profit_all_fiat': -178235.92673175, 'profit_all_percent_mean': 0.08,
|
||||
'profit_all_coin': -14.94732578,
|
||||
'profit_all_fiat': -184524.7367541, 'profit_all_percent_mean': 0.08,
|
||||
'profit_all_ratio_mean': 0.000835751666666662, 'profit_all_percent_sum': 0.5,
|
||||
'profit_all_ratio_sum': 0.005014509999999972, 'profit_all_percent': -1.44,
|
||||
'profit_all_ratio': -0.014437768014451796, 'profit_closed_coin': -0.00542913,
|
||||
'profit_all_ratio_sum': 0.005014509999999972, 'profit_all_percent': -1.49,
|
||||
'profit_all_ratio': -0.014947184841095841, 'profit_closed_coin': -0.00542913,
|
||||
'profit_closed_fiat': -67.02260985, 'profit_closed_ratio_mean': 0.0025,
|
||||
'profit_closed_percent_mean': 0.25, 'profit_closed_ratio_sum': 0.005,
|
||||
'profit_closed_percent_sum': 0.5, 'profit_closed_ratio': -5.429078808526421e-06,
|
||||
@ -790,22 +793,22 @@ def test_api_profit(botclient, mocker, ticker, fee, markets, is_short, expected)
|
||||
'first_trade_timestamp': ANY,
|
||||
'latest_trade_date': '5 minutes ago',
|
||||
'latest_trade_timestamp': ANY,
|
||||
'profit_all_coin': expected['profit_all_coin'],
|
||||
'profit_all_fiat': expected['profit_all_fiat'],
|
||||
'profit_all_percent_mean': expected['profit_all_percent_mean'],
|
||||
'profit_all_ratio_mean': expected['profit_all_ratio_mean'],
|
||||
'profit_all_percent_sum': expected['profit_all_percent_sum'],
|
||||
'profit_all_ratio_sum': expected['profit_all_ratio_sum'],
|
||||
'profit_all_percent': expected['profit_all_percent'],
|
||||
'profit_all_ratio': expected['profit_all_ratio'],
|
||||
'profit_closed_coin': expected['profit_closed_coin'],
|
||||
'profit_closed_fiat': expected['profit_closed_fiat'],
|
||||
'profit_closed_ratio_mean': expected['profit_closed_ratio_mean'],
|
||||
'profit_closed_percent_mean': expected['profit_closed_percent_mean'],
|
||||
'profit_closed_ratio_sum': expected['profit_closed_ratio_sum'],
|
||||
'profit_closed_percent_sum': expected['profit_closed_percent_sum'],
|
||||
'profit_closed_ratio': expected['profit_closed_ratio'],
|
||||
'profit_closed_percent': expected['profit_closed_percent'],
|
||||
'profit_all_coin': pytest.approx(expected['profit_all_coin']),
|
||||
'profit_all_fiat': pytest.approx(expected['profit_all_fiat']),
|
||||
'profit_all_percent_mean': pytest.approx(expected['profit_all_percent_mean']),
|
||||
'profit_all_ratio_mean': pytest.approx(expected['profit_all_ratio_mean']),
|
||||
'profit_all_percent_sum': pytest.approx(expected['profit_all_percent_sum']),
|
||||
'profit_all_ratio_sum': pytest.approx(expected['profit_all_ratio_sum']),
|
||||
'profit_all_percent': pytest.approx(expected['profit_all_percent']),
|
||||
'profit_all_ratio': pytest.approx(expected['profit_all_ratio']),
|
||||
'profit_closed_coin': pytest.approx(expected['profit_closed_coin']),
|
||||
'profit_closed_fiat': pytest.approx(expected['profit_closed_fiat']),
|
||||
'profit_closed_ratio_mean': pytest.approx(expected['profit_closed_ratio_mean']),
|
||||
'profit_closed_percent_mean': pytest.approx(expected['profit_closed_percent_mean']),
|
||||
'profit_closed_ratio_sum': pytest.approx(expected['profit_closed_ratio_sum']),
|
||||
'profit_closed_percent_sum': pytest.approx(expected['profit_closed_percent_sum']),
|
||||
'profit_closed_ratio': pytest.approx(expected['profit_closed_ratio']),
|
||||
'profit_closed_percent': pytest.approx(expected['profit_closed_percent']),
|
||||
'trade_count': 6,
|
||||
'closed_trade_count': 2,
|
||||
'winning_trades': expected['winning_trades'],
|
||||
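The hunks above replace exact equality on the floating-point profit fields with pytest.approx, so tiny rounding differences no longer fail the test. A standalone sketch of the tolerance this buys (pytest.approx defaults to a relative tolerance of 1e-6):

import pytest

expected = {'profit_all_ratio': 0.04556147}
result = {'profit_all_ratio': 0.045561470000001}  # small float drift from repeated arithmetic
assert result['profit_all_ratio'] == pytest.approx(expected['profit_all_ratio'])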
@ -1202,7 +1205,7 @@ def test_api_forceexit(botclient, mocker, ticker, fee, markets):
|
||||
fetch_ticker=ticker,
|
||||
get_fee=fee,
|
||||
markets=PropertyMock(return_value=markets),
|
||||
_is_dry_limit_order_filled=MagicMock(return_value=False),
|
||||
_is_dry_limit_order_filled=MagicMock(return_value=True),
|
||||
)
|
||||
patch_get_signal(ftbot)
|
||||
|
||||
@ -1212,12 +1215,27 @@ def test_api_forceexit(botclient, mocker, ticker, fee, markets):
|
||||
assert rc.json() == {"error": "Error querying /api/v1/forceexit: invalid argument"}
|
||||
Trade.query.session.rollback()
|
||||
|
||||
ftbot.enter_positions()
|
||||
create_mock_trades(fee)
|
||||
trade = Trade.get_trades([Trade.id == 5]).first()
|
||||
assert pytest.approx(trade.amount) == 123
|
||||
rc = client_post(client, f"{BASE_URI}/forceexit",
|
||||
data='{"tradeid": "5", "ordertype": "market", "amount": 23}')
|
||||
assert_response(rc)
|
||||
assert rc.json() == {'result': 'Created sell order for trade 5.'}
|
||||
Trade.query.session.rollback()
|
||||
|
||||
trade = Trade.get_trades([Trade.id == 5]).first()
|
||||
assert pytest.approx(trade.amount) == 100
|
||||
assert trade.is_open is True
|
||||
|
||||
rc = client_post(client, f"{BASE_URI}/forceexit",
|
||||
data='{"tradeid": "1"}')
|
||||
data='{"tradeid": "5"}')
|
||||
assert_response(rc)
|
||||
assert rc.json() == {'result': 'Created sell order for trade 1.'}
|
||||
assert rc.json() == {'result': 'Created sell order for trade 5.'}
|
||||
Trade.query.session.rollback()
|
||||
|
||||
trade = Trade.get_trades([Trade.id == 5]).first()
|
||||
assert trade.is_open is False
|
||||
|
||||
|
||||
def test_api_pair_candles(botclient, ohlcv_history):
|
||||
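The extended forceexit test above also exercises partial exits via the new "amount" field. A minimal sketch of the same call against a running bot's REST API; host, port and credentials are assumptions, the payload mirrors the test:

import requests

resp = requests.post(
    "http://127.0.0.1:8080/api/v1/forceexit",
    json={"tradeid": "5", "ordertype": "market", "amount": 23},  # exit only 23 units of trade 5
    auth=("freqtrader", "password"),
)
print(resp.json())  # the test expects {'result': 'Created sell order for trade 5.'}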
@ -1403,6 +1421,7 @@ def test_api_strategies(botclient):
|
||||
'StrategyTestV2',
|
||||
'StrategyTestV3',
|
||||
'StrategyTestV3Futures',
|
||||
'freqai_test_classifier',
|
||||
'freqai_test_multimodel_strat',
|
||||
'freqai_test_strat'
|
||||
]}
|
||||
|
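With the new test strategy added, the strategy listing endpoint now includes 'freqai_test_classifier'. A small sketch of querying it; host and credentials are assumptions for a locally running bot with the API server enabled:

import requests

resp = requests.get("http://127.0.0.1:8080/api/v1/strategies",
                    auth=("freqtrader", "password"))
print(resp.json())  # should list the strategies asserted above, including 'freqai_test_classifier'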
@ -1,6 +1,7 @@
|
||||
# pragma pylint: disable=missing-docstring, C0103
|
||||
import logging
|
||||
import time
|
||||
from collections import deque
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from freqtrade.enums import RPCMessageType
|
||||
@ -81,9 +82,25 @@ def test_send_msg_telegram_disabled(mocker, default_conf, caplog) -> None:
|
||||
assert telegram_mock.call_count == 0
|
||||
|
||||
|
||||
def test_process_msg_queue(mocker, default_conf, caplog) -> None:
|
||||
telegram_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg')
|
||||
mocker.patch('freqtrade.rpc.telegram.Telegram._init')
|
||||
|
||||
freqtradebot = get_patched_freqtradebot(mocker, default_conf)
|
||||
rpc_manager = RPCManager(freqtradebot)
|
||||
queue = deque()
|
||||
queue.append('Test message')
|
||||
queue.append('Test message 2')
|
||||
rpc_manager.process_msg_queue(queue)
|
||||
|
||||
assert log_has("Sending rpc message: {'type': strategy_msg, 'msg': 'Test message'}", caplog)
|
||||
assert log_has("Sending rpc message: {'type': strategy_msg, 'msg': 'Test message 2'}", caplog)
|
||||
assert telegram_mock.call_count == 2
|
||||
|
||||
|
||||
def test_send_msg_telegram_enabled(mocker, default_conf, caplog) -> None:
|
||||
telegram_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', MagicMock())
|
||||
mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock())
|
||||
telegram_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg')
|
||||
mocker.patch('freqtrade.rpc.telegram.Telegram._init')
|
||||
|
||||
freqtradebot = get_patched_freqtradebot(mocker, default_conf)
|
||||
rpc_manager = RPCManager(freqtradebot)
|
||||
|
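The new test_process_msg_queue test pins down the queue-draining behaviour added to RPCManager. A hedged sketch (not the actual implementation) of what the assertions imply process_msg_queue() does:

from collections import deque

from freqtrade.enums import RPCMessageType


def process_msg_queue(rpc_manager, queue: deque) -> None:
    # Pop queued strategy messages and forward each one to every RPC handler.
    while queue:
        msg = queue.popleft()
        rpc_manager.send_msg({'type': RPCMessageType.STRATEGY_MSG, 'msg': msg})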
@ -272,7 +272,7 @@ def test_telegram_status_multi_entry(default_conf, update, mocker, fee) -> None:
|
||||
msg = msg_mock.call_args_list[0][0][0]
|
||||
assert re.search(r'Number of Entries.*2', msg)
|
||||
assert re.search(r'Average Entry Price', msg)
|
||||
assert re.search(r'Order filled at', msg)
|
||||
assert re.search(r'Order filled', msg)
|
||||
assert re.search(r'Close Date:', msg) is None
|
||||
assert re.search(r'Close Profit:', msg) is None
|
||||
|
||||
@ -342,7 +342,7 @@ def test_status_handle(default_conf, update, ticker, fee, mocker) -> None:
|
||||
# close_rate should not be included in the message as the trade is not closed
|
||||
# and no line should be empty
|
||||
lines = msg_mock.call_args_list[0][0][0].split('\n')
|
||||
assert '' not in lines
|
||||
assert '' not in lines[:-1]
|
||||
assert 'Close Rate' not in ''.join(lines)
|
||||
assert 'Close Profit' not in ''.join(lines)
|
||||
|
||||
@ -357,13 +357,29 @@ def test_status_handle(default_conf, update, ticker, fee, mocker) -> None:
|
||||
telegram._status(update=update, context=context)
|
||||
|
||||
lines = msg_mock.call_args_list[0][0][0].split('\n')
|
||||
assert '' not in lines
|
||||
assert '' not in lines[:-1]
|
||||
assert 'Close Rate' not in ''.join(lines)
|
||||
assert 'Close Profit' not in ''.join(lines)
|
||||
|
||||
assert msg_mock.call_count == 2
|
||||
assert 'LTC/BTC' in msg_mock.call_args_list[0][0][0]
|
||||
|
||||
mocker.patch('freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH', 500)
|
||||
|
||||
msg_mock.reset_mock()
|
||||
context = MagicMock()
|
||||
context.args = ["2"]
|
||||
telegram._status(update=update, context=context)
|
||||
|
||||
assert msg_mock.call_count == 2
|
||||
|
||||
msg1 = msg_mock.call_args_list[0][0][0]
|
||||
msg2 = msg_mock.call_args_list[1][0][0]
|
||||
|
||||
assert 'Close Rate' not in msg1
|
||||
assert 'Trade ID:* `2`' in msg1
|
||||
assert 'Trade ID:* `2` - continued' in msg2
|
||||
|
||||
|
||||
def test_status_table_handle(default_conf, update, ticker, fee, mocker) -> None:
|
||||
mocker.patch.multiple(
|
||||
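The /status hunk above patches MAX_MESSAGE_LENGTH down to 500 and expects the overflow to arrive as a second message titled "Trade ID:* `2` - continued". A hedged sketch of line-wise splitting consistent with that behaviour (not the actual telegram module code):

def split_status_msg(msg: str, max_len: int = 500) -> list:
    # Cut on newlines so every chunk stays below the message length limit.
    chunks, current = [], ""
    for line in msg.split("\n"):
        if current and len(current) + len(line) + 1 > max_len:
            chunks.append(current)
            current = line
        else:
            current = f"{current}\n{line}" if current else line
    if current:
        chunks.append(current)
    return chunks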
@ -433,10 +449,10 @@ def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time_machi
|
||||
assert "Daily Profit over the last 2 days</b>:" in msg_mock.call_args_list[0][0][0]
|
||||
assert 'Day ' in msg_mock.call_args_list[0][0][0]
|
||||
assert str(datetime.utcnow().date()) in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 13.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 15.21 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 6.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 7.51 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(2)' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(2) 13.83 USDT 15.21 USD 1.31%' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(2) 6.83 USDT 7.51 USD 0.64%' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(0)' in msg_mock.call_args_list[0][0][0]
|
||||
|
||||
# Reset msg_mock
|
||||
@ -447,8 +463,8 @@ def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time_machi
|
||||
assert "Daily Profit over the last 7 days</b>:" in msg_mock.call_args_list[0][0][0]
|
||||
assert str(datetime.utcnow().date()) in msg_mock.call_args_list[0][0][0]
|
||||
assert str((datetime.utcnow() - timedelta(days=5)).date()) in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 13.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 15.21 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 6.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 7.51 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(2)' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(1)' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(0)' in msg_mock.call_args_list[0][0][0]
|
||||
@ -460,8 +476,8 @@ def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time_machi
|
||||
context = MagicMock()
|
||||
context.args = ["1"]
|
||||
telegram._daily(update=update, context=context)
|
||||
assert ' 13.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 15.21 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 6.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 7.51 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(2)' in msg_mock.call_args_list[0][0][0]
|
||||
|
||||
|
||||
@ -523,8 +539,8 @@ def test_weekly_handle(default_conf_usdt, update, ticker, fee, mocker, time_mach
|
||||
today = datetime.utcnow().date()
|
||||
first_iso_day_of_current_week = today - timedelta(days=today.weekday())
|
||||
assert str(first_iso_day_of_current_week) in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 9.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 10.81 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(3)' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(0)' in msg_mock.call_args_list[0][0][0]
|
||||
|
||||
@ -536,8 +552,8 @@ def test_weekly_handle(default_conf_usdt, update, ticker, fee, mocker, time_mach
|
||||
assert "Weekly Profit over the last 8 weeks (starting from Monday)</b>:" \
|
||||
in msg_mock.call_args_list[0][0][0]
|
||||
assert 'Weekly' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 9.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 10.81 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(3)' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(0)' in msg_mock.call_args_list[0][0][0]
|
||||
|
||||
@ -592,8 +608,8 @@ def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, time_mac
|
||||
today = datetime.utcnow().date()
|
||||
current_month = f"{today.year}-{today.month:02} "
|
||||
assert current_month in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 9.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 10.81 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(3)' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(0)' in msg_mock.call_args_list[0][0][0]
|
||||
|
||||
@ -606,8 +622,8 @@ def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, time_mac
|
||||
assert 'Monthly Profit over the last 6 months</b>:' in msg_mock.call_args_list[0][0][0]
|
||||
assert 'Month ' in msg_mock.call_args_list[0][0][0]
|
||||
assert current_month in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 9.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 10.81 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(3)' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(0)' in msg_mock.call_args_list[0][0][0]
|
||||
|
||||
@ -620,8 +636,8 @@ def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, time_mac
|
||||
telegram._monthly(update=update, context=context)
|
||||
assert msg_mock.call_count == 1
|
||||
assert 'Monthly Profit over the last 12 months</b>:' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 9.83 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 10.81 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0]
|
||||
assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0]
|
||||
assert '(3)' in msg_mock.call_args_list[0][0][0]
|
||||
|
||||
# The one-digit months should contain a zero, Eg: September 2021 = "2021-09"
|
||||
@ -959,6 +975,9 @@ def test_telegram_forceexit_handle(default_conf, update, ticker, fee,
|
||||
'open_date': ANY,
|
||||
'close_date': ANY,
|
||||
'close_rate': ANY,
|
||||
'stake_amount': 0.0009999999999054,
|
||||
'sub_trade': False,
|
||||
'cumulative_profit': 0.0,
|
||||
} == last_msg
|
||||
|
||||
|
||||
@ -1028,6 +1047,9 @@ def test_telegram_force_exit_down_handle(default_conf, update, ticker, fee,
|
||||
'open_date': ANY,
|
||||
'close_date': ANY,
|
||||
'close_rate': ANY,
|
||||
'stake_amount': 0.0009999999999054,
|
||||
'sub_trade': False,
|
||||
'cumulative_profit': 0.0,
|
||||
} == last_msg
|
||||
|
||||
|
||||
@ -1087,6 +1109,9 @@ def test_forceexit_all_handle(default_conf, update, ticker, fee, mocker) -> None
|
||||
'open_date': ANY,
|
||||
'close_date': ANY,
|
||||
'close_rate': ANY,
|
||||
'stake_amount': 0.0009999999999054,
|
||||
'sub_trade': False,
|
||||
'cumulative_profit': 0.0,
|
||||
} == msg
|
||||
|
||||
|
||||
@ -1259,7 +1284,7 @@ def test_telegram_performance_handle(default_conf_usdt, update, ticker, fee, moc
|
||||
telegram._performance(update=update, context=MagicMock())
|
||||
assert msg_mock.call_count == 1
|
||||
assert 'Performance' in msg_mock.call_args_list[0][0][0]
|
||||
assert '<code>XRP/USDT\t9.842 USDT (10.00%) (1)</code>' in msg_mock.call_args_list[0][0][0]
|
||||
assert '<code>XRP/USDT\t2.842 USDT (10.00%) (1)</code>' in msg_mock.call_args_list[0][0][0]
|
||||
|
||||
|
||||
def test_telegram_entry_tag_performance_handle(
|
||||
@ -1309,7 +1334,7 @@ def test_telegram_exit_reason_performance_handle(default_conf_usdt, update, tick
|
||||
telegram._exit_reason_performance(update=update, context=context)
|
||||
assert msg_mock.call_count == 1
|
||||
assert 'Exit Reason Performance' in msg_mock.call_args_list[0][0][0]
|
||||
assert '<code>roi\t9.842 USDT (10.00%) (1)</code>' in msg_mock.call_args_list[0][0][0]
|
||||
assert '<code>roi\t2.842 USDT (10.00%) (1)</code>' in msg_mock.call_args_list[0][0][0]
|
||||
context.args = ['XRP/USDT']
|
||||
|
||||
telegram._exit_reason_performance(update=update, context=context)
|
||||
@ -1341,7 +1366,7 @@ def test_telegram_mix_tag_performance_handle(default_conf_usdt, update, ticker,
|
||||
telegram._mix_tag_performance(update=update, context=context)
|
||||
assert msg_mock.call_count == 1
|
||||
assert 'Mix Tag Performance' in msg_mock.call_args_list[0][0][0]
|
||||
assert ('<code>TEST3 roi\t9.842 USDT (10.00%) (1)</code>'
|
||||
assert ('<code>TEST3 roi\t2.842 USDT (10.00%) (1)</code>'
|
||||
in msg_mock.call_args_list[0][0][0])
|
||||
|
||||
context.args = ['XRP/USDT']
|
||||
@ -1507,7 +1532,7 @@ def test_telegram_logs(default_conf, update, mocker) -> None:
|
||||
|
||||
msg_mock.reset_mock()
|
||||
# Test with changed MaxMessageLength
|
||||
mocker.patch('freqtrade.rpc.telegram.MAX_TELEGRAM_MESSAGE_LENGTH', 200)
|
||||
mocker.patch('freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH', 200)
|
||||
context = MagicMock()
|
||||
context.args = []
|
||||
telegram._logs(update=update, context=context)
|
||||
@ -1789,7 +1814,6 @@ def test_send_msg_entry_fill_notification(default_conf, mocker, message_type, en
|
||||
'leverage': leverage,
|
||||
'stake_amount': 0.01465333,
|
||||
'direction': entered,
|
||||
# 'stake_amount_fiat': 0.0,
|
||||
'stake_currency': 'BTC',
|
||||
'fiat_currency': 'USD',
|
||||
'open_rate': 1.099e-05,
|
||||
@ -1806,6 +1830,33 @@ def test_send_msg_entry_fill_notification(default_conf, mocker, message_type, en
|
||||
'*Total:* `(0.01465333 BTC, 180.895 USD)`'
|
||||
)
|
||||
|
||||
msg_mock.reset_mock()
|
||||
telegram.send_msg({
|
||||
'type': message_type,
|
||||
'trade_id': 1,
|
||||
'enter_tag': enter_signal,
|
||||
'exchange': 'Binance',
|
||||
'pair': 'ETH/BTC',
|
||||
'leverage': leverage,
|
||||
'stake_amount': 0.01465333,
|
||||
'sub_trade': True,
|
||||
'direction': entered,
|
||||
'stake_currency': 'BTC',
|
||||
'fiat_currency': 'USD',
|
||||
'open_rate': 1.099e-05,
|
||||
'amount': 1333.3333333333335,
|
||||
'open_date': arrow.utcnow().shift(hours=-1)
|
||||
})
|
||||
|
||||
assert msg_mock.call_args[0][0] == (
|
||||
f'\N{CHECK MARK} *Binance (dry):* {entered}ed ETH/BTC (#1)\n'
|
||||
f'*Enter Tag:* `{enter_signal}`\n'
|
||||
'*Amount:* `1333.33333333`\n'
|
||||
f"{leverage_text}"
|
||||
'*Open Rate:* `0.00001099`\n'
|
||||
'*Total:* `(0.01465333 BTC, 180.895 USD)`'
|
||||
)
|
||||
|
||||
|
||||
def test_send_msg_sell_notification(default_conf, mocker) -> None:
|
||||
|
||||
@ -1840,12 +1891,51 @@ def test_send_msg_sell_notification(default_conf, mocker) -> None:
|
||||
'*Unrealized Profit:* `-57.41% (loss: -0.05746268 ETH / -24.812 USD)`\n'
|
||||
'*Enter Tag:* `buy_signal1`\n'
|
||||
'*Exit Reason:* `stop_loss`\n'
|
||||
'*Duration:* `1:00:00 (60.0 min)`\n'
|
||||
'*Direction:* `Long`\n'
|
||||
'*Amount:* `1333.33333333`\n'
|
||||
'*Open Rate:* `0.00007500`\n'
|
||||
'*Current Rate:* `0.00003201`\n'
|
||||
'*Close Rate:* `0.00003201`'
|
||||
'*Exit Rate:* `0.00003201`\n'
|
||||
'*Duration:* `1:00:00 (60.0 min)`'
|
||||
)
|
||||
|
||||
msg_mock.reset_mock()
|
||||
telegram.send_msg({
|
||||
'type': RPCMessageType.EXIT,
|
||||
'trade_id': 1,
|
||||
'exchange': 'Binance',
|
||||
'pair': 'KEY/ETH',
|
||||
'direction': 'Long',
|
||||
'gain': 'loss',
|
||||
'limit': 3.201e-05,
|
||||
'amount': 1333.3333333333335,
|
||||
'order_type': 'market',
|
||||
'open_rate': 7.5e-05,
|
||||
'current_rate': 3.201e-05,
|
||||
'cumulative_profit': -0.15746268,
|
||||
'profit_amount': -0.05746268,
|
||||
'profit_ratio': -0.57405275,
|
||||
'stake_currency': 'ETH',
|
||||
'fiat_currency': 'USD',
|
||||
'enter_tag': 'buy_signal1',
|
||||
'exit_reason': ExitType.STOP_LOSS.value,
|
||||
'open_date': arrow.utcnow().shift(days=-1, hours=-2, minutes=-30),
|
||||
'close_date': arrow.utcnow(),
|
||||
'stake_amount': 0.01,
|
||||
'sub_trade': True,
|
||||
})
|
||||
assert msg_mock.call_args[0][0] == (
|
||||
'\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n'
|
||||
'*Unrealized Sub Profit:* `-57.41% (loss: -0.05746268 ETH / -24.812 USD)`\n'
|
||||
'*Cumulative Profit:* (`-0.15746268 ETH / -24.812 USD`)\n'
|
||||
'*Enter Tag:* `buy_signal1`\n'
|
||||
'*Exit Reason:* `stop_loss`\n'
|
||||
'*Direction:* `Long`\n'
|
||||
'*Amount:* `1333.33333333`\n'
|
||||
'*Open Rate:* `0.00007500`\n'
|
||||
'*Current Rate:* `0.00003201`\n'
|
||||
'*Exit Rate:* `0.00003201`\n'
|
||||
'*Remaining:* `(0.01 ETH, -24.812 USD)`'
|
||||
)
|
||||
|
||||
msg_mock.reset_mock()
|
||||
@ -1871,15 +1961,15 @@ def test_send_msg_sell_notification(default_conf, mocker) -> None:
|
||||
})
|
||||
assert msg_mock.call_args[0][0] == (
|
||||
'\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n'
|
||||
'*Unrealized Profit:* `-57.41%`\n'
|
||||
'*Unrealized Profit:* `-57.41% (loss: -0.05746268 ETH)`\n'
|
||||
'*Enter Tag:* `buy_signal1`\n'
|
||||
'*Exit Reason:* `stop_loss`\n'
|
||||
'*Duration:* `1 day, 2:30:00 (1590.0 min)`\n'
|
||||
'*Direction:* `Long`\n'
|
||||
'*Amount:* `1333.33333333`\n'
|
||||
'*Open Rate:* `0.00007500`\n'
|
||||
'*Current Rate:* `0.00003201`\n'
|
||||
'*Close Rate:* `0.00003201`'
|
||||
'*Exit Rate:* `0.00003201`\n'
|
||||
'*Duration:* `1 day, 2:30:00 (1590.0 min)`'
|
||||
)
|
||||
# Reset singleton function to avoid random breaks
|
||||
telegram._rpc._fiat_converter.convert_amount = old_convamount
|
||||
@ -1954,15 +2044,15 @@ def test_send_msg_sell_fill_notification(default_conf, mocker, direction,
|
||||
leverage_text = f'*Leverage:* `{leverage}`\n' if leverage and leverage != 1.0 else ''
|
||||
assert msg_mock.call_args[0][0] == (
|
||||
'\N{WARNING SIGN} *Binance (dry):* Exited KEY/ETH (#1)\n'
|
||||
'*Profit:* `-57.41%`\n'
|
||||
'*Profit:* `-57.41% (loss: -0.05746268 ETH)`\n'
|
||||
f'*Enter Tag:* `{enter_signal}`\n'
|
||||
'*Exit Reason:* `stop_loss`\n'
|
||||
'*Duration:* `1 day, 2:30:00 (1590.0 min)`\n'
|
||||
f"*Direction:* `{direction}`\n"
|
||||
f"{leverage_text}"
|
||||
'*Amount:* `1333.33333333`\n'
|
||||
'*Open Rate:* `0.00007500`\n'
|
||||
'*Close Rate:* `0.00003201`'
|
||||
'*Exit Rate:* `0.00003201`\n'
|
||||
'*Duration:* `1 day, 2:30:00 (1590.0 min)`'
|
||||
)
|
||||
|
||||
|
||||
@ -1994,6 +2084,16 @@ def test_startup_notification(default_conf, mocker) -> None:
|
||||
assert msg_mock.call_args[0][0] == '*Custom:* `Hello World`'
|
||||
|
||||
|
||||
def test_send_msg_strategy_msg_notification(default_conf, mocker) -> None:
|
||||
|
||||
telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
|
||||
telegram.send_msg({
|
||||
'type': RPCMessageType.STRATEGY_MSG,
|
||||
'msg': 'hello world, Test msg'
|
||||
})
|
||||
assert msg_mock.call_args[0][0] == 'hello world, Test msg'
|
||||
|
||||
|
||||
def test_send_msg_unknown_type(default_conf, mocker) -> None:
|
||||
telegram, _, _ = get_telegram_testobject(mocker, default_conf)
|
||||
with pytest.raises(NotImplementedError, match=r'Unknown message type: None'):
|
||||
@ -2080,16 +2180,16 @@ def test_send_msg_sell_notification_no_fiat(
|
||||
leverage_text = f'*Leverage:* `{leverage}`\n' if leverage and leverage != 1.0 else ''
|
||||
assert msg_mock.call_args[0][0] == (
|
||||
'\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n'
|
||||
'*Unrealized Profit:* `-57.41%`\n'
|
||||
'*Unrealized Profit:* `-57.41% (loss: -0.05746268 ETH)`\n'
|
||||
f'*Enter Tag:* `{enter_signal}`\n'
|
||||
'*Exit Reason:* `stop_loss`\n'
|
||||
'*Duration:* `2:35:03 (155.1 min)`\n'
|
||||
f'*Direction:* `{direction}`\n'
|
||||
f'{leverage_text}'
|
||||
'*Amount:* `1333.33333333`\n'
|
||||
'*Open Rate:* `0.00007500`\n'
|
||||
'*Current Rate:* `0.00003201`\n'
|
||||
'*Close Rate:* `0.00003201`'
|
||||
'*Exit Rate:* `0.00003201`\n'
|
||||
'*Duration:* `2:35:03 (155.1 min)`'
|
||||
)
|
||||
|
||||
|
||||
|
tests/strategy/strats/freqai_test_classifier.py (new file, 139 lines)
@ -0,0 +1,139 @@
import logging
from functools import reduce

import numpy as np
import pandas as pd
import talib.abstract as ta
from pandas import DataFrame

from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair


logger = logging.getLogger(__name__)


class freqai_test_classifier(IStrategy):
    """
    Test strategy - used for testing freqAI functionalities.
    DO not use in production.
    """

    minimal_roi = {"0": 0.1, "240": -1}

    plot_config = {
        "main_plot": {},
        "subplots": {
            "prediction": {"prediction": {"color": "blue"}},
            "target_roi": {
                "target_roi": {"color": "brown"},
            },
            "do_predict": {
                "do_predict": {"color": "brown"},
            },
        },
    }

    process_only_new_candles = True
    stoploss = -0.05
    use_exit_signal = True
    startup_candle_count: int = 300
    can_short = False

    linear_roi_offset = DecimalParameter(
        0.00, 0.02, default=0.005, space="sell", optimize=False, load=True
    )
    max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True)

    def informative_pairs(self):
        whitelist_pairs = self.dp.current_whitelist()
        corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"]
        informative_pairs = []
        for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]:
            for pair in whitelist_pairs:
                informative_pairs.append((pair, tf))
            for pair in corr_pairs:
                if pair in whitelist_pairs:
                    continue  # avoid duplication
                informative_pairs.append((pair, tf))
        return informative_pairs

    def populate_any_indicators(
        self, pair, df, tf, informative=None, set_generalized_indicators=False
    ):

        coin = pair.split('/')[0]

        with self.freqai.lock:
            if informative is None:
                informative = self.dp.get_pair_dataframe(pair, tf)

            # first loop is automatically duplicating indicators for time periods
            for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:

                t = int(t)
                informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
                informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
                informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)

            informative[f"%-{coin}pct-change"] = informative["close"].pct_change()
            informative[f"%-{coin}raw_volume"] = informative["volume"]
            informative[f"%-{coin}raw_price"] = informative["close"]

            indicators = [col for col in informative if col.startswith("%")]
            # This loop duplicates and shifts all indicators to add a sense of recency to data
            for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
                if n == 0:
                    continue
                informative_shift = informative[indicators].shift(n)
                informative_shift = informative_shift.add_suffix("_shift-" + str(n))
                informative = pd.concat((informative, informative_shift), axis=1)

            df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
            skip_columns = [
                (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
            ]
            df = df.drop(columns=skip_columns)

            # Add generalized indicators here (because in live, it will call this
            # function to populate indicators during training). Notice how we ensure not to
            # add them multiple times
            if set_generalized_indicators:
                df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7
                df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25

                # user adds targets here by prepending them with &- (see convention below)
                # If user wishes to use multiple targets, a multioutput prediction model
                # needs to be used such as templates/CatboostPredictionMultiModel.py
                df['&s-up_or_down'] = np.where(df["close"].shift(-100) > df["close"], 'up', 'down')

        return df

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:

        self.freqai_info = self.config["freqai"]

        dataframe = self.freqai.start(dataframe, metadata, self)

        return dataframe

    def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:

        enter_long_conditions = [df['&s-up_or_down'] == 'up']

        if enter_long_conditions:
            df.loc[
                reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
            ] = (1, "long")

        enter_short_conditions = [df['&s-up_or_down'] == 'down']

        if enter_short_conditions:
            df.loc[
                reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
            ] = (1, "short")

        return df

    def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:

        return df
|
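A quick toy check of the '&s-up_or_down' labelling used by the classifier strategy above; the strategy looks 100 candles ahead, shift(-2) here only keeps the frame small:

import numpy as np
import pandas as pd

df = pd.DataFrame({"close": [1.0, 1.1, 0.9, 1.2, 1.0, 0.8]})
df["&s-up_or_down"] = np.where(df["close"].shift(-2) > df["close"], "up", "down")
print(df)  # rows whose close is higher two candles later are labelled 'up'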
@ -13,13 +13,8 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
class freqai_test_multimodel_strat(IStrategy):
|
||||
"""
|
||||
Example strategy showing how the user connects their own
|
||||
IFreqaiModel to the strategy. Namely, the user uses:
|
||||
self.freqai.start(dataframe, metadata)
|
||||
|
||||
to make predictions on their data. populate_any_indicators() automatically
|
||||
generates the variety of features indicated by the user in the
|
||||
canonical freqtrade configuration file under config['freqai'].
|
||||
Test strategy - used for testing freqAI multimodel functionalities.
|
||||
DO not use in production.
|
||||
"""
|
||||
|
||||
minimal_roi = {"0": 0.1, "240": -1}
|
||||
@ -62,22 +57,10 @@ class freqai_test_multimodel_strat(IStrategy):
|
||||
return informative_pairs
|
||||
|
||||
def populate_any_indicators(
|
||||
self, metadata, pair, df, tf, informative=None, coin="", set_generalized_indicators=False
|
||||
self, pair, df, tf, informative=None, set_generalized_indicators=False
|
||||
):
|
||||
"""
|
||||
Function designed to automatically generate, name and merge features
|
||||
from user indicated timeframes in the configuration file. User controls the indicators
|
||||
passed to the training/prediction by prepending indicators with `'%-' + coin `
|
||||
(see convention below). I.e. user should not prepend any supporting metrics
|
||||
(e.g. bb_lowerband below) with % unless they explicitly want to pass that metric to the
|
||||
model.
|
||||
:params:
|
||||
:pair: pair to be used as informative
|
||||
:df: strategy dataframe which will receive merges from informatives
|
||||
:tf: timeframe of the dataframe which will modify the feature names
|
||||
:informative: the dataframe associated with the informative pair
|
||||
:coin: the name of the coin which will modify the feature names.
|
||||
"""
|
||||
|
||||
coin = pair.split('/')[0]
|
||||
|
||||
with self.freqai.lock:
|
||||
if informative is None:
|
||||
@ -147,11 +130,6 @@ class freqai_test_multimodel_strat(IStrategy):
|
||||
|
||||
self.freqai_info = self.config["freqai"]
|
||||
|
||||
# All indicators must be populated by populate_any_indicators() for live functionality
|
||||
# to work correctly.
|
||||
# the model will return 4 values, its prediction, an indication of whether or not the
|
||||
# prediction should be accepted, the target mean/std values from the labels used during
|
||||
# each training period.
|
||||
dataframe = self.freqai.start(dataframe, metadata, self)
|
||||
|
||||
dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
|
||||
|
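The same populate_any_indicators() signature change is applied to freqai_test_strat below: metadata and coin are dropped from the argument list and the coin prefix is derived inside the method. A condensed sketch of the new form:

def populate_any_indicators(self, pair, df, tf, informative=None,
                            set_generalized_indicators=False):
    coin = pair.split('/')[0]
    if informative is None:
        informative = self.dp.get_pair_dataframe(pair, tf)
    ...  # feature engineering as before, now keyed on the derived coin prefix
    return df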
@ -13,13 +13,8 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
class freqai_test_strat(IStrategy):
|
||||
"""
|
||||
Example strategy showing how the user connects their own
|
||||
IFreqaiModel to the strategy. Namely, the user uses:
|
||||
self.freqai.start(dataframe, metadata)
|
||||
|
||||
to make predictions on their data. populate_any_indicators() automatically
|
||||
generates the variety of features indicated by the user in the
|
||||
canonical freqtrade configuration file under config['freqai'].
|
||||
Test strategy - used for testing freqAI functionalities.
|
||||
DO not use in production.
|
||||
"""
|
||||
|
||||
minimal_roi = {"0": 0.1, "240": -1}
|
||||
@ -62,22 +57,10 @@ class freqai_test_strat(IStrategy):
|
||||
return informative_pairs
|
||||
|
||||
def populate_any_indicators(
|
||||
self, metadata, pair, df, tf, informative=None, coin="", set_generalized_indicators=False
|
||||
self, pair, df, tf, informative=None, set_generalized_indicators=False
|
||||
):
|
||||
"""
|
||||
Function designed to automatically generate, name and merge features
|
||||
from user indicated timeframes in the configuration file. User controls the indicators
|
||||
passed to the training/prediction by prepending indicators with `'%-' + coin `
|
||||
(see convention below). I.e. user should not prepend any supporting metrics
|
||||
(e.g. bb_lowerband below) with % unless they explicitly want to pass that metric to the
|
||||
model.
|
||||
:params:
|
||||
:pair: pair to be used as informative
|
||||
:df: strategy dataframe which will receive merges from informatives
|
||||
:tf: timeframe of the dataframe which will modify the feature names
|
||||
:informative: the dataframe associated with the informative pair
|
||||
:coin: the name of the coin which will modify the feature names.
|
||||
"""
|
||||
|
||||
coin = pair.split('/')[0]
|
||||
|
||||
with self.freqai.lock:
|
||||
if informative is None:
|
||||
@ -135,11 +118,6 @@ class freqai_test_strat(IStrategy):
|
||||
|
||||
self.freqai_info = self.config["freqai"]
|
||||
|
||||
# All indicators must be populated by populate_any_indicators() for live functionality
|
||||
# to work correctly.
|
||||
# the model will return 4 values, its prediction, an indication of whether or not the
|
||||
# prediction should be accepted, the target mean/std values from the labels used during
|
||||
# each training period.
|
||||
dataframe = self.freqai.start(dataframe, metadata, self)
|
||||
|
||||
dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
|
||||
|
@ -185,9 +185,12 @@ class StrategyTestV3(IStrategy):
|
||||
|
||||
return 3.0
|
||||
|
||||
def adjust_trade_position(self, trade: Trade, current_time: datetime, current_rate: float,
|
||||
current_profit: float,
|
||||
min_stake: Optional[float], max_stake: float, **kwargs):
|
||||
def adjust_trade_position(self, trade: Trade, current_time: datetime,
|
||||
current_rate: float, current_profit: float,
|
||||
min_stake: Optional[float], max_stake: float,
|
||||
current_entry_rate: float, current_exit_rate: float,
|
||||
current_entry_profit: float, current_exit_profit: float,
|
||||
**kwargs) -> Optional[float]:
|
||||
|
||||
if current_profit < -0.0075:
|
||||
orders = trade.select_filled_orders(trade.entry_side)
|
||||
|
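StrategyTestV3's adjust_trade_position() is updated to the extended callback signature, which also receives the current entry/exit rates and profits and returns Optional[float]. A hedged sketch of the new form; the -0.0075 threshold and select_filled_orders() call come from the hunk above, while returning the first entry's cost as the top-up stake is an illustration:

from datetime import datetime
from typing import Optional


def adjust_trade_position(self, trade, current_time: datetime,
                          current_rate: float, current_profit: float,
                          min_stake: Optional[float], max_stake: float,
                          current_entry_rate: float, current_exit_rate: float,
                          current_entry_profit: float, current_exit_profit: float,
                          **kwargs) -> Optional[float]:
    if current_profit < -0.0075:
        filled_entries = trade.select_filled_orders(trade.entry_side)
        return filled_entries[0].cost  # add roughly the size of the first entry again
    return None  # None means: leave the position unchanged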
@ -290,6 +290,25 @@ def test_advise_all_indicators(default_conf, testdatadir) -> None:
|
||||
assert len(processed['UNITTEST/BTC']) == 102 # partial candle was removed
|
||||
|
||||
|
||||
def test_populate_any_indicators(default_conf, testdatadir) -> None:
|
||||
strategy = StrategyResolver.load_strategy(default_conf)
|
||||
|
||||
timerange = TimeRange.parse_timerange('1510694220-1510700340')
|
||||
data = load_data(testdatadir, '1m', ['UNITTEST/BTC'], timerange=timerange,
|
||||
fill_up_missing=True)
|
||||
processed = strategy.populate_any_indicators('UNITTEST/BTC', data, '5m')
|
||||
assert processed == data
|
||||
assert id(processed) == id(data)
|
||||
assert len(processed['UNITTEST/BTC']) == 102 # partial candle was removed
|
||||
|
||||
|
||||
def test_freqai_not_initialized(default_conf) -> None:
|
||||
strategy = StrategyResolver.load_strategy(default_conf)
|
||||
strategy.ft_bot_start()
|
||||
with pytest.raises(OperationalException, match=r'freqAI is not enabled\.'):
|
||||
strategy.freqai.start()
|
||||
|
||||
|
||||
def test_advise_all_indicators_copy(mocker, default_conf, testdatadir) -> None:
|
||||
strategy = StrategyResolver.load_strategy(default_conf)
|
||||
aimock = mocker.patch('freqtrade.strategy.interface.IStrategy.advise_indicators')
|
||||
@ -408,28 +427,31 @@ def test_min_roi_reached3(default_conf, fee) -> None:
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'profit,adjusted,expected,trailing,custom,profit2,adjusted2,expected2,custom_stop', [
|
||||
'profit,adjusted,expected,liq,trailing,custom,profit2,adjusted2,expected2,custom_stop', [
|
||||
# Profit, adjusted stoploss(absolute), profit for 2nd call, enable trailing,
|
||||
# enable custom stoploss, expected after 1st call, expected after 2nd call
|
||||
(0.2, 0.9, ExitType.NONE, False, False, 0.3, 0.9, ExitType.NONE, None),
|
||||
(0.2, 0.9, ExitType.NONE, False, False, -0.2, 0.9, ExitType.STOP_LOSS, None),
|
||||
(0.2, 1.14, ExitType.NONE, True, False, 0.05, 1.14, ExitType.TRAILING_STOP_LOSS, None),
|
||||
(0.01, 0.96, ExitType.NONE, True, False, 0.05, 1, ExitType.NONE, None),
|
||||
(0.05, 1, ExitType.NONE, True, False, -0.01, 1, ExitType.TRAILING_STOP_LOSS, None),
|
||||
(0.2, 0.9, ExitType.NONE, None, False, False, 0.3, 0.9, ExitType.NONE, None),
|
||||
(0.2, 0.9, ExitType.NONE, None, False, False, -0.2, 0.9, ExitType.STOP_LOSS, None),
|
||||
(0.2, 0.9, ExitType.NONE, 0.8, False, False, -0.2, 0.9, ExitType.LIQUIDATION, None),
|
||||
(0.2, 1.14, ExitType.NONE, None, True, False, 0.05, 1.14, ExitType.TRAILING_STOP_LOSS,
|
||||
None),
|
||||
(0.01, 0.96, ExitType.NONE, None, True, False, 0.05, 1, ExitType.NONE, None),
|
||||
(0.05, 1, ExitType.NONE, None, True, False, -0.01, 1, ExitType.TRAILING_STOP_LOSS, None),
|
||||
# Default custom case - trails with 10%
|
||||
(0.05, 0.95, ExitType.NONE, False, True, -0.02, 0.95, ExitType.NONE, None),
|
||||
(0.05, 0.95, ExitType.NONE, False, True, -0.06, 0.95, ExitType.TRAILING_STOP_LOSS, None),
|
||||
(0.05, 1, ExitType.NONE, False, True, -0.06, 1, ExitType.TRAILING_STOP_LOSS,
|
||||
(0.05, 0.95, ExitType.NONE, None, False, True, -0.02, 0.95, ExitType.NONE, None),
|
||||
(0.05, 0.95, ExitType.NONE, None, False, True, -0.06, 0.95, ExitType.TRAILING_STOP_LOSS,
|
||||
None),
|
||||
(0.05, 1, ExitType.NONE, None, False, True, -0.06, 1, ExitType.TRAILING_STOP_LOSS,
|
||||
lambda **kwargs: -0.05),
|
||||
(0.05, 1, ExitType.NONE, False, True, 0.09, 1.04, ExitType.NONE,
|
||||
(0.05, 1, ExitType.NONE, None, False, True, 0.09, 1.04, ExitType.NONE,
|
||||
lambda **kwargs: -0.05),
|
||||
(0.05, 0.95, ExitType.NONE, False, True, 0.09, 0.98, ExitType.NONE,
|
||||
(0.05, 0.95, ExitType.NONE, None, False, True, 0.09, 0.98, ExitType.NONE,
|
||||
lambda current_profit, **kwargs: -0.1 if current_profit < 0.6 else -(current_profit * 2)),
|
||||
# Error case - static stoploss in place
|
||||
(0.05, 0.9, ExitType.NONE, False, True, 0.09, 0.9, ExitType.NONE,
|
||||
(0.05, 0.9, ExitType.NONE, None, False, True, 0.09, 0.9, ExitType.NONE,
|
||||
lambda **kwargs: None),
|
||||
])
|
||||
def test_stop_loss_reached(default_conf, fee, profit, adjusted, expected, trailing, custom,
|
||||
def test_stop_loss_reached(default_conf, fee, profit, adjusted, expected, liq, trailing, custom,
|
||||
profit2, adjusted2, expected2, custom_stop) -> None:
|
||||
|
||||
strategy = StrategyResolver.load_strategy(default_conf)
|
||||
@ -442,6 +464,7 @@ def test_stop_loss_reached(default_conf, fee, profit, adjusted, expected, traili
|
||||
fee_close=fee.return_value,
|
||||
exchange='binance',
|
||||
open_rate=1,
|
||||
liquidation_price=liq,
|
||||
)
|
||||
trade.adjust_min_max_rates(trade.open_rate, trade.open_rate)
|
||||
strategy.trailing_stop = trailing
|
||||
|
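The new 'liq' column in the parametrization above adds a liquidation-price case that is expected to win over the regular stop loss. A hedged sketch of that ordering for a long trade (not the actual ft_stoploss_reached logic):

from freqtrade.enums import ExitType


def exit_type_for_long(current_rate: float, liquidation_price, stop_rate: float):
    if liquidation_price is not None and current_rate <= liquidation_price:
        return ExitType.LIQUIDATION
    if current_rate <= stop_rate:
        return ExitType.STOP_LOSS
    return ExitType.NONE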
@ -34,7 +34,7 @@ def test_search_all_strategies_no_failed():
|
||||
directory = Path(__file__).parent / "strats"
|
||||
strategies = StrategyResolver.search_all_objects(directory, enum_failed=False)
|
||||
assert isinstance(strategies, list)
|
||||
assert len(strategies) == 8
|
||||
assert len(strategies) == 9
|
||||
assert isinstance(strategies[0], dict)
|
||||
|
||||
|
||||
@ -42,10 +42,10 @@ def test_search_all_strategies_with_failed():
|
||||
directory = Path(__file__).parent / "strats"
|
||||
strategies = StrategyResolver.search_all_objects(directory, enum_failed=True)
|
||||
assert isinstance(strategies, list)
|
||||
assert len(strategies) == 9
|
||||
assert len(strategies) == 10
|
||||
# with enum_failed=True search_all_objects() shall find 2 good strategies
|
||||
# and 1 which fails to load
|
||||
assert len([x for x in strategies if x['class'] is not None]) == 8
|
||||
assert len([x for x in strategies if x['class'] is not None]) == 9
|
||||
assert len([x for x in strategies if x['class'] is None]) == 1
|
||||
|
||||
|
||||
|
@ -68,8 +68,14 @@ def test_process_stopped(mocker, default_conf_usdt) -> None:
|
||||
assert coo_mock.call_count == 1
|
||||
|
||||
|
||||
def test_process_calls_sendmsg(mocker, default_conf_usdt) -> None:
|
||||
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
|
||||
freqtrade.process()
|
||||
assert freqtrade.rpc.process_msg_queue.call_count == 1
|
||||
|
||||
|
||||
def test_bot_cleanup(mocker, default_conf_usdt, caplog) -> None:
|
||||
mock_cleanup = mocker.patch('freqtrade.freqtradebot.cleanup_db')
|
||||
mock_cleanup = mocker.patch('freqtrade.freqtradebot.Trade.commit')
|
||||
coo_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cancel_all_open_orders')
|
||||
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
|
||||
freqtrade.cleanup()
|
||||
@ -837,8 +843,8 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
|
||||
|
||||
# In case of closed order
|
||||
order['status'] = 'closed'
|
||||
order['price'] = 10
|
||||
order['cost'] = 100
|
||||
order['average'] = 10
|
||||
order['cost'] = 300
|
||||
order['id'] = '444'
|
||||
|
||||
mocker.patch('freqtrade.exchange.Exchange.create_order',
|
||||
@ -849,7 +855,7 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
|
||||
assert trade
|
||||
assert trade.open_order_id is None
|
||||
assert trade.open_rate == 10
|
||||
assert trade.stake_amount == round(order['price'] * order['filled'] / leverage, 8)
|
||||
assert trade.stake_amount == round(order['average'] * order['filled'] / leverage, 8)
|
||||
assert pytest.approx(trade.liquidation_price) == liq_price
|
||||
|
||||
# In case of rejected or expired order and partially filled
|
||||
@ -857,8 +863,8 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
|
||||
order['amount'] = 30.0
|
||||
order['filled'] = 20.0
|
||||
order['remaining'] = 10.00
|
||||
order['price'] = 0.5
|
||||
order['cost'] = 15.0
|
||||
order['average'] = 0.5
|
||||
order['cost'] = 10.0
|
||||
order['id'] = '555'
|
||||
mocker.patch('freqtrade.exchange.Exchange.create_order',
|
||||
MagicMock(return_value=order))
|
||||
@ -866,9 +872,9 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
|
||||
trade = Trade.query.all()[3]
|
||||
trade.is_short = is_short
|
||||
assert trade
|
||||
assert trade.open_order_id == '555'
|
||||
assert trade.open_order_id is None
|
||||
assert trade.open_rate == 0.5
|
||||
assert trade.stake_amount == round(order['price'] * order['filled'] / leverage, 8)
|
||||
assert trade.stake_amount == round(order['average'] * order['filled'] / leverage, 8)
|
||||
|
||||
# Test with custom stake
|
||||
order['status'] = 'open'
|
||||
@ -895,7 +901,7 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
|
||||
order['amount'] = 30.0 * leverage
|
||||
order['filled'] = 0.0
|
||||
order['remaining'] = 30.0
|
||||
order['price'] = 0.5
|
||||
order['average'] = 0.5
|
||||
order['cost'] = 0.0
|
||||
order['id'] = '66'
|
||||
mocker.patch('freqtrade.exchange.Exchange.create_order',
|
||||
@ -967,6 +973,14 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
|
||||
trade.is_short = is_short
|
||||
assert pytest.approx(trade.stake_amount) == 500
|
||||
|
||||
order['id'] = '55673'
|
||||
|
||||
freqtrade.strategy.leverage.reset_mock()
|
||||
assert freqtrade.execute_entry(pair, 200, leverage_=3)
|
||||
assert freqtrade.strategy.leverage.call_count == 0
|
||||
trade = Trade.query.all()[10]
|
||||
assert trade.leverage == 1 if trading_mode == 'spot' else 3
|
||||
|
||||
|
||||
@pytest.mark.parametrize("is_short", [False, True])
|
||||
def test_execute_entry_confirm_error(mocker, default_conf_usdt, fee, limit_order, is_short) -> None:
|
||||
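The updated assertions base the stake on the exchange-reported average fill price instead of the requested price. A worked check using the partially-filled case from the hunk above:

order = {'average': 0.5, 'filled': 20.0}  # values from the partially filled order above
leverage = 1.0
stake_amount = round(order['average'] * order['filled'] / leverage, 8)
assert stake_amount == 10.0  # matches order['cost'] = 10.0 in the diff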
@ -1077,7 +1091,7 @@ def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_
|
||||
'last': 1.9
|
||||
}),
|
||||
create_order=MagicMock(side_effect=[
|
||||
{'id': enter_order['id']},
|
||||
enter_order,
|
||||
exit_order,
|
||||
]),
|
||||
get_fee=fee,
|
||||
@ -1103,20 +1117,20 @@ def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_
|
||||
# should do nothing and return false
|
||||
trade.is_open = True
|
||||
trade.open_order_id = None
|
||||
trade.stoploss_order_id = 100
|
||||
trade.stoploss_order_id = "100"
|
||||
|
||||
hanging_stoploss_order = MagicMock(return_value={'status': 'open'})
|
||||
mocker.patch('freqtrade.exchange.Exchange.fetch_stoploss_order', hanging_stoploss_order)
|
||||
|
||||
assert freqtrade.handle_stoploss_on_exchange(trade) is False
|
||||
assert trade.stoploss_order_id == 100
|
||||
assert trade.stoploss_order_id == "100"
|
||||
|
||||
# Third case: when stoploss was set but it was canceled for some reason
|
||||
# should set a stoploss immediately and return False
|
||||
caplog.clear()
|
||||
trade.is_open = True
|
||||
trade.open_order_id = None
|
||||
trade.stoploss_order_id = 100
|
||||
trade.stoploss_order_id = "100"
|
||||
|
||||
canceled_stoploss_order = MagicMock(return_value={'status': 'canceled'})
|
||||
mocker.patch('freqtrade.exchange.Exchange.fetch_stoploss_order', canceled_stoploss_order)
|
||||
@ -2033,6 +2047,7 @@ def test_update_trade_state_exception(mocker, default_conf_usdt, is_short, limit
|
||||
|
||||
trade = MagicMock()
|
||||
trade.open_order_id = '123'
|
||||
trade.amount = 123
|
||||
|
||||
# Test raise of OperationalException exception
|
||||
mocker.patch(
|
||||
@ -2346,9 +2361,9 @@ def test_close_trade(
|
||||
trade.is_short = is_short
|
||||
assert trade
|
||||
|
||||
oobj = Order.parse_from_ccxt_object(enter_order, enter_order['symbol'], 'buy')
|
||||
oobj = Order.parse_from_ccxt_object(enter_order, enter_order['symbol'], trade.enter_side)
|
||||
trade.update_trade(oobj)
|
||||
oobj = Order.parse_from_ccxt_object(exit_order, exit_order['symbol'], 'sell')
|
||||
oobj = Order.parse_from_ccxt_object(exit_order, exit_order['symbol'], trade.exit_side)
|
||||
trade.update_trade(oobj)
|
||||
assert trade.is_open is False
|
||||
|
||||
@ -2391,8 +2406,8 @@ def test_manage_open_orders_entry_usercustom(
|
||||
'freqtrade.exchange.Exchange',
|
||||
fetch_ticker=ticker_usdt,
|
||||
fetch_order=MagicMock(return_value=old_order),
|
||||
cancel_order_with_result=cancel_order_wr_mock,
|
||||
cancel_order=cancel_order_mock,
|
||||
cancel_order_with_result=cancel_order_wr_mock,
|
||||
get_fee=fee
|
||||
)
|
||||
freqtrade = FreqtradeBot(default_conf_usdt)
|
||||
@ -2440,7 +2455,9 @@ def test_manage_open_orders_entry(
|
||||
) -> None:
|
||||
old_order = limit_sell_order_old if is_short else limit_buy_order_old
|
||||
rpc_mock = patch_RPCManager(mocker)
|
||||
old_order['id'] = open_trade.open_order_id
|
||||
open_trade.open_order_id = old_order['id']
|
||||
order = Order.parse_from_ccxt_object(old_order, 'mocked', 'buy')
|
||||
open_trade.orders[0] = order
|
||||
limit_buy_cancel = deepcopy(old_order)
|
||||
limit_buy_cancel['status'] = 'canceled'
|
||||
cancel_order_mock = MagicMock(return_value=limit_buy_cancel)
|
||||
@ -2631,7 +2648,9 @@ def test_manage_open_orders_exit_usercustom(
|
||||
is_short, open_trade_usdt, caplog
|
||||
) -> None:
|
||||
default_conf_usdt["unfilledtimeout"] = {"entry": 1440, "exit": 1440, "exit_timeout_count": 1}
|
||||
limit_sell_order_old['id'] = open_trade_usdt.open_order_id
|
||||
open_trade_usdt.open_order_id = limit_sell_order_old['id']
|
||||
order = Order.parse_from_ccxt_object(limit_sell_order_old, 'mocked', 'sell')
|
||||
open_trade_usdt.orders[0] = order
|
||||
if is_short:
|
||||
limit_sell_order_old['side'] = 'buy'
|
||||
open_trade_usdt.is_short = is_short
|
||||
@ -3244,6 +3263,9 @@ def test_execute_trade_exit_up(default_conf_usdt, ticker_usdt, fee, ticker_usdt_
|
||||
'open_date': ANY,
|
||||
'close_date': ANY,
|
||||
'close_rate': ANY,
|
||||
'sub_trade': False,
|
||||
'cumulative_profit': 0.0,
|
||||
'stake_amount': pytest.approx(60),
|
||||
} == last_msg
|
||||
|
||||
|
||||
@ -3304,6 +3326,9 @@ def test_execute_trade_exit_down(default_conf_usdt, ticker_usdt, fee, ticker_usd
|
||||
'open_date': ANY,
|
||||
'close_date': ANY,
|
||||
'close_rate': ANY,
|
||||
'sub_trade': False,
|
||||
'cumulative_profit': 0.0,
|
||||
'stake_amount': pytest.approx(60),
|
||||
} == last_msg
|
||||
|
||||
|
||||
@ -3385,6 +3410,9 @@ def test_execute_trade_exit_custom_exit_price(
|
||||
'open_date': ANY,
|
||||
'close_date': ANY,
|
||||
'close_rate': ANY,
|
||||
'sub_trade': False,
|
||||
'cumulative_profit': 0.0,
|
||||
'stake_amount': pytest.approx(60),
|
||||
} == last_msg
|
||||
|
||||
|
||||
@ -3453,6 +3481,9 @@ def test_execute_trade_exit_down_stoploss_on_exchange_dry_run(
|
||||
'open_date': ANY,
|
||||
'close_date': ANY,
|
||||
'close_rate': ANY,
|
||||
'sub_trade': False,
|
||||
'cumulative_profit': 0.0,
|
||||
'stake_amount': pytest.approx(60),
|
||||
} == last_msg
|
||||
|
||||
|
||||
@ -3684,7 +3715,7 @@ def test_execute_trade_exit_market_order(
|
||||
)
|
||||
|
||||
assert not trade.is_open
|
||||
assert trade.close_profit == profit_ratio
|
||||
assert pytest.approx(trade.close_profit) == profit_ratio
|
||||
|
||||
assert rpc_mock.call_count == 4
|
||||
last_msg = rpc_mock.call_args_list[-2][0][0]
|
||||
@ -3712,6 +3743,9 @@ def test_execute_trade_exit_market_order(
|
||||
'open_date': ANY,
|
||||
'close_date': ANY,
|
||||
'close_rate': ANY,
|
||||
'sub_trade': False,
|
||||
'cumulative_profit': 0.0,
|
||||
'stake_amount': pytest.approx(60),
|
||||
|
||||
} == last_msg
|
||||
|
||||
@ -3783,7 +3817,7 @@ def test_exit_profit_only(
|
||||
'last': bid
|
||||
}),
|
||||
create_order=MagicMock(side_effect=[
|
||||
limit_order_open[eside],
|
||||
limit_order[eside],
|
||||
{'id': 1234553382},
|
||||
]),
|
||||
get_fee=fee,
|
||||
@ -4075,7 +4109,7 @@ def test_trailing_stop_loss_positive(
|
||||
'last': enter_price - (-0.01 if is_short else 0.01),
|
||||
}),
|
||||
create_order=MagicMock(side_effect=[
|
||||
limit_order_open[eside],
|
||||
limit_order[eside],
|
||||
{'id': 1234553382},
|
||||
]),
|
||||
get_fee=fee,
|
||||
@ -4626,7 +4660,7 @@ def test_order_book_entry_pricing1(mocker, default_conf_usdt, order_book_l2, exc
|
||||
with pytest.raises(PricingError):
|
||||
freqtrade.exchange.get_rate('ETH/USDT', side="entry", is_short=False, refresh=True)
|
||||
assert log_has_re(
|
||||
r'Entry Price at location 1 from orderbook could not be determined.', caplog)
|
||||
r'ETH/USDT - Entry Price at location 1 from orderbook could not be determined.', caplog)
|
||||
else:
|
||||
assert freqtrade.exchange.get_rate(
|
||||
'ETH/USDT', side="entry", is_short=False, refresh=True) == 0.043935
|
||||
@ -4705,7 +4739,8 @@ def test_order_book_exit_pricing(
|
||||
return_value={'bids': [[]], 'asks': [[]]})
|
||||
with pytest.raises(PricingError):
|
||||
freqtrade.handle_trade(trade)
|
||||
assert log_has_re(r'Exit Price at location 1 from orderbook could not be determined\..*',
|
||||
assert log_has_re(
|
||||
r"ETH/USDT - Exit Price at location 1 from orderbook could not be determined\..*",
|
||||
caplog)
|
||||
|
||||
|
||||
@ -5379,7 +5414,7 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None:
|
||||
'status': None,
|
||||
'price': 9,
|
||||
'amount': 12,
|
||||
'cost': 100,
|
||||
'cost': 108,
|
||||
'ft_is_open': True,
|
||||
'id': '651',
|
||||
'order_id': '651'
|
||||
@ -5474,7 +5509,7 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None:
|
||||
assert trade.open_order_id is None
|
||||
assert pytest.approx(trade.open_rate) == 9.90909090909
|
||||
assert trade.amount == 22
|
||||
assert trade.stake_amount == 218
|
||||
assert pytest.approx(trade.stake_amount) == 218
|
||||
|
||||
orders = Order.query.all()
|
||||
assert orders
|
||||
@ -5527,6 +5562,329 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None:
|
||||
# Make sure the closed order is found as the second order.
|
||||
order = trade.select_order('buy', False)
|
||||
assert order.order_id == '652'
|
||||
closed_sell_dca_order_1 = {
|
||||
'ft_pair': pair,
|
||||
'status': 'closed',
|
||||
'ft_order_side': 'sell',
|
||||
'side': 'sell',
|
||||
'type': 'limit',
|
||||
'price': 8,
|
||||
'average': 8,
|
||||
'amount': 15,
|
||||
'filled': 15,
|
||||
'cost': 120,
|
||||
'ft_is_open': False,
|
||||
'id': '653',
|
||||
'order_id': '653'
|
||||
}
|
||||
mocker.patch('freqtrade.exchange.Exchange.create_order',
|
||||
MagicMock(return_value=closed_sell_dca_order_1))
|
||||
mocker.patch('freqtrade.exchange.Exchange.fetch_order',
|
||||
MagicMock(return_value=closed_sell_dca_order_1))
|
||||
mocker.patch('freqtrade.exchange.Exchange.fetch_order_or_stoploss_order',
|
||||
MagicMock(return_value=closed_sell_dca_order_1))
|
||||
assert freqtrade.execute_trade_exit(trade=trade, limit=8,
|
||||
exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT),
|
||||
sub_trade_amt=15)
|
||||
|
||||
# Assert trade is as expected (averaged dca)
|
||||
trade = Trade.query.first()
|
||||
assert trade
|
||||
assert trade.open_order_id is None
|
||||
assert trade.is_open
|
||||
assert trade.amount == 22
|
||||
assert trade.stake_amount == 192.05405405405406
|
||||
assert pytest.approx(trade.open_rate) == 8.729729729729
|
||||
|
||||
orders = Order.query.all()
|
||||
assert orders
|
||||
assert len(orders) == 4
|
||||
|
||||
# Make sure the closed order is found as the second order.
|
||||
order = trade.select_order('sell', False)
|
||||
assert order.order_id == '653'
|
||||
|
||||
|
||||
def test_position_adjust2(mocker, default_conf_usdt, fee) -> None:
|
||||
"""
|
||||
TODO: Should be adjusted to test both long and short
|
||||
buy 100 @ 11
|
||||
sell 50 @ 8
|
||||
sell 50 @ 16
|
||||
"""
|
||||
patch_RPCManager(mocker)
|
||||
patch_exchange(mocker)
|
||||
patch_wallet(mocker, free=10000)
|
||||
default_conf_usdt.update({
|
||||
"position_adjustment_enable": True,
|
||||
"dry_run": False,
|
||||
"stake_amount": 200.0,
|
||||
"dry_run_wallet": 1000.0,
|
||||
})
|
||||
freqtrade = FreqtradeBot(default_conf_usdt)
|
||||
freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True)
|
||||
bid = 11
|
||||
amount = 100
|
||||
buy_rate_mock = MagicMock(return_value=bid)
|
||||
mocker.patch.multiple(
|
||||
'freqtrade.exchange.Exchange',
|
||||
get_rate=buy_rate_mock,
|
||||
fetch_ticker=MagicMock(return_value={
|
||||
'bid': 10,
|
||||
'ask': 12,
|
||||
'last': 11
|
||||
}),
|
||||
get_min_pair_stake_amount=MagicMock(return_value=1),
|
||||
get_fee=fee,
|
||||
)
|
||||
pair = 'ETH/USDT'
|
||||
# Initial buy
|
||||
closed_successful_buy_order = {
|
||||
'pair': pair,
|
||||
'ft_pair': pair,
|
||||
'ft_order_side': 'buy',
|
||||
'side': 'buy',
|
||||
'type': 'limit',
|
||||
'status': 'closed',
|
||||
'price': bid,
|
||||
'average': bid,
|
||||
'cost': bid * amount,
|
||||
'amount': amount,
|
||||
'filled': amount,
|
||||
'ft_is_open': False,
|
||||
'id': '600',
|
||||
'order_id': '600'
|
||||
}
|
||||
mocker.patch('freqtrade.exchange.Exchange.create_order',
|
||||
MagicMock(return_value=closed_successful_buy_order))
|
||||
mocker.patch('freqtrade.exchange.Exchange.fetch_order_or_stoploss_order',
|
||||
MagicMock(return_value=closed_successful_buy_order))
|
||||
assert freqtrade.execute_entry(pair, amount)
|
||||
# Should create an closed trade with an no open order id
|
||||
# Order is filled and trade is open
|
||||
    orders = Order.query.all()
    assert orders
    assert len(orders) == 1
    trade = Trade.query.first()
    assert trade
    assert trade.is_open is True
    assert trade.open_order_id is None
    assert trade.open_rate == bid
    assert trade.stake_amount == bid * amount

    # Assume it does nothing since order is closed and trade is open
    freqtrade.update_closed_trades_without_assigned_fees()

    trade = Trade.query.first()
    assert trade
    assert trade.is_open is True
    assert trade.open_order_id is None
    assert trade.open_rate == bid
    assert trade.stake_amount == bid * amount
    assert not trade.fee_updated(trade.entry_side)

    freqtrade.manage_open_orders()

    trade = Trade.query.first()
    assert trade
    assert trade.is_open is True
    assert trade.open_order_id is None
    assert trade.open_rate == bid
    assert trade.stake_amount == bid * amount
    assert not trade.fee_updated(trade.entry_side)

    amount = 50
    ask = 8
    closed_sell_dca_order_1 = {
        'ft_pair': pair,
        'status': 'closed',
        'ft_order_side': 'sell',
        'side': 'sell',
        'type': 'limit',
        'price': ask,
        'average': ask,
        'amount': amount,
        'filled': amount,
        'cost': amount * ask,
        'ft_is_open': False,
        'id': '601',
        'order_id': '601'
    }
    mocker.patch('freqtrade.exchange.Exchange.create_order',
                 MagicMock(return_value=closed_sell_dca_order_1))
    mocker.patch('freqtrade.exchange.Exchange.fetch_order',
                 MagicMock(return_value=closed_sell_dca_order_1))
    mocker.patch('freqtrade.exchange.Exchange.fetch_order_or_stoploss_order',
                 MagicMock(return_value=closed_sell_dca_order_1))
    assert freqtrade.execute_trade_exit(trade=trade, limit=ask,
                                        exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT),
                                        sub_trade_amt=amount)
    trades: List[Trade] = trade.get_open_trades_without_assigned_fees()
    assert len(trades) == 1
    # Assert trade is as expected (averaged dca)

    trade = Trade.query.first()
    assert trade
    assert trade.open_order_id is None
    assert trade.amount == 50
    assert trade.open_rate == 11
    assert trade.stake_amount == 550
    assert pytest.approx(trade.realized_profit) == -152.375
    assert pytest.approx(trade.close_profit_abs) == -152.375

    orders = Order.query.all()
    assert orders
    assert len(orders) == 2
    # Make sure the closed order is found as the second order.
    order = trade.select_order('sell', False)
    assert order.order_id == '601'

    amount = 50
    ask = 16
    closed_sell_dca_order_2 = {
        'ft_pair': pair,
        'status': 'closed',
        'ft_order_side': 'sell',
        'side': 'sell',
        'type': 'limit',
        'price': ask,
        'average': ask,
        'amount': amount,
        'filled': amount,
        'cost': amount * ask,
        'ft_is_open': False,
        'id': '602',
        'order_id': '602'
    }
    mocker.patch('freqtrade.exchange.Exchange.create_order',
                 MagicMock(return_value=closed_sell_dca_order_2))
    mocker.patch('freqtrade.exchange.Exchange.fetch_order',
                 MagicMock(return_value=closed_sell_dca_order_2))
    mocker.patch('freqtrade.exchange.Exchange.fetch_order_or_stoploss_order',
                 MagicMock(return_value=closed_sell_dca_order_2))
    assert freqtrade.execute_trade_exit(trade=trade, limit=ask,
                                        exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT),
                                        sub_trade_amt=amount)
    # Assert trade is as expected (averaged dca)

    trade = Trade.query.first()
    assert trade
    assert trade.open_order_id is None
    assert trade.amount == 50
    assert trade.open_rate == 11
    assert trade.stake_amount == 550
    # Trade fully realized
    assert pytest.approx(trade.realized_profit) == 94.25
    assert pytest.approx(trade.close_profit_abs) == 94.25
    orders = Order.query.all()
    assert orders
    assert len(orders) == 3

    # Make sure the closed order is found as the second order.
    order = trade.select_order('sell', False)
    assert order.order_id == '602'
    assert trade.is_open is False

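A quick sanity check of the realized-profit figures asserted above (illustration only; it assumes the mocked `fee` fixture charges 0.25% per side, which is not shown in this excerpt):

# Illustration only: how -152.375 and 94.25 arise, assuming a 0.25% fee per side.
fee_rate = 0.0025
open_cost_50 = 50 * 11 * (1 + fee_rate)   # 551.375: entry cost attributed to 50 of the 100 coins
exit_1 = 50 * 8 * (1 - fee_rate)          # 399.0:  net proceeds of the first partial exit
exit_2 = 50 * 16 * (1 - fee_rate)         # 798.0:  net proceeds of the second partial exit
assert round(exit_1 - open_cost_50, 3) == -152.375
assert round((exit_1 - open_cost_50) + (exit_2 - open_cost_50), 2) == 94.25
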
@pytest.mark.parametrize('data', [
    (
        # tuple 1 - side, amount, price
        # tuple 2 - amount, open_rate, stake_amount, cumulative_profit, realized_profit, rel_profit
        (('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)),
        (('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)),
        (('sell', 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.044788)),
        (('sell', 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.59201995)),
        (('sell', 50, 5), (50.0, 12.5, 625.0, 336.625, 336.625, 0.1343142)),  # final profit (sum)
    ),
    (
        (('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)),
        (('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)),
        (('sell', 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 1.189027)),
        (('buy', 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 1.189027)),
        (('sell', 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.7186579)),
        (('sell', 150, 23), (150.0, 11.0, 1650.0, 3175.75, 3175.75, 0.9747170)),  # final profit
    )
])
def test_position_adjust3(mocker, default_conf_usdt, fee, data) -> None:
    default_conf_usdt.update({
        "position_adjustment_enable": True,
        "dry_run": False,
        "stake_amount": 200.0,
        "dry_run_wallet": 1000.0,
    })
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    patch_wallet(mocker, free=10000)
    freqtrade = FreqtradeBot(default_conf_usdt)
    trade = None
    freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True)
    for idx, (order, result) in enumerate(data):
        amount = order[1]
        price = order[2]
        price_mock = MagicMock(return_value=price)
        mocker.patch.multiple(
            'freqtrade.exchange.Exchange',
            get_rate=price_mock,
            fetch_ticker=MagicMock(return_value={
                'bid': 10,
                'ask': 12,
                'last': 11
            }),
            get_min_pair_stake_amount=MagicMock(return_value=1),
            get_fee=fee,
        )
        pair = 'ETH/USDT'
        closed_successful_order = {
            'pair': pair,
            'ft_pair': pair,
            'ft_order_side': order[0],
            'side': order[0],
            'type': 'limit',
            'status': 'closed',
            'price': price,
            'average': price,
            'cost': price * amount,
            'amount': amount,
            'filled': amount,
            'ft_is_open': False,
            'id': f'60{idx}',
            'order_id': f'60{idx}'
        }
        mocker.patch('freqtrade.exchange.Exchange.create_order',
                     MagicMock(return_value=closed_successful_order))
        mocker.patch('freqtrade.exchange.Exchange.fetch_order_or_stoploss_order',
                     MagicMock(return_value=closed_successful_order))
        if order[0] == 'buy':
            assert freqtrade.execute_entry(pair, amount, trade=trade)
        else:
            assert freqtrade.execute_trade_exit(
                trade=trade, limit=price,
                exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT),
                sub_trade_amt=amount)

        orders1 = Order.query.all()
        assert orders1
        assert len(orders1) == idx + 1

        trade = Trade.query.first()
        assert trade
        if idx < len(data) - 1:
            assert trade.is_open is True
        assert trade.open_order_id is None
        assert trade.amount == result[0]
        assert trade.open_rate == result[1]
        assert trade.stake_amount == result[2]
        assert pytest.approx(trade.realized_profit) == result[3]
        assert pytest.approx(trade.close_profit_abs) == result[4]
        assert pytest.approx(trade.close_profit) == result[5]

        order_obj = trade.select_order(order[0], False)
        assert order_obj.order_id == f'60{idx}'

    trade = Trade.query.first()
    assert trade
    assert trade.open_order_id is None
    assert trade.is_open is False

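The open_rate column in the parametrized data above stays at the volume-weighted average entry price: partial exits leave it unchanged, while additional entries re-average it. A minimal sketch for the second data set (illustration only, derived purely from the numbers in the data):

# Illustration only: the open_rate column of the second data set.
avg_entry = (100 * 3 + 100 * 7) / 200            # 5.0 after the two initial buys
assert avg_entry == 5.0                          # selling 100 coins does not move the average
avg_entry = (100 * avg_entry + 150 * 15) / 250   # re-averaged after buying 150 more at 15
assert avg_entry == 11.0
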
def test_process_open_trade_positions_exception(mocker, default_conf_usdt, fee, caplog) -> None:
@ -5550,9 +5908,25 @@ def test_check_and_call_adjust_trade_position(mocker, default_conf_usdt, fee, ca
        "max_entry_position_adjustment": 0,
    })
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)

    buy_rate_mock = MagicMock(return_value=10)
    mocker.patch.multiple(
        'freqtrade.exchange.Exchange',
        get_rate=buy_rate_mock,
        fetch_ticker=MagicMock(return_value={
            'bid': 10,
            'ask': 12,
            'last': 11
        }),
        get_min_pair_stake_amount=MagicMock(return_value=1),
        get_fee=fee,
    )
    create_mock_trades(fee)
    caplog.set_level(logging.DEBUG)

    freqtrade.strategy.adjust_trade_position = MagicMock(return_value=10)
    freqtrade.process_open_trade_positions()
    assert log_has_re(r"Max adjustment entries for .* has been reached\.", caplog)

    caplog.clear()
    freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-10)
    freqtrade.process_open_trade_positions()
    assert log_has_re(r"LIMIT_SELL has been fulfilled.*", caplog)

@ -6,7 +6,7 @@ from freqtrade.enums import ExitCheckTuple, ExitType
from freqtrade.persistence import Trade
from freqtrade.persistence.models import Order
from freqtrade.rpc.rpc import RPC
from tests.conftest import get_patched_freqtradebot, patch_get_signal
from tests.conftest import get_patched_freqtradebot, log_has_re, patch_get_signal


def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee,
@ -291,7 +291,7 @@ def test_dca_short(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
        'freqtrade.exchange.Exchange',
        fetch_ticker=ticker_usdt,
        get_fee=fee,
        amount_to_precision=lambda s, x, y: y,
        amount_to_precision=lambda s, x, y: round(y, 4),
        price_to_precision=lambda s, x, y: y,
    )

@ -303,6 +303,7 @@ def test_dca_short(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
    assert len(trade.orders) == 1
    assert pytest.approx(trade.stake_amount) == 60
    assert trade.open_rate == 2.02
    assert trade.orders[0].amount == trade.amount
    # No adjustment
    freqtrade.process()
    trade = Trade.get_trades().first()
@ -331,8 +332,7 @@ def test_dca_short(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
    trade = Trade.get_trades().first()
    assert len(trade.orders) == 2
    assert pytest.approx(trade.stake_amount) == 120
    # assert trade.orders[0].amount == 30
    assert trade.orders[1].amount == 60 / ticker_usdt_modif['ask']
    assert trade.orders[1].amount == round(60 / ticker_usdt_modif['ask'], 4)

    assert trade.amount == trade.orders[0].amount + trade.orders[1].amount
    assert trade.nr_of_successful_entries == 2
@ -344,7 +344,7 @@ def test_dca_short(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
    assert trade.is_open is False
    # assert trade.orders[0].amount == 30
    assert trade.orders[0].side == 'sell'
    assert trade.orders[1].amount == 60 / ticker_usdt_modif['ask']
    assert trade.orders[1].amount == round(60 / ticker_usdt_modif['ask'], 4)
    # Sold everything
    assert trade.orders[-1].side == 'buy'
    assert trade.orders[2].amount == trade.amount
@ -455,3 +455,60 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
    # Check the 2 filled orders equal the above amount
    assert pytest.approx(trade.orders[1].amount) == 30.150753768
    assert pytest.approx(trade.orders[-1].amount) == 61.538461232


def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog) -> None:
    default_conf_usdt['position_adjustment_enable'] = True

    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    mocker.patch.multiple(
        'freqtrade.exchange.Exchange',
        fetch_ticker=ticker_usdt,
        get_fee=fee,
        amount_to_precision=lambda s, x, y: y,
        price_to_precision=lambda s, x, y: y,
        get_min_pair_stake_amount=MagicMock(return_value=10),
    )

    patch_get_signal(freqtrade)
    freqtrade.enter_positions()

    assert len(Trade.get_trades().all()) == 1
    trade = Trade.get_trades().first()
    assert len(trade.orders) == 1
    assert pytest.approx(trade.stake_amount) == 60
    assert pytest.approx(trade.amount) == 30.0
    assert trade.open_rate == 2.0

    # Too small size
    freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-59)
    freqtrade.process()
    trade = Trade.get_trades().first()
    assert len(trade.orders) == 1
    assert pytest.approx(trade.stake_amount) == 60
    assert pytest.approx(trade.amount) == 30.0
    assert log_has_re("Remaining amount of 1.6.* would be too small.", caplog)

    freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-20)

    freqtrade.process()
    trade = Trade.get_trades().first()
    assert len(trade.orders) == 2
    assert trade.orders[-1].ft_order_side == 'sell'
    assert pytest.approx(trade.stake_amount) == 40.198
    assert pytest.approx(trade.amount) == 20.099
    assert trade.open_rate == 2.0
    assert trade.is_open
    caplog.clear()

    # Sell more than what we got (we got ~20 coins left)
    # First adjusts the amount to 20 - then rejects.
    freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-50)
    freqtrade.process()
    assert log_has_re("Adjusting amount to trade.amount as it is higher.*", caplog)
    assert log_has_re("Remaining amount of 0.0 would be too small.", caplog)
    trade = Trade.get_trades().first()
    assert len(trade.orders) == 2
    assert trade.orders[-1].ft_order_side == 'sell'
    assert pytest.approx(trade.stake_amount) == 40.198
    assert trade.is_open

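The 20.099 / 40.198 figures asserted in test_dca_exiting are consistent with selling 20 USDT worth of the position at the mocked ticker price and re-valuing the remaining stake at the open rate. A minimal sketch, assuming an exit price of 2.02 and an open rate of 2.0 (both come from the `ticker_usdt` fixture, which is not shown in this excerpt):

# Illustration only: assumed exit price 2.02 and open rate 2.0 (fixture values not shown here).
coins_sold = 20 / 2.02                 # ~9.901 coins sold to free 20 USDT of stake
remaining = 30.0 - coins_sold          # ~20.099 coins left in the position
assert round(remaining, 3) == 20.099
assert round(remaining * 2.0, 3) == 40.198   # stake re-valued at the open rate
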
@ -1,6 +1,6 @@
import time_machine

from freqtrade.configuration import PeriodicCache
from freqtrade.util import PeriodicCache


def test_ttl_cache():

@ -99,7 +99,7 @@ def test_enter_exit_side(fee, is_short):


@pytest.mark.usefixtures("init_persistence")
def test_set_stop_loss_isolated_liq(fee):
def test_set_stop_loss_liquidation(fee):
    trade = Trade(
        id=2,
        pair='ADA/USDT',
@ -115,73 +115,94 @@ def test_set_stop_loss_isolated_liq(fee):
        leverage=2.0,
        trading_mode=margin
    )
    trade.set_isolated_liq(0.09)
    trade.set_liquidation_price(0.09)
    assert trade.liquidation_price == 0.09
    assert trade.stop_loss is None
    assert trade.initial_stop_loss is None

    trade._set_stop_loss(0.1, (1.0 / 9.0))
    trade.adjust_stop_loss(2.0, 0.2, True)
    assert trade.liquidation_price == 0.09
    assert trade.stop_loss == 0.1
    assert trade.initial_stop_loss == 0.1
    assert trade.stop_loss == 1.8
    assert trade.initial_stop_loss == 1.8

    trade.set_isolated_liq(0.08)
    trade.set_liquidation_price(0.08)
    assert trade.liquidation_price == 0.08
    assert trade.stop_loss == 0.1
    assert trade.initial_stop_loss == 0.1
    assert trade.stop_loss == 1.8
    assert trade.initial_stop_loss == 1.8

    trade.set_isolated_liq(0.11)
    trade._set_stop_loss(0.1, 0)
    trade.set_liquidation_price(0.11)
    trade.adjust_stop_loss(2.0, 0.2)
    assert trade.liquidation_price == 0.11
    assert trade.stop_loss == 0.11
    assert trade.initial_stop_loss == 0.1
    # Stoploss does not change from liquidation price
    assert trade.stop_loss == 1.8
    assert trade.initial_stop_loss == 1.8

    # lower stop doesn't move stoploss
    trade._set_stop_loss(0.1, 0)
    trade.adjust_stop_loss(1.8, 0.2)
    assert trade.liquidation_price == 0.11
    assert trade.stop_loss == 0.11
    assert trade.initial_stop_loss == 0.1
    assert trade.stop_loss == 1.8
    assert trade.initial_stop_loss == 1.8

    # higher stop does move stoploss
    trade.adjust_stop_loss(2.1, 0.1)
    assert trade.liquidation_price == 0.11
    assert pytest.approx(trade.stop_loss) == 1.994999
    assert trade.initial_stop_loss == 1.8
    assert trade.stoploss_or_liquidation == trade.stop_loss

    trade.stop_loss = None
    trade.liquidation_price = None
    trade.initial_stop_loss = None
    trade.initial_stop_loss_pct = None

    trade._set_stop_loss(0.07, 0)
    trade.adjust_stop_loss(2.0, 0.1, True)
    assert trade.liquidation_price is None
    assert trade.stop_loss == 0.07
    assert trade.initial_stop_loss == 0.07
    assert trade.stop_loss == 1.9
    assert trade.initial_stop_loss == 1.9
    assert trade.stoploss_or_liquidation == 1.9

    trade.is_short = True
    trade.recalc_open_trade_value()
    trade.stop_loss = None
    trade.initial_stop_loss = None
    trade.initial_stop_loss_pct = None

    trade.set_isolated_liq(0.09)
    assert trade.liquidation_price == 0.09
    trade.set_liquidation_price(3.09)
    assert trade.liquidation_price == 3.09
    assert trade.stop_loss is None
    assert trade.initial_stop_loss is None

    trade._set_stop_loss(0.08, (1.0 / 9.0))
    assert trade.liquidation_price == 0.09
    assert trade.stop_loss == 0.08
    assert trade.initial_stop_loss == 0.08
    trade.adjust_stop_loss(2.0, 0.2)
    assert trade.liquidation_price == 3.09
    assert trade.stop_loss == 2.2
    assert trade.initial_stop_loss == 2.2
    assert trade.stoploss_or_liquidation == 2.2

    trade.set_isolated_liq(0.1)
    assert trade.liquidation_price == 0.1
    assert trade.stop_loss == 0.08
    assert trade.initial_stop_loss == 0.08
    trade.set_liquidation_price(3.1)
    assert trade.liquidation_price == 3.1
    assert trade.stop_loss == 2.2
    assert trade.initial_stop_loss == 2.2
    assert trade.stoploss_or_liquidation == 2.2

    trade.set_isolated_liq(0.07)
    trade._set_stop_loss(0.1, (1.0 / 8.0))
    assert trade.liquidation_price == 0.07
    assert trade.stop_loss == 0.07
    assert trade.initial_stop_loss == 0.08
    trade.set_liquidation_price(3.8)
    assert trade.liquidation_price == 3.8
    # Stoploss does not change from liquidation price
    assert trade.stop_loss == 2.2
    assert trade.initial_stop_loss == 2.2

    # Stop doesn't move stop higher
    trade._set_stop_loss(0.1, (1.0 / 9.0))
    assert trade.liquidation_price == 0.07
    assert trade.stop_loss == 0.07
    assert trade.initial_stop_loss == 0.08
    trade.adjust_stop_loss(2.0, 0.3)
    assert trade.liquidation_price == 3.8
    assert trade.stop_loss == 2.2
    assert trade.initial_stop_loss == 2.2

    # Stoploss does move lower
    trade.set_liquidation_price(1.5)
    trade.adjust_stop_loss(1.8, 0.1)
    assert trade.liquidation_price == 1.5
    assert pytest.approx(trade.stop_loss) == 1.89
    assert trade.initial_stop_loss == 2.2
    assert trade.stoploss_or_liquidation == 1.5

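The stop-loss values asserted in test_set_stop_loss_liquidation are consistent with the stop distance being divided by the trade's leverage (2.0 in this test). A minimal sketch of that arithmetic; the formula is inferred from the asserts, not taken from the implementation:

# Illustration only: stop distance divided by leverage, inferred from the asserted values.
def expected_stop(rate, stoploss, leverage, is_short=False):
    distance = stoploss / leverage
    return rate * (1 + distance) if is_short else rate * (1 - distance)

assert round(expected_stop(2.0, 0.2, 2.0), 9) == 1.8                    # initial long stop
assert round(expected_stop(2.1, 0.1, 2.0), 6) == 1.995                  # trailed stop (~1.994999)
assert round(expected_stop(2.0, 0.2, 2.0, is_short=True), 9) == 2.2     # short side
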
@pytest.mark.parametrize('exchange,is_short,lev,minutes,rate,interest,trading_mode', [
@ -479,7 +500,7 @@ def test_update_limit_order(fee, caplog, limit_buy_order_usdt, limit_sell_order_
    assert trade.close_profit is None
    assert trade.close_date is None

    trade.open_order_id = 'something'
    trade.open_order_id = enter_order['id']
    oobj = Order.parse_from_ccxt_object(enter_order, 'ADA/USDT', entry_side)
    trade.orders.append(oobj)
    trade.update_trade(oobj)
@ -494,7 +515,7 @@ def test_update_limit_order(fee, caplog, limit_buy_order_usdt, limit_sell_order_
                   caplog)

    caplog.clear()
    trade.open_order_id = 'something'
    trade.open_order_id = enter_order['id']
    time_machine.move_to("2022-03-31 21:45:05 +00:00")
    oobj = Order.parse_from_ccxt_object(exit_order, 'ADA/USDT', exit_side)
    trade.orders.append(oobj)
@ -529,7 +550,7 @@ def test_update_market_order(market_buy_order_usdt, market_sell_order_usdt, fee,
        leverage=1.0,
    )

    trade.open_order_id = 'something'
    trade.open_order_id = 'mocked_market_buy'
    oobj = Order.parse_from_ccxt_object(market_buy_order_usdt, 'ADA/USDT', 'buy')
    trade.orders.append(oobj)
    trade.update_trade(oobj)
@ -544,7 +565,7 @@ def test_update_market_order(market_buy_order_usdt, market_sell_order_usdt, fee,

    caplog.clear()
    trade.is_open = True
    trade.open_order_id = 'something'
    trade.open_order_id = 'mocked_market_sell'
    oobj = Order.parse_from_ccxt_object(market_sell_order_usdt, 'ADA/USDT', 'sell')
    trade.orders.append(oobj)
    trade.update_trade(oobj)
@ -609,14 +630,14 @@ def test_calc_open_close_trade_price(
    trade.open_rate = 2.0
    trade.close_rate = 2.2
    trade.recalc_open_trade_value()
    assert isclose(trade._calc_open_trade_value(), open_value)
    assert isclose(trade._calc_open_trade_value(trade.amount, trade.open_rate), open_value)
    assert isclose(trade.calc_close_trade_value(trade.close_rate), close_value)
    assert isclose(trade.calc_profit(trade.close_rate), round(profit, 8))
    assert pytest.approx(trade.calc_profit_ratio(trade.close_rate)) == profit_ratio


@pytest.mark.usefixtures("init_persistence")
def test_trade_close(limit_buy_order_usdt, limit_sell_order_usdt, fee):
def test_trade_close(fee):
    trade = Trade(
        pair='ADA/USDT',
        stake_amount=60.0,
@ -794,7 +815,7 @@ def test_calc_open_trade_value(
    trade.update_trade(oobj)  # Buy @ 2.0

    # Get the open rate price with the standard fee rate
    assert trade._calc_open_trade_value() == result
    assert trade._calc_open_trade_value(trade.amount, trade.open_rate) == result


@pytest.mark.parametrize(
@ -884,7 +905,7 @@ def test_calc_close_trade_price(
    ('binance', False, 1, 1.9, 0.003, -3.3209999, -0.055211970, spot, 0),
    ('binance', False, 1, 2.2, 0.003, 5.6520000, 0.093965087, spot, 0),

    # # FUTURES, funding_fee=1
    # FUTURES, funding_fee=1
    ('binance', False, 1, 2.1, 0.0025, 3.6925, 0.06138819, futures, 1),
    ('binance', False, 3, 2.1, 0.0025, 3.6925, 0.18416458, futures, 1),
    ('binance', True, 1, 2.1, 0.0025, -2.3074999, -0.03855472, futures, 1),
@ -1170,6 +1191,11 @@ def test_calc_profit(
    assert pytest.approx(trade.calc_profit(rate=close_rate)) == round(profit, 8)
    assert pytest.approx(trade.calc_profit_ratio(rate=close_rate)) == round(profit_ratio, 8)

    assert pytest.approx(trade.calc_profit(close_rate, trade.amount,
                                           trade.open_rate)) == round(profit, 8)
    assert pytest.approx(trade.calc_profit_ratio(close_rate, trade.amount,
                                                 trade.open_rate)) == round(profit_ratio, 8)


def test_migrate_new(mocker, default_conf, fee, caplog):
    """
@ -1361,7 +1387,7 @@ def test_migrate_new(mocker, default_conf, fee, caplog):
    assert log_has("trying trades_bak2", caplog)
    assert log_has("Running database migration for trades - backup: trades_bak2, orders_bak0",
                   caplog)
    assert trade.open_trade_value == trade._calc_open_trade_value()
    assert trade.open_trade_value == trade._calc_open_trade_value(trade.amount, trade.open_rate)
    assert trade.close_profit_abs is None

    orders = trade.orders
@ -1537,26 +1563,26 @@ def test_adjust_stop_loss(fee):

    # Get percent of profit with a custom rate (Higher than open rate)
    trade.adjust_stop_loss(1.3, -0.1)
    assert round(trade.stop_loss, 8) == 1.17
    assert pytest.approx(trade.stop_loss) == 1.17
    assert trade.stop_loss_pct == -0.1
    assert trade.initial_stop_loss == 0.95
    assert trade.initial_stop_loss_pct == -0.05

    # current rate lower again ... should not change
    trade.adjust_stop_loss(1.2, 0.1)
    assert round(trade.stop_loss, 8) == 1.17
    assert pytest.approx(trade.stop_loss) == 1.17
    assert trade.initial_stop_loss == 0.95
    assert trade.initial_stop_loss_pct == -0.05

    # current rate higher... should raise stoploss
    trade.adjust_stop_loss(1.4, 0.1)
    assert round(trade.stop_loss, 8) == 1.26
    assert pytest.approx(trade.stop_loss) == 1.26
    assert trade.initial_stop_loss == 0.95
    assert trade.initial_stop_loss_pct == -0.05

    # Initial is true but stop_loss set - so doesn't do anything
    trade.adjust_stop_loss(1.7, 0.1, True)
    assert round(trade.stop_loss, 8) == 1.26
    assert pytest.approx(trade.stop_loss) == 1.26
    assert trade.initial_stop_loss == 0.95
    assert trade.initial_stop_loss_pct == -0.05
    assert trade.stop_loss_pct == -0.1

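The trailing behaviour checked in test_adjust_stop_loss only ever raises the stop for a long trade. A quick sketch of the expected values (illustration only; leverage 1):

# Illustration only: trailing stop values implied by the asserts above.
assert round(1.3 * (1 - 0.1), 8) == 1.17     # rate 1.3 with a 10% stop -> raised to 1.17
assert round(1.4 * (1 - 0.1), 8) == 1.26     # rate 1.4 -> raised again to 1.26
assert 1.2 * (1 - 0.1) < 1.17                # rate 1.2 would lower the stop, so 1.17 is kept
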
@ -1609,9 +1635,10 @@ def test_adjust_stop_loss_short(fee):
    assert trade.initial_stop_loss == 1.05
    assert trade.initial_stop_loss_pct == -0.05
    assert trade.stop_loss_pct == -0.1
    trade.set_isolated_liq(0.63)
    # Liquidation price is lower than stoploss - so liquidation would trigger first.
    trade.set_liquidation_price(0.63)
    trade.adjust_stop_loss(0.59, -0.1)
    assert trade.stop_loss == 0.63
    assert trade.stop_loss == 0.649
    assert trade.liquidation_price == 0.63


@ -1722,6 +1749,7 @@ def test_to_json(fee):
        'stake_amount': 0.001,
        'trade_duration': None,
        'trade_duration_s': None,
        'realized_profit': 0.0,
        'close_profit': None,
        'close_profit_pct': None,
        'close_profit_abs': None,
@ -1798,6 +1826,7 @@ def test_to_json(fee):
        'initial_stop_loss_abs': None,
        'initial_stop_loss_pct': None,
        'initial_stop_loss_ratio': None,
        'realized_profit': 0.0,
        'close_profit': None,
        'close_profit_pct': None,
        'close_profit_abs': None,
@ -2009,10 +2038,10 @@ def test_stoploss_reinitialization_short(default_conf, fee):
    assert trade_adj.initial_stop_loss == 1.01
    assert trade_adj.initial_stop_loss_pct == -0.05
    # Stoploss can't go above liquidation price
    trade_adj.set_isolated_liq(0.985)
    trade_adj.set_liquidation_price(0.985)
    trade.adjust_stop_loss(0.9799, -0.05)
    assert trade_adj.stop_loss == 0.985
    assert trade_adj.stop_loss == 0.985
    assert trade_adj.stop_loss == 0.989699
    assert trade_adj.liquidation_price == 0.985


def test_update_fee(fee):
@ -2346,6 +2375,7 @@ def test_Trade_object_idem():
        'delete',
        'session',
        'commit',
        'rollback',
        'query',
        'open_date',
        'get_best_pair',
@ -2399,7 +2429,7 @@ def test_recalc_trade_from_orders(fee):
    )

    assert fee.return_value == 0.0025
    assert trade._calc_open_trade_value() == o1_trade_val
    assert trade._calc_open_trade_value(trade.amount, trade.open_rate) == o1_trade_val
    assert trade.amount == o1_amount
    assert trade.stake_amount == o1_cost
    assert trade.open_rate == o1_rate
@ -2511,7 +2541,8 @@ def test_recalc_trade_from_orders(fee):
    assert pytest.approx(trade.fee_open_cost) == o1_fee_cost + o2_fee_cost + o3_fee_cost
    assert pytest.approx(trade.open_trade_value) == o1_trade_val + o2_trade_val + o3_trade_val

    # Just to make sure sell orders are ignored, let's calculate one more time.
    # Just to make sure full sell orders are ignored, let's calculate one more time.

    sell1 = Order(
        ft_order_side='sell',
        ft_pair=trade.pair,
@ -2673,7 +2704,7 @@ def test_recalc_trade_from_orders_ignores_bad_orders(fee, is_short):
    assert trade.open_trade_value == 2 * o1_trade_val
    assert trade.nr_of_successful_entries == 2

    # Just to make sure exit orders are ignored, let's calculate one more time.
    # Reduce position - this will reduce amount again.
    sell1 = Order(
        ft_order_side=exit_side,
        ft_pair=trade.pair,
@ -2684,7 +2715,7 @@ def test_recalc_trade_from_orders_ignores_bad_orders(fee, is_short):
        side=exit_side,
        price=4,
        average=3,
        filled=2,
        filled=o1_amount,
        remaining=1,
        cost=5,
        order_date=trade.open_date,
@ -2693,11 +2724,11 @@ def test_recalc_trade_from_orders_ignores_bad_orders(fee, is_short):
    trade.orders.append(sell1)
    trade.recalc_trade_from_orders()

    assert trade.amount == 2 * o1_amount
    assert trade.stake_amount == 2 * o1_amount
    assert trade.amount == o1_amount
    assert trade.stake_amount == o1_amount
    assert trade.open_rate == o1_rate
    assert trade.fee_open_cost == 2 * o1_fee_cost
    assert trade.open_trade_value == 2 * o1_trade_val
    assert trade.fee_open_cost == o1_fee_cost
    assert trade.open_trade_value == o1_trade_val
    assert trade.nr_of_successful_entries == 2

    # Check with 1 order
@ -2721,11 +2752,11 @@ def test_recalc_trade_from_orders_ignores_bad_orders(fee, is_short):
    trade.recalc_trade_from_orders()

    # Calling recalc with single initial order should not change anything
    assert trade.amount == 3 * o1_amount
    assert trade.stake_amount == 3 * o1_amount
    assert trade.amount == 2 * o1_amount
    assert trade.stake_amount == 2 * o1_amount
    assert trade.open_rate == o1_rate
    assert trade.fee_open_cost == 3 * o1_fee_cost
    assert trade.open_trade_value == 3 * o1_trade_val
    assert trade.fee_open_cost == 2 * o1_fee_cost
    assert trade.open_trade_value == 2 * o1_trade_val
    assert trade.nr_of_successful_entries == 3

@ -2793,3 +2824,144 @@ def test_order_to_ccxt(limit_buy_order_open):
    del raw_order['stopPrice']
    del limit_buy_order_open['datetime']
    assert raw_order == limit_buy_order_open

@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('data', [
    {
        # tuple 1 - side, amount, price
        # tuple 2 - amount, open_rate, stake_amount, cumulative_profit, realized_profit, rel_profit
        'orders': [
            (('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)),
            (('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)),
            (('sell', 50, 12), (150.0, 12.5, 1875.0, -25.0, -25.0, -0.04)),
            (('sell', 100, 20), (50.0, 12.5, 625.0, 725.0, 750.0, 0.60)),
            (('sell', 50, 5), (50.0, 12.5, 625.0, 350.0, -375.0, -0.60)),
        ],
        'end_profit': 350.0,
        'end_profit_ratio': 0.14,
        'fee': 0.0,
    },
    {
        'orders': [
            (('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)),
            (('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)),
            (('sell', 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.044788)),
            (('sell', 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.59201995)),
            (('sell', 50, 5), (50.0, 12.5, 625.0, 336.625, -377.1875, -0.60199501)),
        ],
        'end_profit': 336.625,
        'end_profit_ratio': 0.1343142,
        'fee': 0.0025,
    },
    {
        'orders': [
            (('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)),
            (('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)),
            (('sell', 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 1.189027)),
            (('buy', 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 1.189027)),
            (('sell', 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.7186579)),
            (('sell', 150, 23), (150.0, 11.0, 1650.0, 3175.75, 1787.25, 1.08048062)),
        ],
        'end_profit': 3175.75,
        'end_profit_ratio': 0.9747170,
        'fee': 0.0025,
    },
    {
        # Test above without fees
        'orders': [
            (('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)),
            (('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)),
            (('sell', 100, 11), (100.0, 5.0, 500.0, 600.0, 600.0, 1.2)),
            (('buy', 150, 15), (250.0, 11.0, 2750.0, 600.0, 600.0, 1.2)),
            (('sell', 100, 19), (150.0, 11.0, 1650.0, 1400.0, 800.0, 0.72727273)),
            (('sell', 150, 23), (150.0, 11.0, 1650.0, 3200.0, 1800.0, 1.09090909)),
        ],
        'end_profit': 3200.0,
        'end_profit_ratio': 0.98461538,
        'fee': 0.0,
    },
    {
        'orders': [
            (('buy', 100, 8), (100.0, 8.0, 800.0, 0.0, None, None)),
            (('buy', 100, 9), (200.0, 8.5, 1700.0, 0.0, None, None)),
            (('sell', 100, 10), (100.0, 8.5, 850.0, 150.0, 150.0, 0.17647059)),
            (('buy', 150, 11), (250.0, 10, 2500.0, 150.0, 150.0, 0.17647059)),
            (('sell', 100, 12), (150.0, 10.0, 1500.0, 350.0, 350.0, 0.2)),
            (('sell', 150, 14), (150.0, 10.0, 1500.0, 950.0, 950.0, 0.40)),
        ],
        'end_profit': 950.0,
        'end_profit_ratio': 0.283582,
        'fee': 0.0,
    },
])
def test_recalc_trade_from_orders_dca(data) -> None:

    pair = 'ETH/USDT'
    trade = Trade(
        id=2,
        pair=pair,
        stake_amount=1000,
        open_rate=data['orders'][0][0][2],
        amount=data['orders'][0][0][1],
        is_open=True,
        open_date=arrow.utcnow().datetime,
        fee_open=data['fee'],
        fee_close=data['fee'],
        exchange='binance',
        is_short=False,
        leverage=1.0,
        trading_mode=TradingMode.SPOT
    )
    Trade.query.session.add(trade)

    for idx, (order, result) in enumerate(data['orders']):
        amount = order[1]
        price = order[2]

        order_obj = Order(
            ft_order_side=order[0],
            ft_pair=trade.pair,
            order_id=f"order_{order[0]}_{idx}",
            ft_is_open=False,
            status="closed",
            symbol=trade.pair,
            order_type="market",
            side=order[0],
            price=price,
            average=price,
            filled=amount,
            remaining=0,
            cost=amount * price,
            order_date=arrow.utcnow().shift(hours=-10 + idx).datetime,
            order_filled_date=arrow.utcnow().shift(hours=-10 + idx).datetime,
        )
        trade.orders.append(order_obj)
        trade.recalc_trade_from_orders()
        Trade.commit()

        orders1 = Order.query.all()
        assert orders1
        assert len(orders1) == idx + 1

        trade = Trade.query.first()
        assert trade
        assert len(trade.orders) == idx + 1
        if idx < len(data) - 1:
            assert trade.is_open is True
        assert trade.open_order_id is None
        assert trade.amount == result[0]
        assert trade.open_rate == result[1]
        assert trade.stake_amount == result[2]
        # TODO: enable the below.
        assert pytest.approx(trade.realized_profit) == result[3]
        # assert pytest.approx(trade.close_profit_abs) == result[4]
        assert pytest.approx(trade.close_profit) == result[5]

    trade.close(price)
    assert pytest.approx(trade.close_profit_abs) == data['end_profit']
    assert pytest.approx(trade.close_profit) == data['end_profit_ratio']
    assert not trade.is_open
    trade = Trade.query.first()
    assert trade
    assert trade.open_order_id is None
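
The cumulative_profit column of the first (fee-free) data set above can be reproduced from the volume-weighted average entry price; the final ratio matches the summed profit divided by the 2500 USDT spent on the two entries (an inference from the numbers in the data, not stated in the diff). A minimal sketch:

# Illustration only: cumulative profit of the first (fee-free) data set.
avg_entry = (100 * 10 + 100 * 15) / 200          # 12.5 average entry after both buys
p1 = 50 * (12 - avg_entry)                       # -25.0   sell 50 @ 12
p2 = 100 * (20 - avg_entry)                      # +750.0  sell 100 @ 20
p3 = 50 * (5 - avg_entry)                        # -375.0  sell 50 @ 5
assert (p1, p1 + p2, p1 + p2 + p3) == (-25.0, 725.0, 350.0)
assert (p1 + p2 + p3) / 2500 == 0.14             # end_profit_ratio: profit / total entry cost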