Merge remote-tracking branch 'origin/develop' into feat/convolutional-neural-net

robcaulk 2022-12-05 20:40:19 +01:00
commit 5826fae8ee
39 changed files with 846 additions and 728 deletions

View File

@@ -66,12 +66,6 @@ jobs:
      - name: Tests
        run: |
          pytest --random-order --cov=freqtrade --cov-config=.coveragerc
-        if: matrix.python-version != '3.9' || matrix.os != 'ubuntu-22.04'
-      - name: Tests incl. ccxt compatibility tests
-        run: |
-          pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun
-        if: matrix.python-version == '3.9' && matrix.os == 'ubuntu-22.04'
      - name: Coveralls
        if: (runner.os == 'Linux' && matrix.python-version == '3.10' && matrix.os == 'ubuntu-22.04')
@@ -310,9 +304,64 @@ jobs:
          details: Freqtrade doc test failed!
          webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
+
+  build_linux_online:
+    # Run pytest with "live" checks
+    runs-on: ubuntu-22.04
+    # permissions:
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.9"
+
+      - name: Cache_dependencies
+        uses: actions/cache@v3
+        id: cache
+        with:
+          path: ~/dependencies/
+          key: ${{ runner.os }}-dependencies
+
+      - name: pip cache (linux)
+        uses: actions/cache@v3
+        if: runner.os == 'Linux'
+        with:
+          path: ~/.cache/pip
+          key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
+
+      - name: TA binary *nix
+        if: steps.cache.outputs.cache-hit != 'true'
+        run: |
+          cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
+
+      - name: Installation - *nix
+        if: runner.os == 'Linux'
+        run: |
+          python -m pip install --upgrade pip wheel
+          export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
+          export TA_LIBRARY_PATH=${HOME}/dependencies/lib
+          export TA_INCLUDE_PATH=${HOME}/dependencies/include
+          pip install -r requirements-dev.txt
+          pip install -e .
+
+      - name: Tests incl. ccxt compatibility tests
+        run: |
+          pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun
+
  # Notify only once - when CI completes (and after deploy) in case it's successful
  notify-complete:
-    needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit ]
+    needs: [
+      build_linux,
+      build_macos,
+      build_windows,
+      docs_check,
+      mypy_version_check,
+      pre-commit,
+      build_linux_online
+    ]
    runs-on: ubuntu-22.04
    # Discord notification can't handle schedule events
    if: (github.event_name != 'schedule')
@@ -361,7 +410,7 @@ jobs:
          python setup.py sdist bdist_wheel
      - name: Publish to PyPI (Test)
-        uses: pypa/gh-action-pypi-publish@v1.5.1
+        uses: pypa/gh-action-pypi-publish@v1.6.1
        if: (github.event_name == 'release')
        with:
          user: __token__
@@ -369,7 +418,7 @@ jobs:
          repository_url: https://test.pypi.org/legacy/
      - name: Publish to PyPI
-        uses: pypa/gh-action-pypi-publish@v1.5.1
+        uses: pypa/gh-action-pypi-publish@v1.6.1
        if: (github.event_name == 'release')
        with:
          user: __token__

View File

@@ -100,3 +100,17 @@ freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 2 --enter-re
The indicators have to be present in your strategy's main DataFrame (either for your main
timeframe or for informative timeframes) otherwise they will simply be ignored in the script
output.
+
+### Filtering the trade output by date
+
+To show only trades between dates within your backtested timerange, supply the usual `timerange` option in `YYYYMMDD-[YYYYMMDD]` format:
+
+```
+--timerange : Timerange to filter output trades, start date inclusive, end date exclusive. e.g. 20220101-20221231
+```
+
+For example, if your backtest timerange was `20220101-20221231` but you only want to output trades in January:
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --timerange 20220101-20220201
+```

View File

@@ -83,7 +83,7 @@ from pathlib import Path
project_root = "somedir/freqtrade"
i=0
try:
-    os.chdirdir(project_root)
+    os.chdir(project_root)
    assert Path('LICENSE').is_file()
except:
    while i<4 and (not Path('LICENSE').is_file()):

View File

@@ -49,6 +49,13 @@ For more information about the [Remote container extension](https://code.visuals
New code should be covered by basic unittests. Depending on the complexity of the feature, Reviewers may request more in-depth unittests. If necessary, the Freqtrade team can assist and give guidance with writing good tests (however please don't expect anyone to write the tests for you).

+#### How to run tests
+
+Use `pytest` in the root folder to run all available test cases and confirm your local environment is set up correctly.
+
+!!! Note "feature branches"
+    Tests are expected to pass on the `develop` and `stable` branches. Other branches may be work in progress with tests not working yet.
+
#### Checking log content in tests

Freqtrade uses 2 main methods to check log content in tests, `log_has()` and `log_has_re()` (to check using regex, in case of dynamic log-messages).
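As an aside, pytest can also be invoked programmatically — a minimal sketch (the `--random-order` flag assumes the `pytest-random-order` plugin from `requirements-dev.txt` is installed):

```python
# Minimal sketch: run the full test suite from the repository root,
# mirroring a plain `pytest` invocation in the shell.
import sys

import pytest

# Extra CLI flags are passed as list items; exit with pytest's status code.
sys.exit(pytest.main(["--random-order"]))
```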

View File

@@ -54,6 +54,9 @@ This configuration enables kraken, as well as rate-limiting to avoid bans from t
## Binance

+!!! Warning "Server location and geo-ip restrictions"
+    Please be aware that Binance restricts API access based on the server's country. The current, non-exhaustive list of blocked countries includes the United States, Malaysia (Singapore), and Ontario (Canada). Please see [Binance terms > b. Eligibility](https://www.binance.com/en/terms) for an up-to-date list.
+
Binance supports [time_in_force](configuration.md#understand-order_time_in_force).

!!! Tip "Stoploss on Exchange"

View File

@@ -37,7 +37,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `populate_any_indicators()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN. <br> **Datatype:** Positive integer.
| `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset. <br> **Datatype:** List of positive integers.
| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis) <br> **Datatype:** Boolean. <br> Default: `False`.
-| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. <br> **Datatype:** Integer. <br> Default: `0`.
+| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models/<identifier>/sub-train-<COIN>_<timestamp>.html`. <br> **Datatype:** Integer. <br> Default: `0`.
| `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Positive float (typically < 1).
| `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
| `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.
@@ -82,6 +82,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py` <br> **Datatype:** int.
| `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting. <br> **Datatype:** bool. <br> Default: `False`.
| `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[<shared layers>, dict(vf=[<non-shared value network layers>], pi=[<non-shared policy network layers>])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each.
+| `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting. <br> **Datatype:** bool. <br> Default: `False`.
### Additional parameters
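For orientation, a sketch of how these reinforcement-learning options might be grouped in the `freqai.rl_config` section of a configuration — key names are taken from the table above; the values are illustrative placeholders, not recommendations:

```python
# Illustrative rl_config fragment, written as a Python dict for readability;
# in a real config this lives under "freqai" -> "rl_config" in config.json.
rl_config = {
    "model_type": "PPO",                   # stable_baselines3 model class
    "policy_type": "MlpPolicy",
    "net_arch": [128, 128],                # 2 shared hidden layers, 128 units each
    "add_state_info": False,               # dry/live only; forced off for backtesting
    "randomize_starting_position": True,   # randomize episode start to avoid overfitting
    "model_reward_parameters": {},         # consumed by calculate_reward()
}
```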

View File

@@ -8,7 +8,7 @@

### What is RL and why does FreqAI need it?

-Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network.
+Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always taking one of a set of actions (long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom, user-made `calculate_reward()` (here we offer a default reward for users to build on if they wish, [details here](#creating-a-custom-reward-function)). The reward is used to train weights in a neural network.

A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployments.*

@@ -16,15 +16,15 @@ Reinforcement learning is a natural progression for FreqAI, since it adds a new

### The RL interface

-With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-the-reward).
+With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-a-custom-reward-function).

-We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely.
+We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-a-custom-reward-function), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely.

The framework is built on stable_baselines3 (torch) and OpenAI gym for the base environment class. Generally speaking, the model class is well isolated, so competing libraries can be integrated into the existing framework with relative ease. The environment, however, inherits from `gym.env`, which means that switching to a different library would require writing an entirely new environment.

### Important considerations

-As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, ect) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world.
+As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL training environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks like `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world.

## Running Reinforcement Learning

@@ -95,7 +95,7 @@ Most of the function remains the same as for typical Regressors, however, the fu
        informative[f"%-{pair}raw_low"] = informative["low"]
```

-Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action.
+Finally, there is no explicit "label" to make - instead it is necessary to assign the `&-action` column, which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action is set to 0. This value should align with the environment used. FreqAI provides two environments, both of which use 0 as the neutral action.

After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy, as sketched below.
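A hedged sketch of how a strategy might consume those integer actions, assuming the `Base5ActionRLEnv` convention (1 = long entry, 3 = short entry; the column names follow the FreqAI examples):

```python
from functools import reduce

from pandas import DataFrame


def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
    # Only act on candles where the model produced a valid prediction
    # (do_predict == 1) and the agent chose an entry action.
    enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1]
    df.loc[
        reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
    ] = (1, "long")

    enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3]
    df.loc[
        reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
    ] = (1, "short")

    return df
```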
@@ -166,7 +166,8 @@ As you begin to modify the strategy and the prediction model, you will quickly r

```python
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
-from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv
+from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions
+

class MyCoolRLModel(ReinforcementLearner):
    """
@@ -242,7 +243,7 @@ cd freqtrade
tensorboard --logdir user_data/models/unique-id
```

-where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard).
+where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in your browser at 127.0.0.1:6006 (6006 is the default port used by Tensorboard).

![tensorboard](assets/tensorboard.jpg)

@@ -253,7 +254,7 @@ FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5Action
* the actions available in the `calculate_reward`
* the actions consumed by the user strategy

-Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`.
+Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-a-custom-reward-function)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or, for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`.

!!! Note
    FreqAI does not provide, by default, a long-only training environment. However, creating one should be as simple as copy-pasting one of the built-in environments and removing the `short` actions (and all associated references to them).
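As an illustration, a minimal, hedged sketch of such an override — the class layout follows the `ReinforcementLearner` import shown earlier, but the reward logic itself is a placeholder, not the shipped default:

```python
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyCoolRLModel(ReinforcementLearner):
    class MyRLEnv(Base5ActionRLEnv):
        def calculate_reward(self, action: int) -> float:
            # Penalize actions the environment considers invalid
            # (e.g. trying to exit a position that is not open).
            if not self._is_valid(action):
                return -2.0
            # Reward closing a long with the unrealized PnL of the trade.
            if action == Actions.Long_exit.value and self._position == Positions.Long:
                return float(self.get_unrealized_profit())
            # Neutral placeholder for every other action.
            return 0.0
```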

View File

@@ -79,16 +79,11 @@ To change your **features**, you **must** set a new `identifier` in the config t
To save the models generated during a particular backtest so that you can start a live deployment from one of them instead of training a new model, you must set `save_backtest_models` to `True` in the config.

-### Backtest live models
+### Backtest live collected predictions

-FreqAI allows you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse models generated in dry runs for comparison or other study. For that, you must set `"purge_old_models"` to `True` in the config.
+FreqAI allows you to reuse live historic predictions through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry runs for comparison or other study.

-The `--timerange` parameter must not be set, as it will be automatically calculated through the training end dates of the models.
+The `--timerange` parameter must not be set, as it will be automatically calculated from the data in the historic predictions file.

-Each model has an identifier derived from the training end date. If you have only 1 model trained, FreqAI will backtest from the training end date until the current date. If you have more than 1 model, each model will perform the backtesting according to the training end date until the training end date of the next model and so on. For the last model, the period of the previous model will be used for the execution.
-
-!!! Note
-    Currently, there is no checking for expired models, even if the `expired_hours` parameter is set.
-
### Downloading data to cover the full backtest period

View File

@@ -1,6 +1,6 @@
markdown==3.3.7
mkdocs==1.4.2
-mkdocs-material==8.5.10
+mkdocs-material==8.5.11
mdx_truly_sane_lists==1.3
-pymdown-extensions==9.8
+pymdown-extensions==9.9
jinja2==3.1.2

View File

@@ -232,7 +232,7 @@ graph = generate_candlestick_graph(pair=pair,
# Show graph inline
# graph.show()

-# Render graph in a seperate window
+# Render graph in a separate window
graph.show(renderer="browser")
```

View File

@@ -722,6 +722,7 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V]
                                      [--enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]]
                                      [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]]
                                      [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]]
+                                     [--timerange YYYYMMDD-[YYYYMMDD]]

optional arguments:
  -h, --help            show this help message and exit
@@ -744,6 +745,10 @@ optional arguments:
  --indicator-list INDICATOR_LIST [INDICATOR_LIST ...]
                        Comma separated list of indicators to analyse. e.g.
                        'close,rsi,bb_lowerband,profit_abs'
+  --timerange YYYYMMDD-[YYYYMMDD]
+                        Timerange to filter trades for analysis,
+                        start inclusive, end exclusive. e.g.
+                        20220101-20220201

Common arguments:
  -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).

View File

@@ -60,10 +60,4 @@ def start_analysis_entries_exits(args: Dict[str, Any]) -> None:
    logger.info('Starting freqtrade in analysis mode')

-    process_entry_exit_reasons(config['exportfilename'],
-                               config['exchange']['pair_whitelist'],
-                               config['analysis_groups'],
-                               config['enter_reason_list'],
-                               config['exit_reason_list'],
-                               config['indicator_list']
-                               )
+    process_entry_exit_reasons(config)

View File

@@ -106,7 +106,7 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop
                      "disableparamexport", "backtest_breakdown"]

ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
-                              "exit_reason_list", "indicator_list"]
+                              "exit_reason_list", "indicator_list", "timerange"]

NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
                    "list-markets", "list-pairs", "list-strategies", "list-freqaimodels",

View File

@@ -462,6 +462,9 @@ class Configuration:
        self._args_to_config(config, argname='indicator_list',
                             logstring='Analysis indicator list: {}')

+        self._args_to_config(config, argname='timerange',
+                             logstring='Filter trades by timerange: {}')
+
    def _process_runmode(self, config: Config) -> None:

        self._args_to_config(config, argname='dry_run',

View File

@@ -591,6 +591,7 @@ CONF_SCHEMA = {
                "model_type": {"type": "string", "default": "PPO"},
                "policy_type": {"type": "string", "default": "MlpPolicy"},
                "net_arch": {"type": "array", "default": [128, 128]},
+                "randomize_starting_position": {"type": "boolean", "default": False},
                "model_reward_parameters": {
                    "type": "object",
                    "properties": {

View File

@@ -1,11 +1,12 @@
import logging
from pathlib import Path
-from typing import List, Optional

import joblib
import pandas as pd
from tabulate import tabulate

+from freqtrade.configuration import TimeRange
+from freqtrade.constants import Config
from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data,
                                       load_backtest_stats)
from freqtrade.exceptions import OperationalException
@@ -152,37 +153,55 @@ def _do_group_table_output(bigdf, glist):
        logger.warning("Invalid group mask specified.")


-def _print_results(analysed_trades, stratname, analysis_groups,
-                   enter_reason_list, exit_reason_list,
-                   indicator_list, columns=None):
-    if columns is None:
-        columns = ['pair', 'open_date', 'close_date', 'profit_abs', 'enter_reason', 'exit_reason']
-
-    bigdf = pd.DataFrame()
-
-    for pair, trades in analysed_trades[stratname].items():
-        bigdf = pd.concat([bigdf, trades], ignore_index=True)
-
-    if bigdf.shape[0] > 0 and ('enter_reason' in bigdf.columns):
-        if analysis_groups:
-            _do_group_table_output(bigdf, analysis_groups)
-
-        if enter_reason_list and "all" not in enter_reason_list:
-            bigdf = bigdf.loc[(bigdf['enter_reason'].isin(enter_reason_list))]
-
-        if exit_reason_list and "all" not in exit_reason_list:
-            bigdf = bigdf.loc[(bigdf['exit_reason'].isin(exit_reason_list))]
-
+def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'):
+    if timerange:
+        if timerange.starttype == 'date':
+            df = df.loc[(df[df_date_col] >= timerange.startdt)]
+        if timerange.stoptype == 'date':
+            df = df.loc[(df[df_date_col] < timerange.stopdt)]
+    return df
+
+
+def _select_rows_by_tags(df, enter_reason_list, exit_reason_list):
+    if enter_reason_list and "all" not in enter_reason_list:
+        df = df.loc[(df['enter_reason'].isin(enter_reason_list))]
+
+    if exit_reason_list and "all" not in exit_reason_list:
+        df = df.loc[(df['exit_reason'].isin(exit_reason_list))]
+    return df
+
+
+def prepare_results(analysed_trades, stratname,
+                    enter_reason_list, exit_reason_list,
+                    timerange=None):
+    res_df = pd.DataFrame()
+    for pair, trades in analysed_trades[stratname].items():
+        res_df = pd.concat([res_df, trades], ignore_index=True)
+
+    res_df = _select_rows_within_dates(res_df, timerange)
+
+    if res_df is not None and res_df.shape[0] > 0 and ('enter_reason' in res_df.columns):
+        res_df = _select_rows_by_tags(res_df, enter_reason_list, exit_reason_list)
+
+    return res_df
+
+
+def print_results(res_df, analysis_groups, indicator_list):
+    if res_df.shape[0] > 0:
        if analysis_groups:
-            _do_group_table_output(bigdf, analysis_groups)
+            _do_group_table_output(res_df, analysis_groups)

        if "all" in indicator_list:
-            print(bigdf)
+            print(res_df)
        elif indicator_list is not None:
            available_inds = []
            for ind in indicator_list:
-                if ind in bigdf:
+                if ind in res_df:
                    available_inds.append(ind)
            ilist = ["pair", "enter_reason", "exit_reason"] + available_inds
-            _print_table(bigdf[ilist], sortcols=['exit_reason'], show_index=False)
+            _print_table(res_df[ilist], sortcols=['exit_reason'], show_index=False)
    else:
        print("\\_ No trades to show")
def _print_table(df, sortcols=None, show_index=False):

@@ -201,27 +220,34 @@ def _print_table(df, sortcols=None, show_index=False):
    )


-def process_entry_exit_reasons(backtest_dir: Path,
-                               pairlist: List[str],
-                               analysis_groups: Optional[List[str]] = ["0", "1", "2"],
-                               enter_reason_list: Optional[List[str]] = ["all"],
-                               exit_reason_list: Optional[List[str]] = ["all"],
-                               indicator_list: Optional[List[str]] = []):
+def process_entry_exit_reasons(config: Config):
    try:
-        backtest_stats = load_backtest_stats(backtest_dir)
+        analysis_groups = config.get('analysis_groups', [])
+        enter_reason_list = config.get('enter_reason_list', ["all"])
+        exit_reason_list = config.get('exit_reason_list', ["all"])
+        indicator_list = config.get('indicator_list', [])
+        timerange = TimeRange.parse_timerange(None if config.get(
+            'timerange') is None else str(config.get('timerange')))
+
+        backtest_stats = load_backtest_stats(config['exportfilename'])
+
        for strategy_name, results in backtest_stats['strategy'].items():
-            trades = load_backtest_data(backtest_dir, strategy_name)
+            trades = load_backtest_data(config['exportfilename'], strategy_name)
            if not trades.empty:
-                signal_candles = _load_signal_candles(backtest_dir)
-                analysed_trades_dict = _process_candles_and_indicators(pairlist, strategy_name,
-                                                                       trades, signal_candles)
-                _print_results(analysed_trades_dict,
-                               strategy_name,
-                               analysis_groups,
-                               enter_reason_list,
-                               exit_reason_list,
-                               indicator_list)
+                signal_candles = _load_signal_candles(config['exportfilename'])
+                analysed_trades_dict = _process_candles_and_indicators(
+                    config['exchange']['pair_whitelist'], strategy_name,
+                    trades, signal_candles)
+
+                res_df = prepare_results(analysed_trades_dict, strategy_name,
+                                         enter_reason_list, exit_reason_list,
+                                         timerange=timerange)
+
+                print_results(res_df,
+                              analysis_groups,
+                              indicator_list)
+
    except ValueError as e:
        raise OperationalException(e) from e
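For orientation, a hedged sketch of driving the refactored pipeline directly — `analysed_trades_dict` stands in for the output of `_process_candles_and_indicators()`, and the argument values are illustrative:

```python
# Hypothetical driver: filtering now happens in prepare_results(),
# while print_results() is purely presentational.
timerange = TimeRange.parse_timerange("20220101-20220201")
res_df = prepare_results(analysed_trades_dict, "MyStrategy",
                         enter_reason_list=["all"], exit_reason_list=["all"],
                         timerange=timerange)
print_results(res_df, analysis_groups=["0", "2"], indicator_list=["rsi"])
```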

View File

@@ -1,4 +1,5 @@
import logging
+import random
from abc import abstractmethod
from enum import Enum
from typing import Optional
@@ -121,6 +122,10 @@ class BaseEnvironment(gym.Env):
        self._done = False

        if self.starting_point is True:
+            if self.rl_config.get('randomize_starting_position', False):
+                length_of_data = int(self._end_tick / 4)
+                start_tick = random.randint(self.window_size + 1, length_of_data)
+                self._start_tick = start_tick
            self._position_history = (self._start_tick * [None]) + [self._position]
        else:
            self._position_history = (self.window_size * [None]) + [self._position]
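To make the sampling concrete, a small self-contained illustration of the added logic — variable names mirror the snippet above, and the bounds are the same:

```python
import random

# The episode start is drawn uniformly from [window_size + 1, end_tick / 4],
# i.e. somewhere in the first quarter of the available data.
window_size, end_tick = 10, 4000
length_of_data = int(end_tick / 4)
start_tick = random.randint(window_size + 1, length_of_data)
assert window_size + 1 <= start_tick <= length_of_data
```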
@@ -189,12 +194,12 @@ class BaseEnvironment(gym.Env):
        if self._position == Positions.Neutral:
            return 0.
        elif self._position == Positions.Short:
+            current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open)
+            last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open)
+            return (last_trade_price - current_price) / last_trade_price
+        elif self._position == Positions.Long:
            current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open)
            last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open)
-            return (last_trade_price - current_price) / last_trade_price
-        elif self._position == Positions.Long:
-            current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open)
-            last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open)
            return (current_price - last_trade_price) / last_trade_price
        else:
            return 0.
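A worked sanity check of the short-position formula above; `add_entry_fee`/`add_exit_fee` here are stand-in lambdas (the real methods' fee arithmetic is an assumption, not copied from the source):

```python
# Illustrative check of the short-position branch, with a 0.1% fee.
fee = 0.001
add_entry_fee = lambda price: price * (1 + fee)  # assumed: entry fee raises cost basis
add_exit_fee = lambda price: price * (1 - fee)   # assumed: exit fee trims proceeds

last_trade_price = add_entry_fee(100.0)  # open price when the short was entered
current_price = add_exit_fee(90.0)       # open price at the current tick
short_profit = (last_trade_price - current_price) / last_trade_price
assert short_profit > 0  # the price fell, so the short position gains
```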

View File

@@ -64,6 +64,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
        self.policy_type = self.freqai_info['rl_config']['policy_type']
        self.unset_outlier_removal()
        self.net_arch = self.rl_config.get('net_arch', [128, 128])
+        self.dd.model_type = import_str

    def unset_outlier_removal(self):
        """
@@ -192,6 +193,10 @@ class BaseReinforcementLearningModel(IFreqaiModel):
        now = datetime.now(timezone.utc).timestamp()
        trade_duration = int((now - trade.open_date_utc.timestamp()) / self.base_tf_seconds)
        current_profit = trade.calc_profit_ratio(current_rate)
+        if trade.is_short:
+            market_side = 0
+        else:
+            market_side = 1

        return market_side, current_profit, int(trade_duration)

View File

@@ -4,7 +4,7 @@ import logging
import re
import shutil
import threading
-from datetime import datetime, timezone
+from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, Tuple, TypedDict
@@ -82,6 +82,7 @@ class FreqaiDataDrawer:
        self.historic_predictions_bkp_path = Path(
            self.full_path / "historic_predictions.backup.pkl")
        self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json")
+        self.global_metadata_path = Path(self.full_path / "global_metadata.json")
        self.metric_tracker_path = Path(self.full_path / "metric_tracker.json")
        self.follow_mode = follow_mode
        if follow_mode:
@@ -99,12 +100,7 @@ class FreqaiDataDrawer:
        self.empty_pair_dict: pair_info = {
                "model_filename": "", "trained_timestamp": 0,
                "data_path": "", "extras": {}}
-        if 'Reinforcement' in self.config['freqaimodel']:
-            self.model_type = 'stable_baselines'
-            logger.warning('User passed a ReinforcementLearner model, FreqAI will '
-                           'now use stable_baselines3 to save models.')
-        else:
-            self.model_type = self.freqai_info.get('model_save_type', 'joblib')
+        self.model_type = self.freqai_info.get('model_save_type', 'joblib')

    def update_metric_tracker(self, metric: str, value: float, pair: str) -> None:
        """
@@ -132,6 +128,17 @@ class FreqaiDataDrawer:
            self.update_metric_tracker('cpu_load5min', load5 / cpus, pair)
            self.update_metric_tracker('cpu_load15min', load15 / cpus, pair)

+    def load_global_metadata_from_disk(self):
+        """
+        Locate and load a previously saved global metadata in present model folder.
+        """
+        exists = self.global_metadata_path.is_file()
+        if exists:
+            with open(self.global_metadata_path, "r") as fp:
+                metadata_dict = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
+                return metadata_dict
+        return {}
+
    def load_drawer_from_disk(self):
        """
        Locate and load a previously saved data drawer full of all pair model metadata in
@@ -232,6 +239,15 @@ class FreqaiDataDrawer:
                rapidjson.dump(self.follower_dict, fp, default=self.np_encoder,
                               number_mode=rapidjson.NM_NATIVE)

+    def save_global_metadata_to_disk(self, metadata: Dict[str, Any]):
+        """
+        Save global metadata json to disk
+        """
+        with self.save_lock:
+            with open(self.global_metadata_path, 'w') as fp:
+                rapidjson.dump(metadata, fp, default=self.np_encoder,
+                               number_mode=rapidjson.NM_NATIVE)
+
    def create_follower_dict(self):
        """
        Create a dictionary for each follower to maintain unique persistent prediction targets
@@ -487,7 +503,7 @@ class FreqaiDataDrawer:
            dump(model, save_path / f"{dk.model_filename}_model.joblib")
        elif self.model_type == 'keras':
            model.save(save_path / f"{dk.model_filename}_model.h5")
-        elif 'stable_baselines' in self.model_type:
+        elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
            model.save(save_path / f"{dk.model_filename}_model.zip")

        if dk.svm_model is not None:
@@ -573,9 +589,9 @@ class FreqaiDataDrawer:
        elif self.model_type == 'keras':
            from tensorflow import keras
            model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
-        elif self.model_type == 'stable_baselines':
+        elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
            mod = importlib.import_module(
-                'stable_baselines3', self.freqai_info['rl_config']['model_type'])
+                self.model_type, self.freqai_info['rl_config']['model_type'])
            MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type'])
            model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model")
@@ -701,3 +717,31 @@ class FreqaiDataDrawer:
            ).reset_index(drop=True)

        return corr_dataframes, base_dataframes
+
+    def get_timerange_from_live_historic_predictions(self) -> TimeRange:
+        """
+        Returns timerange information based on historic predictions file
+        :return: timerange calculated from saved live data
+        """
+        if not self.historic_predictions_path.is_file():
+            raise OperationalException(
+                'Historic predictions not found. Historic predictions data is required '
+                'to run backtest with the freqai-backtest-live-models option '
+            )
+
+        self.load_historic_predictions_from_disk()
+        all_pairs_end_dates = []
+        for pair in self.historic_predictions:
+            pair_historic_data = self.historic_predictions[pair]
+            all_pairs_end_dates.append(pair_historic_data.date_pred.max())
+
+        global_metadata = self.load_global_metadata_from_disk()
+        start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"]))
+        end_date = max(all_pairs_end_dates)
+        # add 1 day to string timerange to ensure BT module will load all dataframe data
+        end_date = end_date + timedelta(days=1)
+        backtesting_timerange = TimeRange(
+            'date', 'date', int(start_date.timestamp()), int(end_date.timestamp())
+        )
+        return backtesting_timerange

View File

@@ -1,7 +1,7 @@
import copy
import logging
import shutil
-from datetime import datetime, timedelta, timezone
+from datetime import datetime, timezone
from math import cos, sin
from pathlib import Path
from typing import Any, Dict, List, Tuple
@@ -87,12 +87,7 @@ class FreqaiDataKitchen:
        if not self.live:
            self.full_path = self.get_full_models_path(self.config)

-            if self.backtest_live_models:
-                if self.pair:
-                    self.set_timerange_from_ready_models()
-                    (self.training_timeranges,
-                     self.backtesting_timeranges) = self.split_timerange_live_models()
-            else:
+            if not self.backtest_live_models:
                self.full_timerange = self.create_fulltimerange(
                    self.config["timerange"], self.freqai_config.get("train_period_days", 0)
                )
@@ -460,29 +455,6 @@ class FreqaiDataKitchen:
        # print(tr_training_list, tr_backtesting_list)
        return tr_training_list_timerange, tr_backtesting_list_timerange

-    def split_timerange_live_models(
-        self
-    ) -> Tuple[list, list]:
-
-        tr_backtesting_list_timerange = []
-        asset = self.pair.split("/")[0]
-
-        if asset not in self.backtest_live_models_data["assets_end_dates"]:
-            raise OperationalException(
-                f"Model not available for pair {self.pair}. "
-                "Please, try again after removing this pair from the configuration file."
-            )
-        asset_data = self.backtest_live_models_data["assets_end_dates"][asset]
-        backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"]
-        model_end_dates = [x for x in asset_data]
-        model_end_dates.append(backtesting_timerange.stopts)
-        model_end_dates.sort()
-        for index, item in enumerate(model_end_dates):
-            if len(model_end_dates) > (index + 1):
-                tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1])
-                tr_backtesting_list_timerange.append(tr_to_add)
-
-        return tr_backtesting_list_timerange, tr_backtesting_list_timerange
-
    def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame:
        """
        Given a full dataframe, extract the user desired window
@@ -978,7 +950,8 @@ class FreqaiDataKitchen:
        return weights

    def get_predictions_to_append(self, predictions: DataFrame,
-                                  do_predict: npt.ArrayLike) -> DataFrame:
+                                  do_predict: npt.ArrayLike,
+                                  dataframe_backtest: DataFrame) -> DataFrame:
        """
        Get backtest prediction from current backtest period
        """
@@ -1000,7 +973,9 @@ class FreqaiDataKitchen:
        if self.freqai_config["feature_parameters"].get("DI_threshold", 0) > 0:
            append_df["DI_values"] = self.DI_values

-        return append_df
+        dataframe_backtest.reset_index(drop=True, inplace=True)
+        merged_df = pd.concat([dataframe_backtest["date"], append_df], axis=1)
+        return merged_df

    def append_predictions(self, append_df: DataFrame) -> None:
        """
@@ -1010,23 +985,18 @@ class FreqaiDataKitchen:
        if self.full_df.empty:
            self.full_df = append_df
        else:
-            self.full_df = pd.concat([self.full_df, append_df], axis=0)
+            self.full_df = pd.concat([self.full_df, append_df], axis=0, ignore_index=True)

    def fill_predictions(self, dataframe):
        """
        Back fill values to before the backtesting range so that the dataframe matches size
        when it goes back to the strategy. These rows are not included in the backtest.
        """
-        len_filler = len(dataframe) - len(self.full_df.index)  # startup_candle_count
-        filler_df = pd.DataFrame(
-            np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns
-        )
-
-        self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True)

        to_keep = [col for col in dataframe.columns if not col.startswith("&")]
-        self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1)
+        self.return_dataframe = pd.merge(dataframe[to_keep],
+                                         self.full_df, how='left', on='date')
+        self.return_dataframe[self.full_df.columns] = (
+            self.return_dataframe[self.full_df.columns].fillna(value=0))

        self.full_df = DataFrame()

        return
@@ -1323,22 +1293,22 @@ class FreqaiDataKitchen:
        self, append_df: DataFrame
    ) -> None:
        """
-        Save prediction dataframe from backtesting to h5 file format
+        Save prediction dataframe from backtesting to feather file format
        :param append_df: dataframe for backtesting period
        """
        full_predictions_folder = Path(self.full_path / self.backtest_predictions_folder)
        if not full_predictions_folder.is_dir():
            full_predictions_folder.mkdir(parents=True, exist_ok=True)

-        append_df.to_hdf(self.backtesting_results_path, key='append_df', mode='w')
+        append_df.to_feather(self.backtesting_results_path)

    def get_backtesting_prediction(
        self
    ) -> DataFrame:
        """
-        Get prediction dataframe from h5 file format
+        Get prediction dataframe from feather file format
        """
-        append_df = pd.read_hdf(self.backtesting_results_path)
+        append_df = pd.read_feather(self.backtesting_results_path)
        return append_df

    def check_if_backtest_prediction_is_valid(
@@ -1354,19 +1324,20 @@ class FreqaiDataKitchen:
        """
        path_to_predictionfile = Path(self.full_path /
                                      self.backtest_predictions_folder /
-                                      f"{self.model_filename}_prediction.h5")
+                                      f"{self.model_filename}_prediction.feather")
        self.backtesting_results_path = path_to_predictionfile

        file_exists = path_to_predictionfile.is_file()
        if file_exists:
            append_df = self.get_backtesting_prediction()
-            if len(append_df) == len_backtest_df:
+            if len(append_df) == len_backtest_df and 'date' in append_df:
                logger.info(f"Found backtesting prediction file at {path_to_predictionfile}")
                return True
            else:
                logger.info("A new backtesting prediction file is required. "
-                            "(Number of predictions is different from dataframe length).")
+                            "(Number of predictions is different from dataframe length or "
+                            "old prediction file version).")
                return False
        else:
            logger.info(
@@ -1374,17 +1345,6 @@ class FreqaiDataKitchen:
            )
            return False

-    def set_timerange_from_ready_models(self):
-        backtesting_timerange, \
-            assets_end_dates = (
-                self.get_timerange_and_assets_end_dates_from_ready_models(self.full_path))
-
-        self.backtest_live_models_data = {
-            "backtesting_timerange": backtesting_timerange,
-            "assets_end_dates": assets_end_dates
-        }
-        return
-
    def get_full_models_path(self, config: Config) -> Path:
        """
        Returns default FreqAI model path
@@ -1395,88 +1355,6 @@ class FreqaiDataKitchen:
            config["user_data_dir"] / "models" / str(freqai_config.get("identifier"))
        )

-    def get_timerange_and_assets_end_dates_from_ready_models(
-            self, models_path: Path) -> Tuple[TimeRange, Dict[str, Any]]:
-        """
-        Returns timerange information based on a FreqAI model directory
-        :param models_path: FreqAI model path
-        :return: a Tuple with (Timerange calculated from directory and
-        a Dict with pair and model end training dates info)
-        """
-        all_models_end_dates = []
-        assets_end_dates: Dict[str, Any] = self.get_assets_timestamps_training_from_ready_models(
-            models_path)
-        for key in assets_end_dates:
-            for model_end_date in assets_end_dates[key]:
-                if model_end_date not in all_models_end_dates:
-                    all_models_end_dates.append(model_end_date)
-
-        if len(all_models_end_dates) == 0:
-            raise OperationalException(
-                'At least 1 saved model is required to '
-                'run backtest with the freqai-backtest-live-models option'
-            )
-
-        if len(all_models_end_dates) == 1:
-            logger.warning(
-                "Only 1 model was found. Backtesting will run with the "
-                "timerange from the end of the training date to the current date"
-            )
-            finish_timestamp = int(datetime.now(tz=timezone.utc).timestamp())
-        if len(all_models_end_dates) > 1:
-            # After last model end date, use the same period from previous model
-            # to finish the backtest
-            all_models_end_dates.sort(reverse=True)
-            finish_timestamp = all_models_end_dates[0] + \
-                (all_models_end_dates[0] - all_models_end_dates[1])
-
-        all_models_end_dates.append(finish_timestamp)
-        all_models_end_dates.sort()
-        start_date = (datetime(*datetime.fromtimestamp(min(all_models_end_dates),
-                      timezone.utc).timetuple()[:3], tzinfo=timezone.utc))
-        end_date = (datetime(*datetime.fromtimestamp(max(all_models_end_dates),
-                    timezone.utc).timetuple()[:3], tzinfo=timezone.utc))
-
-        # add 1 day to string timerange to ensure BT module will load all dataframe data
-        end_date = end_date + timedelta(days=1)
-        backtesting_timerange = TimeRange(
-            'date', 'date', int(start_date.timestamp()), int(end_date.timestamp())
-        )
-        return backtesting_timerange, assets_end_dates
-
-    def get_assets_timestamps_training_from_ready_models(
-            self, models_path: Path) -> Dict[str, Any]:
-        """
-        Scan the models path and returns all assets end training dates (timestamp)
-        :param models_path: FreqAI model path
-        :return: a Dict with asset and model end training dates info
-        """
-        assets_end_dates: Dict[str, Any] = {}
-        if not models_path.is_dir():
-            raise OperationalException(
-                'Model folders not found. Saved models are required '
-                'to run backtest with the freqai-backtest-live-models option'
-            )
-        for model_dir in models_path.iterdir():
-            if str(model_dir.name).startswith("sub-train"):
-                model_end_date = int(model_dir.name.split("_")[1])
-                asset = model_dir.name.split("_")[0].replace("sub-train-", "")
-                model_file_name = (
-                    f"cb_{str(model_dir.name).replace('sub-train-', '').lower()}"
-                    "_model.joblib"
-                )
-
-                model_path_file = Path(model_dir / model_file_name)
-                if model_path_file.is_file():
-                    if asset not in assets_end_dates:
-                        assets_end_dates[asset] = []
-                    assets_end_dates[asset].append(model_end_date)
-
-        return assets_end_dates
-
    def remove_special_chars_from_feature_names(self, dataframe: pd.DataFrame) -> pd.DataFrame:
        """
        Remove all special characters from feature strings (:)
View File
@ -69,6 +69,7 @@ class IFreqaiModel(ABC):
self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True) self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True)
if self.save_backtest_models: if self.save_backtest_models:
logger.info('Backtesting module configured to save all models.') logger.info('Backtesting module configured to save all models.')
self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode) self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode)
# set current candle to arbitrary historical date # set current candle to arbitrary historical date
self.current_candle: datetime = datetime.fromtimestamp(637887600, tz=timezone.utc) self.current_candle: datetime = datetime.fromtimestamp(637887600, tz=timezone.utc)
@ -100,6 +101,7 @@ class IFreqaiModel(ABC):
self.get_corr_dataframes: bool = True self.get_corr_dataframes: bool = True
self._threads: List[threading.Thread] = [] self._threads: List[threading.Thread] = []
self._stop_event = threading.Event() self._stop_event = threading.Event()
self.metadata: Dict[str, Any] = self.dd.load_global_metadata_from_disk()
self.data_provider: Optional[DataProvider] = None self.data_provider: Optional[DataProvider] = None
self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1) self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1)
@ -136,6 +138,7 @@ class IFreqaiModel(ABC):
self.inference_timer('start') self.inference_timer('start')
self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
dk = self.start_live(dataframe, metadata, strategy, self.dk) dk = self.start_live(dataframe, metadata, strategy, self.dk)
dataframe = dk.remove_features_from_df(dk.return_dataframe)
# For backtesting, each pair enters and then gets trained for each window along the # For backtesting, each pair enters and then gets trained for each window along the
# sliding window defined by "train_period_days" (training window) and "live_retrain_hours" # sliding window defined by "train_period_days" (training window) and "live_retrain_hours"
@ -144,20 +147,24 @@ class IFreqaiModel(ABC):
# the concatenated results for the full backtesting period back to the strategy. # the concatenated results for the full backtesting period back to the strategy.
elif not self.follow_mode: elif not self.follow_mode:
self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
if self.dk.backtest_live_models:
logger.info(
f"Backtesting {len(self.dk.backtesting_timeranges)} timeranges (live models)")
else:
logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
dataframe = self.dk.use_strategy_to_populate_indicators( dataframe = self.dk.use_strategy_to_populate_indicators(
strategy, prediction_dataframe=dataframe, pair=metadata["pair"] strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
) )
dk = self.start_backtesting(dataframe, metadata, self.dk) if not self.config.get("freqai_backtest_live_models", False):
logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
dk = self.start_backtesting(dataframe, metadata, self.dk)
dataframe = dk.remove_features_from_df(dk.return_dataframe)
else:
logger.info(
"Backtesting using historic predictions (live models)")
dk = self.start_backtesting_from_historic_predictions(
dataframe, metadata, self.dk)
dataframe = dk.return_dataframe
dataframe = dk.remove_features_from_df(dk.return_dataframe)
self.clean_up() self.clean_up()
if self.live: if self.live:
self.inference_timer('stop', metadata["pair"]) self.inference_timer('stop', metadata["pair"])
return dataframe return dataframe
def clean_up(self): def clean_up(self):
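The historic-predictions branch above is reached when `freqai_backtest_live_models` is set in the config, which the backtesting subcommand exposes as a flag. A minimal sketch of an invocation that exercises it (strategy and model names are illustrative); note that no --timerange is passed, since it is derived from the saved predictions:

    freqtrade backtesting --strategy FreqaiExampleStrategy \
        --freqaimodel LightGBMRegressor \
        --config config_examples/config_freqai.example.json \
        --freqai-backtest-live-models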
@ -316,10 +323,11 @@ class IFreqaiModel(ABC):
self.model = self.dd.load_data(pair, dk) self.model = self.dd.load_data(pair, dk)
pred_df, do_preds = self.predict(dataframe_backtest, dk) pred_df, do_preds = self.predict(dataframe_backtest, dk)
append_df = dk.get_predictions_to_append(pred_df, do_preds) append_df = dk.get_predictions_to_append(pred_df, do_preds, dataframe_backtest)
dk.append_predictions(append_df) dk.append_predictions(append_df)
dk.save_backtesting_prediction(append_df) dk.save_backtesting_prediction(append_df)
self.backtesting_fit_live_predictions(dk)
dk.fill_predictions(dataframe) dk.fill_predictions(dataframe)
return dk return dk
@ -632,6 +640,8 @@ class IFreqaiModel(ABC):
self.dd.historic_predictions[pair] = pred_df self.dd.historic_predictions[pair] = pred_df
hist_preds_df = self.dd.historic_predictions[pair] hist_preds_df = self.dd.historic_predictions[pair]
self.set_start_dry_live_date(strat_df)
for label in hist_preds_df.columns: for label in hist_preds_df.columns:
if hist_preds_df[label].dtype == object: if hist_preds_df[label].dtype == object:
continue continue
@ -672,7 +682,8 @@ class IFreqaiModel(ABC):
for label in full_labels: for label in full_labels:
if self.dd.historic_predictions[dk.pair][label].dtype == object: if self.dd.historic_predictions[dk.pair][label].dtype == object:
continue continue
f = spy.stats.norm.fit(self.dd.historic_predictions[dk.pair][label].tail(num_candles)) f = spy.stats.norm.fit(
self.dd.historic_predictions[dk.pair][label].tail(num_candles))
dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1] dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1]
return return
@ -826,6 +837,81 @@ class IFreqaiModel(ABC):
f"to {tr_train.stop_fmt}, {train_it}/{total_trains} " f"to {tr_train.stop_fmt}, {train_it}/{total_trains} "
"trains" "trains"
) )
def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen):
"""
Apply the fit_live_predictions function in backtesting on a dummy historic_predictions
window. The loop is required to simulate dry/live operation, since there is no way to
know in advance what kind of logic the user has implemented there.
:param dk: FreqaiDataKitchen object for the current pair
"""
fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0)
if fit_live_predictions_candles:
logger.info("Applying fit_live_predictions in backtesting")
label_columns = [col for col in dk.full_df.columns if (
col.startswith("&") and
not (col.startswith("&") and col.endswith("_mean")) and
not (col.startswith("&") and col.endswith("_std")) and
col not in self.dk.data["extra_returns_per_train"])
]
for index in range(len(dk.full_df)):
if index >= fit_live_predictions_candles:
self.dd.historic_predictions[self.dk.pair] = (
dk.full_df.iloc[index - fit_live_predictions_candles:index])
self.fit_live_predictions(self.dk, self.dk.pair)
for label in label_columns:
if dk.full_df[label].dtype == object:
continue
if "labels_mean" in self.dk.data:
dk.full_df.at[index, f"{label}_mean"] = (
self.dk.data["labels_mean"][label])
if "labels_std" in self.dk.data:
dk.full_df.at[index, f"{label}_std"] = self.dk.data["labels_std"][label]
for extra_col in self.dk.data["extra_returns_per_train"]:
dk.full_df.at[index, f"{extra_col}"] = (
self.dk.data["extra_returns_per_train"][extra_col])
return
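A minimal, self-contained sketch of the sliding-window statistics computed above, using a single hypothetical label column ("&-s_close") and plain scipy in place of the `spy` alias used in this file:

    import numpy as np
    import pandas as pd
    from scipy import stats

    fit_live_predictions_candles = 100
    full_df = pd.DataFrame({"&-s_close": np.random.normal(0, 1, 500)})

    for index in range(fit_live_predictions_candles, len(full_df)):
        # trailing window of predictions, mimicking a dummy historic_predictions
        window = full_df["&-s_close"].iloc[index - fit_live_predictions_candles:index]
        mean, std = stats.norm.fit(window)  # the same norm.fit used by fit_live_predictions
        full_df.at[index, "&-s_close_mean"] = mean
        full_df.at[index, "&-s_close_std"] = std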
def update_metadata(self, metadata: Dict[str, Any]):
"""
Update global metadata and save the updated json file
:param metadata: new global metadata dict
"""
self.dd.save_global_metadata_to_disk(metadata)
self.metadata = metadata
def set_start_dry_live_date(self, live_dataframe: DataFrame):
key_name = "start_dry_live_date"
if key_name not in self.metadata:
metadata = self.metadata
metadata[key_name] = int(
pd.to_datetime(live_dataframe.tail(1)["date"].values[0]).timestamp())
self.update_metadata(metadata)
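In effect this pins the first candle seen in dry/live mode into the global metadata JSON, once. Reading it back (the timestamp value is taken from the test added later in this diff):

    metadata = freqai.dd.load_global_metadata_from_disk()
    metadata["start_dry_live_date"]  # e.g. 1516406400 (2018-01-20 00:00 UTC)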
def start_backtesting_from_historic_predictions(
self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen
) -> FreqaiDataKitchen:
"""
:param dataframe: DataFrame = dataframe passed in by the strategy
:param metadata: Dict = pair metadata
:param dk: FreqaiDataKitchen = data management/analysis tool associated with the current pair
:return:
FreqaiDataKitchen = data management/analysis tool associated with the current pair
"""
pair = metadata["pair"]
dk.return_dataframe = dataframe
saved_dataframe = self.dd.historic_predictions[pair]
columns_to_drop = list(set(saved_dataframe.columns).intersection(
dk.return_dataframe.columns))
dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop))
dk.return_dataframe = pd.merge(
dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred")
# dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0)
return dk
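A minimal sketch of the merge above with toy frames (column names hypothetical apart from date/date_pred): the left join keeps every strategy candle, and candles without a stored prediction get NaN prediction columns.

    import pandas as pd

    strategy_df = pd.DataFrame({"date": pd.date_range("2018-01-28", periods=3, freq="5min")})
    saved = pd.DataFrame({
        "date_pred": pd.date_range("2018-01-28", periods=2, freq="5min"),
        "&-s_close": [0.01, -0.02],
    })
    merged = pd.merge(strategy_df, saved, how="left", left_on="date", right_on="date_pred")
    # merged has 3 rows; the last carries NaN for "&-s_close" and "date_pred"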
# Following methods which are overridden by user made prediction models. # Following methods which are overridden by user made prediction models.
# See freqai/prediction_models/CatboostPredictionModel.py for an example. # See freqai/prediction_models/CatboostPredictionModel.py for an example.
View File
@ -14,6 +14,7 @@ from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data
from freqtrade.exceptions import OperationalException from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds from freqtrade.exchange import timeframe_to_seconds
from freqtrade.exchange.exchange import market_is_active from freqtrade.exchange.exchange import market_is_active
from freqtrade.freqai.data_drawer import FreqaiDataDrawer
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
@ -229,5 +230,6 @@ def get_timerange_backtest_live_models(config: Config) -> str:
""" """
dk = FreqaiDataKitchen(config) dk = FreqaiDataKitchen(config)
models_path = dk.get_full_models_path(config) models_path = dk.get_full_models_path(config)
timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) dd = FreqaiDataDrawer(models_path, config)
timerange = dd.get_timerange_from_live_historic_predictions()
return timerange.timerange_str return timerange.timerange_str
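The timerange is now derived from the saved historic predictions instead of model folder names. A rough sketch of the derivation, assuming the shape suggested by the tests later in this diff (start at the start_dry_live_date metadata, stop at the last stored prediction; all names here are assumed):

    start_ts = metadata["start_dry_live_date"]                 # e.g. 1516406400
    stop_ts = int(historic_predictions[pair]["date_pred"].iloc[-1].timestamp())
    timerange = TimeRange("date", "date", start_ts, stop_ts)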
View File
@ -7,6 +7,8 @@ import logging
import sys import sys
from typing import Any, List from typing import Any, List
from freqtrade.util.gc_setup import gc_set_threshold
# check min. python version # check min. python version
if sys.version_info < (3, 8): # pragma: no cover if sys.version_info < (3, 8): # pragma: no cover
@ -36,6 +38,7 @@ def main(sysargv: List[str] = None) -> None:
# Call subcommand. # Call subcommand.
if 'func' in args: if 'func' in args:
logger.info(f'freqtrade {__version__}') logger.info(f'freqtrade {__version__}')
gc_set_threshold()
return_code = args['func'](args) return_code = args['func'](args)
else: else:
# No subcommand was issued. # No subcommand was issued.
View File
@ -87,7 +87,7 @@ class PairLocks():
Get the lock that expires the latest for the pair given. Get the lock that expires the latest for the pair given.
""" """
locks = PairLocks.get_pair_locks(pair, now, side=side) locks = PairLocks.get_pair_locks(pair, now, side=side)
locks = sorted(locks, key=lambda l: l.lock_end_time, reverse=True) locks = sorted(locks, key=lambda lock: lock.lock_end_time, reverse=True)
return locks[0] if locks else None return locks[0] if locks else None
@staticmethod @staticmethod
View File
@ -740,6 +740,24 @@ class RPC:
self._freqtrade.wallets.update() self._freqtrade.wallets.update()
return {'result': f'Created sell order for trade {trade_id}.'} return {'result': f'Created sell order for trade {trade_id}.'}
def _force_entry_validations(self, pair: str, order_side: SignalDirection):
if not self._freqtrade.config.get('force_entry_enable', False):
raise RPCException('Force_entry not enabled.')
if self._freqtrade.state != State.RUNNING:
raise RPCException('trader is not running')
if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT:
raise RPCException("Can't go short on Spot markets.")
if pair not in self._freqtrade.exchange.get_markets(tradable_only=True):
raise RPCException('Symbol does not exist or market is not active.')
# Check that the pair's quote currency matches the stake currency.
stake_currency = self._freqtrade.config.get('stake_currency')
if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency:
raise RPCException(
f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.')
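Pulling the checks into `_force_entry_validations` also makes them unit-testable in isolation; a hypothetical sketch, with `rpc` being an RPC instance whose config leaves force_entry_enable unset:

    import pytest
    from freqtrade.enums import SignalDirection
    from freqtrade.rpc import RPCException

    with pytest.raises(RPCException, match='Force_entry not enabled.'):
        rpc._force_entry_validations('ETH/BTC', SignalDirection.LONG)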
def _rpc_force_entry(self, pair: str, price: Optional[float], *, def _rpc_force_entry(self, pair: str, price: Optional[float], *,
order_type: Optional[str] = None, order_type: Optional[str] = None,
order_side: SignalDirection = SignalDirection.LONG, order_side: SignalDirection = SignalDirection.LONG,
@ -750,21 +768,8 @@ class RPC:
Handler for forcebuy <asset> <price> Handler for forcebuy <asset> <price>
Buys a pair trade at the given or current price Buys a pair trade at the given or current price
""" """
self._force_entry_validations(pair, order_side)
if not self._freqtrade.config.get('force_entry_enable', False):
raise RPCException('Force_entry not enabled.')
if self._freqtrade.state != State.RUNNING:
raise RPCException('trader is not running')
if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT:
raise RPCException("Can't go short on Spot markets.")
# Check if pair quote currency equals to the stake currency.
stake_currency = self._freqtrade.config.get('stake_currency')
if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency:
raise RPCException(
f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.')
# check if valid pair # check if valid pair
# check if pair already has an open pair # check if pair already has an open pair
View File
@ -79,6 +79,8 @@ def authorized_only(command_handler: Callable[..., None]) -> Callable[..., Any]:
) )
try: try:
return command_handler(self, *args, **kwargs) return command_handler(self, *args, **kwargs)
except RPCException as e:
self._send_msg(str(e))
except BaseException: except BaseException:
logger.exception('Exception occurred within Telegram module') logger.exception('Exception occurred within Telegram module')
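With RPCException handled here, every handler below can drop its own try/except and simply raise. Condensed, the decorator now follows this pattern (a sketch of the surrounding code, not a verbatim copy):

    from functools import wraps

    def authorized_only(command_handler):
        @wraps(command_handler)
        def wrapper(self, *args, **kwargs):
            # ... chat-id authorization checks elided ...
            try:
                return command_handler(self, *args, **kwargs)
            except RPCException as e:
                self._send_msg(str(e))  # user-facing errors go back to the chat
            except BaseException:
                logger.exception('Exception occurred within Telegram module')
        return wrapper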
@ -538,72 +540,67 @@ class Telegram(RPCHandler):
handler for `/status` and `/status <id>`. handler for `/status` and `/status <id>`.
""" """
try: # Check if there's at least one numerical ID provided.
# If so, try to get only these trades.
trade_ids = []
if context.args and len(context.args) > 0:
trade_ids = [int(i) for i in context.args if i.isnumeric()]
# Check if there's at least one numerical ID provided. results = self._rpc._rpc_trade_status(trade_ids=trade_ids)
# If so, try to get only these trades. position_adjust = self._config.get('position_adjustment_enable', False)
trade_ids = [] max_entries = self._config.get('max_entry_position_adjustment', -1)
if context.args and len(context.args) > 0: for r in results:
trade_ids = [int(i) for i in context.args if i.isnumeric()] r['open_date_hum'] = arrow.get(r['open_date']).humanize()
r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']])
r['exit_reason'] = r.get('exit_reason', "")
lines = [
"*Trade ID:* `{trade_id}`" +
(" `(since {open_date_hum})`" if r['is_open'] else ""),
"*Current Pair:* {pair}",
"*Direction:* " + ("`Short`" if r.get('is_short') else "`Long`"),
"*Leverage:* `{leverage}`" if r.get('leverage') else "",
"*Amount:* `{amount} ({stake_amount} {quote_currency})`",
"*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "",
"*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "",
]
results = self._rpc._rpc_trade_status(trade_ids=trade_ids) if position_adjust:
position_adjust = self._config.get('position_adjustment_enable', False) max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "")
max_entries = self._config.get('max_entry_position_adjustment', -1) lines.append("*Number of Entries:* `{num_entries}`" + max_buy_str)
for r in results:
r['open_date_hum'] = arrow.get(r['open_date']).humanize()
r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']])
r['exit_reason'] = r.get('exit_reason', "")
lines = [
"*Trade ID:* `{trade_id}`" +
(" `(since {open_date_hum})`" if r['is_open'] else ""),
"*Current Pair:* {pair}",
"*Direction:* " + ("`Short`" if r.get('is_short') else "`Long`"),
"*Leverage:* `{leverage}`" if r.get('leverage') else "",
"*Amount:* `{amount} ({stake_amount} {quote_currency})`",
"*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "",
"*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "",
]
if position_adjust: lines.extend([
max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "") "*Open Rate:* `{open_rate:.8f}`",
lines.append("*Number of Entries:* `{num_entries}`" + max_buy_str) "*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "",
"*Open Date:* `{open_date}`",
"*Close Date:* `{close_date}`" if r['close_date'] else "",
"*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "",
("*Current Profit:* " if r['is_open'] else "*Close Profit: *")
+ "`{profit_ratio:.2%}`",
])
lines.extend([ if r['is_open']:
"*Open Rate:* `{open_rate:.8f}`", if r.get('realized_profit'):
"*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "", lines.append("*Realized Profit:* `{realized_profit:.8f}`")
"*Open Date:* `{open_date}`", if (r['stop_loss_abs'] != r['initial_stop_loss_abs']
"*Close Date:* `{close_date}`" if r['close_date'] else "", and r['initial_stop_loss_ratio'] is not None):
"*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "", # Adding initial stoploss only if it is different from stoploss
("*Current Profit:* " if r['is_open'] else "*Close Profit: *") lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` "
+ "`{profit_ratio:.2%}`", "`({initial_stop_loss_ratio:.2%})`")
])
if r['is_open']: # Adding stoploss and stoploss percentage only if it is not None
if r.get('realized_profit'): lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " +
lines.append("*Realized Profit:* `{realized_profit:.8f}`") ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else ""))
if (r['stop_loss_abs'] != r['initial_stop_loss_abs'] lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` "
and r['initial_stop_loss_ratio'] is not None): "`({stoploss_current_dist_ratio:.2%})`")
# Adding initial stoploss only if it is different from stoploss if r['open_order']:
lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` " lines.append(
"`({initial_stop_loss_ratio:.2%})`") "*Open Order:* `{open_order}`"
+ "- `{exit_order_status}`" if r['exit_order_status'] else "")
# Adding stoploss and stoploss percentage only if it is not None lines_detail = self._prepare_order_details(
lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " + r['orders'], r['quote_currency'], r['is_open'])
("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else "")) lines.extend(lines_detail if lines_detail else "")
lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` " self.__send_status_msg(lines, r)
"`({stoploss_current_dist_ratio:.2%})`")
if r['open_order']:
lines.append(
"*Open Order:* `{open_order}`"
+ "- `{exit_order_status}`" if r['exit_order_status'] else "")
lines_detail = self._prepare_order_details(
r['orders'], r['quote_currency'], r['is_open'])
lines.extend(lines_detail if lines_detail else "")
self.__send_status_msg(lines, r)
except RPCException as e:
self._send_msg(str(e))
def __send_status_msg(self, lines: List[str], r: Dict[str, Any]) -> None: def __send_status_msg(self, lines: List[str], r: Dict[str, Any]) -> None:
""" """
@ -630,37 +627,34 @@ class Telegram(RPCHandler):
:param update: message update :param update: message update
:return: None :return: None
""" """
try: fiat_currency = self._config.get('fiat_display_currency', '')
fiat_currency = self._config.get('fiat_display_currency', '') statlist, head, fiat_profit_sum = self._rpc._rpc_status_table(
statlist, head, fiat_profit_sum = self._rpc._rpc_status_table( self._config['stake_currency'], fiat_currency)
self._config['stake_currency'], fiat_currency)
show_total = not isnan(fiat_profit_sum) and len(statlist) > 1 show_total = not isnan(fiat_profit_sum) and len(statlist) > 1
max_trades_per_msg = 50 max_trades_per_msg = 50
""" """
Calculate the number of messages of 50 trades per message Calculate the number of messages of 50 trades per message
0.99 is used to make sure that there are no extra (empty) messages 0.99 is used to make sure that there are no extra (empty) messages
As an example with 50 trades, there will be int(50/50 + 0.99) = 1 message As an example with 50 trades, there will be int(50/50 + 0.99) = 1 message
""" """
messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1) messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1)
for i in range(0, messages_count): for i in range(0, messages_count):
trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg] trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg]
if show_total and i == messages_count - 1: if show_total and i == messages_count - 1:
# append total line # append total line
trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"]) trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"])
message = tabulate(trades, message = tabulate(trades,
headers=head, headers=head,
tablefmt='simple') tablefmt='simple')
if show_total and i == messages_count - 1: if show_total and i == messages_count - 1:
# insert separators line between Total # insert separators line between Total
lines = message.split("\n") lines = message.split("\n")
message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]]) message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]])
self._send_msg(f"<pre>{message}</pre>", parse_mode=ParseMode.HTML, self._send_msg(f"<pre>{message}</pre>", parse_mode=ParseMode.HTML,
reload_able=True, callback_path="update_status_table", reload_able=True, callback_path="update_status_table",
query=update.callback_query) query=update.callback_query)
except RPCException as e:
self._send_msg(str(e))
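The `+ 0.99` in messages_count is an integer-ceiling trick: since len(statlist) is an integer, the fractional part of len/50 is always a multiple of 0.02, so adding 0.99 before truncation matches math.ceil exactly:

    import math

    for n in (1, 50, 51, 101):
        assert max(int(n / 50 + 0.99), 1) == max(math.ceil(n / 50), 1)
    # 1 trade -> 1 message, 50 -> 1, 51 -> 2, 101 -> 3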
@authorized_only @authorized_only
def _timeunit_stats(self, update: Update, context: CallbackContext, unit: str) -> None: def _timeunit_stats(self, update: Update, context: CallbackContext, unit: str) -> None:
@ -686,35 +680,32 @@ class Telegram(RPCHandler):
timescale = int(context.args[0]) if context.args else val.default timescale = int(context.args[0]) if context.args else val.default
except (TypeError, ValueError, IndexError): except (TypeError, ValueError, IndexError):
timescale = val.default timescale = val.default
try: stats = self._rpc._rpc_timeunit_profit(
stats = self._rpc._rpc_timeunit_profit( timescale,
timescale, stake_cur,
stake_cur, fiat_disp_cur,
fiat_disp_cur, unit
unit )
) stats_tab = tabulate(
stats_tab = tabulate( [[f"{period['date']} ({period['trade_count']})",
[[f"{period['date']} ({period['trade_count']})", f"{round_coin_value(period['abs_profit'], stats['stake_currency'])}",
f"{round_coin_value(period['abs_profit'], stats['stake_currency'])}", f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}",
f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}", f"{period['rel_profit']:.2%}",
f"{period['rel_profit']:.2%}", ] for period in stats['data']],
] for period in stats['data']], headers=[
headers=[ f"{val.header} (count)",
f"{val.header} (count)", f'{stake_cur}',
f'{stake_cur}', f'{fiat_disp_cur}',
f'{fiat_disp_cur}', 'Profit %',
'Profit %', 'Trades',
'Trades', ],
], tablefmt='simple')
tablefmt='simple') message = (
message = ( f'<b>{val.message} Profit over the last {timescale} {val.message2}</b>:\n'
f'<b>{val.message} Profit over the last {timescale} {val.message2}</b>:\n' f'<pre>{stats_tab}</pre>'
f'<pre>{stats_tab}</pre>' )
) self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True,
self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True, callback_path=val.callback, query=update.callback_query)
callback_path=val.callback, query=update.callback_query)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _daily(self, update: Update, context: CallbackContext) -> None: def _daily(self, update: Update, context: CallbackContext) -> None:
@ -878,79 +869,76 @@ class Telegram(RPCHandler):
@authorized_only @authorized_only
def _balance(self, update: Update, context: CallbackContext) -> None: def _balance(self, update: Update, context: CallbackContext) -> None:
""" Handler for /balance """ """ Handler for /balance """
try: result = self._rpc._rpc_balance(self._config['stake_currency'],
result = self._rpc._rpc_balance(self._config['stake_currency'], self._config.get('fiat_display_currency', ''))
self._config.get('fiat_display_currency', ''))
balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0) balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0)
if not balance_dust_level: if not balance_dust_level:
balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0) balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0)
output = '' output = ''
if self._config['dry_run']: if self._config['dry_run']:
output += "*Warning:* Simulated balances in Dry Mode.\n" output += "*Warning:* Simulated balances in Dry Mode.\n"
starting_cap = round_coin_value( starting_cap = round_coin_value(
result['starting_capital'], self._config['stake_currency']) result['starting_capital'], self._config['stake_currency'])
output += f"Starting capital: `{starting_cap}`" output += f"Starting capital: `{starting_cap}`"
starting_cap_fiat = round_coin_value( starting_cap_fiat = round_coin_value(
result['starting_capital_fiat'], self._config['fiat_display_currency'] result['starting_capital_fiat'], self._config['fiat_display_currency']
) if result['starting_capital_fiat'] > 0 else '' ) if result['starting_capital_fiat'] > 0 else ''
output += (f" `, {starting_cap_fiat}`.\n" output += (f" `, {starting_cap_fiat}`.\n"
) if result['starting_capital_fiat'] > 0 else '.\n' ) if result['starting_capital_fiat'] > 0 else '.\n'
total_dust_balance = 0 total_dust_balance = 0
total_dust_currencies = 0 total_dust_currencies = 0
for curr in result['currencies']: for curr in result['currencies']:
curr_output = '' curr_output = ''
if curr['est_stake'] > balance_dust_level: if curr['est_stake'] > balance_dust_level:
if curr['is_position']: if curr['is_position']:
curr_output = ( curr_output = (
f"*{curr['currency']}:*\n" f"*{curr['currency']}:*\n"
f"\t`{curr['side']}: {curr['position']:.8f}`\n" f"\t`{curr['side']}: {curr['position']:.8f}`\n"
f"\t`Leverage: {curr['leverage']:.1f}`\n" f"\t`Leverage: {curr['leverage']:.1f}`\n"
f"\t`Est. {curr['stake']}: " f"\t`Est. {curr['stake']}: "
f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n") f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n")
else:
curr_output = (
f"*{curr['currency']}:*\n"
f"\t`Available: {curr['free']:.8f}`\n"
f"\t`Balance: {curr['balance']:.8f}`\n"
f"\t`Pending: {curr['used']:.8f}`\n"
f"\t`Est. {curr['stake']}: "
f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n")
elif curr['est_stake'] <= balance_dust_level:
total_dust_balance += curr['est_stake']
total_dust_currencies += 1
# Handle overflowing message length
if len(output + curr_output) >= MAX_MESSAGE_LENGTH:
self._send_msg(output)
output = curr_output
else: else:
output += curr_output curr_output = (
f"*{curr['currency']}:*\n"
f"\t`Available: {curr['free']:.8f}`\n"
f"\t`Balance: {curr['balance']:.8f}`\n"
f"\t`Pending: {curr['used']:.8f}`\n"
f"\t`Est. {curr['stake']}: "
f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n")
elif curr['est_stake'] <= balance_dust_level:
total_dust_balance += curr['est_stake']
total_dust_currencies += 1
if total_dust_balance > 0: # Handle overflowing message length
output += ( if len(output + curr_output) >= MAX_MESSAGE_LENGTH:
f"*{total_dust_currencies} Other " self._send_msg(output)
f"{plural(total_dust_currencies, 'Currency', 'Currencies')} " output = curr_output
f"(< {balance_dust_level} {result['stake']}):*\n" else:
f"\t`Est. {result['stake']}: " output += curr_output
f"{round_coin_value(total_dust_balance, result['stake'], False)}`\n")
tc = result['trade_count'] > 0
stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else ''
fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else ''
output += ("\n*Estimated Value*:\n" if total_dust_balance > 0:
f"\t`{result['stake']}: " output += (
f"{round_coin_value(result['total'], result['stake'], False)}`" f"*{total_dust_currencies} Other "
f"{stake_improve}\n" f"{plural(total_dust_currencies, 'Currency', 'Currencies')} "
f"\t`{result['symbol']}: " f"(< {balance_dust_level} {result['stake']}):*\n"
f"{round_coin_value(result['value'], result['symbol'], False)}`" f"\t`Est. {result['stake']}: "
f"{fiat_val}\n") f"{round_coin_value(total_dust_balance, result['stake'], False)}`\n")
self._send_msg(output, reload_able=True, callback_path="update_balance", tc = result['trade_count'] > 0
query=update.callback_query) stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else ''
except RPCException as e: fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else ''
self._send_msg(str(e))
output += ("\n*Estimated Value*:\n"
f"\t`{result['stake']}: "
f"{round_coin_value(result['total'], result['stake'], False)}`"
f"{stake_improve}\n"
f"\t`{result['symbol']}: "
f"{round_coin_value(result['value'], result['symbol'], False)}`"
f"{fiat_val}\n")
self._send_msg(output, reload_able=True, callback_path="update_balance",
query=update.callback_query)
@authorized_only @authorized_only
def _start(self, update: Update, context: CallbackContext) -> None: def _start(self, update: Update, context: CallbackContext) -> None:
@ -1125,26 +1113,23 @@ class Telegram(RPCHandler):
nrecent = int(context.args[0]) if context.args else 10 nrecent = int(context.args[0]) if context.args else 10
except (TypeError, ValueError, IndexError): except (TypeError, ValueError, IndexError):
nrecent = 10 nrecent = 10
try: trades = self._rpc._rpc_trade_history(
trades = self._rpc._rpc_trade_history( nrecent
nrecent )
) trades_tab = tabulate(
trades_tab = tabulate( [[arrow.get(trade['close_date']).humanize(),
[[arrow.get(trade['close_date']).humanize(), trade['pair'] + " (#" + str(trade['trade_id']) + ")",
trade['pair'] + " (#" + str(trade['trade_id']) + ")", f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"]
f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"] for trade in trades['trades']],
for trade in trades['trades']], headers=[
headers=[ 'Close Date',
'Close Date', 'Pair (ID)',
'Pair (ID)', f'Profit ({stake_cur})',
f'Profit ({stake_cur})', ],
], tablefmt='simple')
tablefmt='simple') message = (f"<b>{min(trades['trades_count'], nrecent)} recent trades</b>:\n"
message = (f"<b>{min(trades['trades_count'], nrecent)} recent trades</b>:\n" + (f"<pre>{trades_tab}</pre>" if trades['trades_count'] > 0 else ''))
+ (f"<pre>{trades_tab}</pre>" if trades['trades_count'] > 0 else '')) self._send_msg(message, parse_mode=ParseMode.HTML)
self._send_msg(message, parse_mode=ParseMode.HTML)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _delete_trade(self, update: Update, context: CallbackContext) -> None: def _delete_trade(self, update: Update, context: CallbackContext) -> None:
@ -1155,18 +1140,14 @@ class Telegram(RPCHandler):
:param update: message update :param update: message update
:return: None :return: None
""" """
try: if not context.args or len(context.args) == 0:
if not context.args or len(context.args) == 0: raise RPCException("Trade-id not set.")
raise RPCException("Trade-id not set.") trade_id = int(context.args[0])
trade_id = int(context.args[0]) msg = self._rpc._rpc_delete(trade_id)
msg = self._rpc._rpc_delete(trade_id) self._send_msg((
self._send_msg(( f"`{msg['result_msg']}`\n"
f"`{msg['result_msg']}`\n" 'Please make sure to take care of this asset on the exchange manually.'
'Please make sure to take care of this asset on the exchange manually.' ))
))
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _performance(self, update: Update, context: CallbackContext) -> None: def _performance(self, update: Update, context: CallbackContext) -> None:
@ -1177,27 +1158,24 @@ class Telegram(RPCHandler):
:param update: message update :param update: message update
:return: None :return: None
""" """
try: trades = self._rpc._rpc_performance()
trades = self._rpc._rpc_performance() output = "<b>Performance:</b>\n"
output = "<b>Performance:</b>\n" for i, trade in enumerate(trades):
for i, trade in enumerate(trades): stat_line = (
stat_line = ( f"{i+1}.\t <code>{trade['pair']}\t"
f"{i+1}.\t <code>{trade['pair']}\t" f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " f"({trade['profit_ratio']:.2%}) "
f"({trade['profit_ratio']:.2%}) " f"({trade['count']})</code>\n")
f"({trade['count']})</code>\n")
if len(output + stat_line) >= MAX_MESSAGE_LENGTH: if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
self._send_msg(output, parse_mode=ParseMode.HTML) self._send_msg(output, parse_mode=ParseMode.HTML)
output = stat_line output = stat_line
else: else:
output += stat_line output += stat_line
self._send_msg(output, parse_mode=ParseMode.HTML, self._send_msg(output, parse_mode=ParseMode.HTML,
reload_able=True, callback_path="update_performance", reload_able=True, callback_path="update_performance",
query=update.callback_query) query=update.callback_query)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _enter_tag_performance(self, update: Update, context: CallbackContext) -> None: def _enter_tag_performance(self, update: Update, context: CallbackContext) -> None:
@ -1208,31 +1186,28 @@ class Telegram(RPCHandler):
:param update: message update :param update: message update
:return: None :return: None
""" """
try: pair = None
pair = None if context.args and isinstance(context.args[0], str):
if context.args and isinstance(context.args[0], str): pair = context.args[0]
pair = context.args[0]
trades = self._rpc._rpc_enter_tag_performance(pair) trades = self._rpc._rpc_enter_tag_performance(pair)
output = "<b>Entry Tag Performance:</b>\n" output = "<b>Entry Tag Performance:</b>\n"
for i, trade in enumerate(trades): for i, trade in enumerate(trades):
stat_line = ( stat_line = (
f"{i+1}.\t <code>{trade['enter_tag']}\t" f"{i+1}.\t <code>{trade['enter_tag']}\t"
f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
f"({trade['profit_ratio']:.2%}) " f"({trade['profit_ratio']:.2%}) "
f"({trade['count']})</code>\n") f"({trade['count']})</code>\n")
if len(output + stat_line) >= MAX_MESSAGE_LENGTH: if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
self._send_msg(output, parse_mode=ParseMode.HTML) self._send_msg(output, parse_mode=ParseMode.HTML)
output = stat_line output = stat_line
else: else:
output += stat_line output += stat_line
self._send_msg(output, parse_mode=ParseMode.HTML, self._send_msg(output, parse_mode=ParseMode.HTML,
reload_able=True, callback_path="update_enter_tag_performance", reload_able=True, callback_path="update_enter_tag_performance",
query=update.callback_query) query=update.callback_query)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _exit_reason_performance(self, update: Update, context: CallbackContext) -> None: def _exit_reason_performance(self, update: Update, context: CallbackContext) -> None:
@ -1243,31 +1218,28 @@ class Telegram(RPCHandler):
:param update: message update :param update: message update
:return: None :return: None
""" """
try: pair = None
pair = None if context.args and isinstance(context.args[0], str):
if context.args and isinstance(context.args[0], str): pair = context.args[0]
pair = context.args[0]
trades = self._rpc._rpc_exit_reason_performance(pair) trades = self._rpc._rpc_exit_reason_performance(pair)
output = "<b>Exit Reason Performance:</b>\n" output = "<b>Exit Reason Performance:</b>\n"
for i, trade in enumerate(trades): for i, trade in enumerate(trades):
stat_line = ( stat_line = (
f"{i+1}.\t <code>{trade['exit_reason']}\t" f"{i+1}.\t <code>{trade['exit_reason']}\t"
f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
f"({trade['profit_ratio']:.2%}) " f"({trade['profit_ratio']:.2%}) "
f"({trade['count']})</code>\n") f"({trade['count']})</code>\n")
if len(output + stat_line) >= MAX_MESSAGE_LENGTH: if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
self._send_msg(output, parse_mode=ParseMode.HTML) self._send_msg(output, parse_mode=ParseMode.HTML)
output = stat_line output = stat_line
else: else:
output += stat_line output += stat_line
self._send_msg(output, parse_mode=ParseMode.HTML, self._send_msg(output, parse_mode=ParseMode.HTML,
reload_able=True, callback_path="update_exit_reason_performance", reload_able=True, callback_path="update_exit_reason_performance",
query=update.callback_query) query=update.callback_query)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _mix_tag_performance(self, update: Update, context: CallbackContext) -> None: def _mix_tag_performance(self, update: Update, context: CallbackContext) -> None:
@ -1278,31 +1250,28 @@ class Telegram(RPCHandler):
:param update: message update :param update: message update
:return: None :return: None
""" """
try: pair = None
pair = None if context.args and isinstance(context.args[0], str):
if context.args and isinstance(context.args[0], str): pair = context.args[0]
pair = context.args[0]
trades = self._rpc._rpc_mix_tag_performance(pair) trades = self._rpc._rpc_mix_tag_performance(pair)
output = "<b>Mix Tag Performance:</b>\n" output = "<b>Mix Tag Performance:</b>\n"
for i, trade in enumerate(trades): for i, trade in enumerate(trades):
stat_line = ( stat_line = (
f"{i+1}.\t <code>{trade['mix_tag']}\t" f"{i+1}.\t <code>{trade['mix_tag']}\t"
f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
f"({trade['profit']:.2%}) " f"({trade['profit']:.2%}) "
f"({trade['count']})</code>\n") f"({trade['count']})</code>\n")
if len(output + stat_line) >= MAX_MESSAGE_LENGTH: if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
self._send_msg(output, parse_mode=ParseMode.HTML) self._send_msg(output, parse_mode=ParseMode.HTML)
output = stat_line output = stat_line
else: else:
output += stat_line output += stat_line
self._send_msg(output, parse_mode=ParseMode.HTML, self._send_msg(output, parse_mode=ParseMode.HTML,
reload_able=True, callback_path="update_mix_tag_performance", reload_able=True, callback_path="update_mix_tag_performance",
query=update.callback_query) query=update.callback_query)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _count(self, update: Update, context: CallbackContext) -> None: def _count(self, update: Update, context: CallbackContext) -> None:
@ -1313,18 +1282,15 @@ class Telegram(RPCHandler):
:param update: message update :param update: message update
:return: None :return: None
""" """
try: counts = self._rpc._rpc_count()
counts = self._rpc._rpc_count() message = tabulate({k: [v] for k, v in counts.items()},
message = tabulate({k: [v] for k, v in counts.items()}, headers=['current', 'max', 'total stake'],
headers=['current', 'max', 'total stake'], tablefmt='simple')
tablefmt='simple') message = "<pre>{}</pre>".format(message)
message = "<pre>{}</pre>".format(message) logger.debug(message)
logger.debug(message) self._send_msg(message, parse_mode=ParseMode.HTML,
self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True, callback_path="update_count",
reload_able=True, callback_path="update_count", query=update.callback_query)
query=update.callback_query)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _locks(self, update: Update, context: CallbackContext) -> None: def _locks(self, update: Update, context: CallbackContext) -> None:
@ -1372,22 +1338,19 @@ class Telegram(RPCHandler):
Handler for /whitelist Handler for /whitelist
Shows the currently active whitelist Shows the currently active whitelist
""" """
try: whitelist = self._rpc._rpc_whitelist()
whitelist = self._rpc._rpc_whitelist()
if context.args: if context.args:
if "sorted" in context.args: if "sorted" in context.args:
whitelist['whitelist'] = sorted(whitelist['whitelist']) whitelist['whitelist'] = sorted(whitelist['whitelist'])
if "baseonly" in context.args: if "baseonly" in context.args:
whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']] whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']]
message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n" message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n"
message += f"`{', '.join(whitelist['whitelist'])}`" message += f"`{', '.join(whitelist['whitelist'])}`"
logger.debug(message) logger.debug(message)
self._send_msg(message) self._send_msg(message)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _blacklist(self, update: Update, context: CallbackContext) -> None: def _blacklist(self, update: Update, context: CallbackContext) -> None:
@ -1425,30 +1388,27 @@ class Telegram(RPCHandler):
Shows the latest logs Shows the latest logs
""" """
try: try:
try: limit = int(context.args[0]) if context.args else 10
limit = int(context.args[0]) if context.args else 10 except (TypeError, ValueError, IndexError):
except (TypeError, ValueError, IndexError): limit = 10
limit = 10 logs = RPC._rpc_get_logs(limit)['logs']
logs = RPC._rpc_get_logs(limit)['logs'] msgs = ''
msgs = '' msg_template = "*{}* {}: {} \\- `{}`"
msg_template = "*{}* {}: {} \\- `{}`" for logrec in logs:
for logrec in logs: msg = msg_template.format(escape_markdown(logrec[0], version=2),
msg = msg_template.format(escape_markdown(logrec[0], version=2), escape_markdown(logrec[2], version=2),
escape_markdown(logrec[2], version=2), escape_markdown(logrec[3], version=2),
escape_markdown(logrec[3], version=2), escape_markdown(logrec[4], version=2))
escape_markdown(logrec[4], version=2)) if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH:
if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH: # Send message immediately if it would become too long
# Send message immediately if it would become too long
self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
msgs = msg + '\n'
else:
# Append message to messages to send
msgs += msg + '\n'
if msgs:
self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2) self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
except RPCException as e: msgs = msg + '\n'
self._send_msg(str(e)) else:
# Append message to messages to send
msgs += msg + '\n'
if msgs:
self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
@authorized_only @authorized_only
def _edge(self, update: Update, context: CallbackContext) -> None: def _edge(self, update: Update, context: CallbackContext) -> None:
@ -1456,21 +1416,17 @@ class Telegram(RPCHandler):
Handler for /edge Handler for /edge
Shows information related to Edge Shows information related to Edge
""" """
try: edge_pairs = self._rpc._rpc_edge()
edge_pairs = self._rpc._rpc_edge() if not edge_pairs:
if not edge_pairs: message = '<b>Edge only validated following pairs:</b>'
message = '<b>Edge only validated following pairs:</b>' self._send_msg(message, parse_mode=ParseMode.HTML)
self._send_msg(message, parse_mode=ParseMode.HTML)
for chunk in chunks(edge_pairs, 25): for chunk in chunks(edge_pairs, 25):
edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple') edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple')
message = (f'<b>Edge only validated following pairs:</b>\n' message = (f'<b>Edge only validated following pairs:</b>\n'
f'<pre>{edge_pairs_tab}</pre>') f'<pre>{edge_pairs_tab}</pre>')
self._send_msg(message, parse_mode=ParseMode.HTML) self._send_msg(message, parse_mode=ParseMode.HTML)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _help(self, update: Update, context: CallbackContext) -> None: def _help(self, update: Update, context: CallbackContext) -> None:
@ -1551,12 +1507,9 @@ class Telegram(RPCHandler):
Handler for /health Handler for /health
Shows the last process timestamp Shows the last process timestamp
""" """
try: health = self._rpc._health()
health = self._rpc._health() message = f"Last process: `{health['last_process_loc']}`"
message = f"Last process: `{health['last_process_loc']}`" self._send_msg(message)
self._send_msg(message)
except RPCException as e:
self._send_msg(str(e))
@authorized_only @authorized_only
def _version(self, update: Update, context: CallbackContext) -> None: def _version(self, update: Update, context: CallbackContext) -> None:
View File
@ -19,7 +19,7 @@ class FreqaiExampleHybridStrategy(IStrategy):
Launching this strategy would be: Launching this strategy would be:
freqtrade trade --strategy FreqaiExampleHyridStrategy --strategy-path freqtrade/templates freqtrade trade --strategy FreqaiExampleHybridStrategy --strategy-path freqtrade/templates
--freqaimodel CatboostClassifier --config config_examples/config_freqai.example.json --freqaimodel CatboostClassifier --config config_examples/config_freqai.example.json
or the user simply adds this to their config: or the user simply adds this to their config:
@ -86,7 +86,7 @@ class FreqaiExampleHybridStrategy(IStrategy):
process_only_new_candles = True process_only_new_candles = True
stoploss = -0.05 stoploss = -0.05
use_exit_signal = True use_exit_signal = True
startup_candle_count: int = 300 startup_candle_count: int = 30
can_short = True can_short = True
# Hyperoptable parameters # Hyperoptable parameters
View File
@ -328,7 +328,7 @@
"# Show graph inline\n", "# Show graph inline\n",
"# graph.show()\n", "# graph.show()\n",
"\n", "\n",
"# Render graph in a seperate window\n", "# Render graph in a separate window\n",
"graph.show(renderer=\"browser\")\n" "graph.show(renderer=\"browser\")\n"
] ]
}, },
View File
@ -0,0 +1,18 @@
import gc
import logging
import platform
logger = logging.getLogger(__name__)
def gc_set_threshold():
"""
Reduce the number of GC runs to improve performance (explanation video:
https://www.youtube.com/watch?v=p4Sn6UcFTOU)
"""
if platform.python_implementation() == "CPython":
# allocs, g1, g2 = gc.get_threshold()
gc.set_threshold(50_000, 500, 1000)
logger.debug("Adjusting python allocations to reduce GC runs")
View File
@ -7,7 +7,7 @@
-r docs/requirements-docs.txt -r docs/requirements-docs.txt
coveralls==3.3.1 coveralls==3.3.1
flake8==5.0.4 flake8==6.0.0
flake8-tidy-imports==4.8.0 flake8-tidy-imports==4.8.0
mypy==0.991 mypy==0.991
pre-commit==2.20.0 pre-commit==2.20.0
@ -15,7 +15,7 @@ pytest==7.2.0
pytest-asyncio==0.20.2 pytest-asyncio==0.20.2
pytest-cov==4.0.0 pytest-cov==4.0.0
pytest-mock==3.10.0 pytest-mock==3.10.0
pytest-random-order==1.0.4 pytest-random-order==1.1.0
isort==5.10.1 isort==5.10.1
# For datetime mocking # For datetime mocking
time-machine==2.8.2 time-machine==2.8.2
View File
@ -2,7 +2,8 @@
-r requirements-freqai.txt -r requirements-freqai.txt
# Required for freqai-rl # Required for freqai-rl
torch==1.12.1 torch==1.13.0
stable-baselines3==1.6.1 stable-baselines3==1.6.2
sb3-contrib==1.6.2
# Gym is forced to this version by stable-baselines3.
gym==0.21 gym==0.21
sb3-contrib==1.6.1
View File
@ -1,19 +1,19 @@
numpy==1.23.5 numpy==1.23.5
pandas==1.5.1 pandas==1.5.2
pandas-ta==0.3.14b pandas-ta==0.3.14b
ccxt==2.1.96 ccxt==2.2.67
# Pin cryptography for now due to rust build errors with piwheels # Pin cryptography for now due to rust build errors with piwheels
cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.1; platform_machine == 'armv7l'
cryptography==38.0.3; platform_machine != 'armv7l' cryptography==38.0.4; platform_machine != 'armv7l'
aiohttp==3.8.3 aiohttp==3.8.3
SQLAlchemy==1.4.44 SQLAlchemy==1.4.44
python-telegram-bot==13.14 python-telegram-bot==13.14
arrow==1.2.3 arrow==1.2.3
cachetools==4.2.2 cachetools==4.2.2
requests==2.28.1 requests==2.28.1
urllib3==1.26.12 urllib3==1.26.13
jsonschema==4.17.0 jsonschema==4.17.3
TA-Lib==0.4.25 TA-Lib==0.4.25
technical==1.3.0 technical==1.3.0
tabulate==0.9.0 tabulate==0.9.0
@ -22,7 +22,7 @@ jinja2==3.1.2
tables==3.7.0 tables==3.7.0
blosc==1.10.6 blosc==1.10.6
joblib==1.2.0 joblib==1.2.0
pyarrow==10.0.0; platform_machine != 'armv7l' pyarrow==10.0.1; platform_machine != 'armv7l'
# find first, C search in arrays # find first, C search in arrays
py_find_1st==1.1.5 py_find_1st==1.1.5
@ -30,13 +30,13 @@ py_find_1st==1.1.5
# Load ticker files 30% faster # Load ticker files 30% faster
python-rapidjson==1.9 python-rapidjson==1.9
# Properly format api responses # Properly format api responses
orjson==3.8.2 orjson==3.8.3
# Notify systemd # Notify systemd
sdnotify==0.3.2 sdnotify==0.3.2
# API Server # API Server
fastapi==0.87.0 fastapi==0.88.0
pydantic==1.10.2 pydantic==1.10.2
uvicorn==0.20.0 uvicorn==0.20.0
pyjwt==2.6.0 pyjwt==2.6.0
@ -47,7 +47,7 @@ psutil==5.9.4
colorama==0.4.6 colorama==0.4.6
# Building config files interactively # Building config files interactively
questionary==1.10.0 questionary==1.10.0
prompt-toolkit==3.0.32 prompt-toolkit==3.0.33
# Extensions to datetime library # Extensions to datetime library
python-dateutil==2.8.2 python-dateutil==2.8.2
View File
@ -189,3 +189,10 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp
assert '0.5' in captured.out assert '0.5' in captured.out
assert '1' in captured.out assert '1' in captured.out
assert '2.5' in captured.out assert '2.5' in captured.out
# test date filtering
args = get_args(base_args + ['--timerange', "20180129-20180130"])
start_analysis_entries_exits(args)
captured = capsys.readouterr()
assert 'enter_tag_long_a' in captured.out
assert 'enter_tag_long_b' not in captured.out
View File
@ -28,15 +28,15 @@ EXCHANGES = {
'leverage_tiers_public': False, 'leverage_tiers_public': False,
'leverage_in_spot_market': False, 'leverage_in_spot_market': False,
}, },
'binance': { # 'binance': {
'pair': 'BTC/USDT', # 'pair': 'BTC/USDT',
'stake_currency': 'USDT', # 'stake_currency': 'USDT',
'hasQuoteVolume': True, # 'hasQuoteVolume': True,
'timeframe': '5m', # 'timeframe': '5m',
'futures': True, # 'futures': True,
'leverage_tiers_public': False, # 'leverage_tiers_public': False,
'leverage_in_spot_market': False, # 'leverage_in_spot_market': False,
}, # },
'kraken': { 'kraken': {
'pair': 'BTC/USDT', 'pair': 'BTC/USDT',
'stake_currency': 'USDT', 'stake_currency': 'USDT',
View File
@ -65,6 +65,8 @@ def test_freqai_backtest_live_models_model_not_found(freqai_conf, mocker, testda
mocker.patch('freqtrade.optimize.backtesting.history.load_data') mocker.patch('freqtrade.optimize.backtesting.history.load_data')
mocker.patch('freqtrade.optimize.backtesting.history.get_timerange', return_value=(now, now)) mocker.patch('freqtrade.optimize.backtesting.history.get_timerange', return_value=(now, now))
freqai_conf["timerange"] = "" freqai_conf["timerange"] = ""
freqai_conf.get("freqai", {}).update({"backtest_using_historic_predictions": False})
patched_configuration_load_config_file(mocker, freqai_conf) patched_configuration_load_config_file(mocker, freqai_conf)
args = [ args = [
@ -79,7 +81,7 @@ def test_freqai_backtest_live_models_model_not_found(freqai_conf, mocker, testda
bt_config = setup_optimize_configuration(args, RunMode.BACKTEST) bt_config = setup_optimize_configuration(args, RunMode.BACKTEST)
with pytest.raises(OperationalException, with pytest.raises(OperationalException,
match=r".* Saved models are required to run backtest .*"): match=r".* Historic predictions data is required to run backtest .*"):
Backtesting(bt_config) Backtesting(bt_config)
Backtesting.cleanup() Backtesting.cleanup()
View File
@ -2,8 +2,11 @@
import shutil import shutil
from pathlib import Path from pathlib import Path
import pytest
from freqtrade.configuration import TimeRange from freqtrade.configuration import TimeRange
from freqtrade.data.dataprovider import DataProvider from freqtrade.data.dataprovider import DataProvider
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from tests.conftest import get_patched_exchange from tests.conftest import get_patched_exchange
from tests.freqai.conftest import get_patched_freqai_strategy from tests.freqai.conftest import get_patched_freqai_strategy
@ -93,3 +96,37 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf):
assert len(df.columns) == 33 assert len(df.columns) == 33
shutil.rmtree(Path(freqai.dk.full_path)) shutil.rmtree(Path(freqai.dk.full_path))
def test_get_timerange_from_live_historic_predictions(mocker, freqai_conf):
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
exchange = get_patched_exchange(mocker, freqai_conf)
strategy.dp = DataProvider(freqai_conf, exchange)
freqai = strategy.freqai
freqai.live = True
freqai.dk = FreqaiDataKitchen(freqai_conf)
timerange = TimeRange.parse_timerange("20180126-20180130")
freqai.dd.load_all_pair_histories(timerange, freqai.dk)
sub_timerange = TimeRange.parse_timerange("20180128-20180130")
_, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "ADA/BTC", freqai.dk)
base_df["5m"]["date_pred"] = base_df["5m"]["date"]
freqai.dd.historic_predictions = {}
freqai.dd.historic_predictions["ADA/USDT"] = base_df["5m"]
freqai.dd.save_historic_predictions_to_disk()
freqai.dd.save_global_metadata_to_disk({"start_dry_live_date": 1516406400})
timerange = freqai.dd.get_timerange_from_live_historic_predictions()
assert timerange.startts == 1516406400
assert timerange.stopts == 1517356500
def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_conf):
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
exchange = get_patched_exchange(mocker, freqai_conf)
strategy.dp = DataProvider(freqai_conf, exchange)
freqai = strategy.freqai
with pytest.raises(
OperationalException,
match=r'Historic predictions not found.*'
):
freqai.dd.get_timerange_from_live_historic_predictions()
View File
@ -9,7 +9,6 @@ from freqtrade.configuration import TimeRange
from freqtrade.data.dataprovider import DataProvider from freqtrade.data.dataprovider import DataProvider
from freqtrade.exceptions import OperationalException from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.utils import get_timerange_backtest_live_models
from tests.conftest import get_patched_exchange, log_has_re from tests.conftest import get_patched_exchange, log_has_re
from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy, from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy,
make_data_dictionary, make_unfiltered_dataframe) make_data_dictionary, make_unfiltered_dataframe)
@ -166,71 +165,6 @@ def test_make_train_test_datasets(mocker, freqai_conf):
assert len(data_dictionary['train_features'].index) == 1916 assert len(data_dictionary['train_features'].index) == 1916

def test_get_pairs_timestamp_validation(mocker, freqai_conf):
    exchange = get_patched_exchange(mocker, freqai_conf)
    strategy = get_patched_freqai_strategy(mocker, freqai_conf)
    strategy.dp = DataProvider(freqai_conf, exchange)
    strategy.freqai_info = freqai_conf.get("freqai", {})
    freqai = strategy.freqai
    freqai.live = True
    freqai.dk = FreqaiDataKitchen(freqai_conf)
    freqai_conf['freqai'].update({"identifier": "invalid_id"})
    model_path = freqai.dk.get_full_models_path(freqai_conf)

    with pytest.raises(
            OperationalException,
            match=r'.*required to run backtest with the freqai-backtest-live-models.*'
    ):
        freqai.dk.get_assets_timestamps_training_from_ready_models(model_path)


@pytest.mark.parametrize('model', [
    'LightGBMRegressor'
    ])
def test_get_timerange_from_ready_models(mocker, freqai_conf, model):
    freqai_conf.update({"freqaimodel": model})
    freqai_conf.update({"timerange": "20180110-20180130"})
    freqai_conf.update({"strategy": "freqai_test_strat"})
    strategy = get_patched_freqai_strategy(mocker, freqai_conf)
    exchange = get_patched_exchange(mocker, freqai_conf)
    strategy.dp = DataProvider(freqai_conf, exchange)
    strategy.freqai_info = freqai_conf.get("freqai", {})
    freqai = strategy.freqai
    freqai.live = True
    freqai.dk = FreqaiDataKitchen(freqai_conf)
    timerange = TimeRange.parse_timerange("20180101-20180130")
    freqai.dd.load_all_pair_histories(timerange, freqai.dk)
    freqai.dd.pair_dict = MagicMock()
    data_load_timerange = TimeRange.parse_timerange("20180101-20180130")

    # 1516233600 (2018-01-18 00:00) - Start Training 1
    # 1516406400 (2018-01-20 00:00) - End Training 1 (Backtest slice 1)
    # 1516579200 (2018-01-22 00:00) - End Training 2 (Backtest slice 2)
    # 1516838400 (2018-01-25 00:00) - End Timerange

    new_timerange = TimeRange("date", "date", 1516233600, 1516406400)
    freqai.extract_data_and_train_model(
        new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)

    new_timerange = TimeRange("date", "date", 1516406400, 1516579200)
    freqai.extract_data_and_train_model(
        new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)

    model_path = freqai.dk.get_full_models_path(freqai_conf)
    (backtesting_timerange,
     pairs_end_dates) = freqai.dk.get_timerange_and_assets_end_dates_from_ready_models(
        models_path=model_path)

    assert len(pairs_end_dates["ADA"]) == 2
    assert backtesting_timerange.startts == 1516406400
    assert backtesting_timerange.stopts == 1516838400

    backtesting_string_timerange = get_timerange_backtest_live_models(freqai_conf)
    assert backtesting_string_timerange == '20180120-20180125'

@pytest.mark.parametrize('model', [
    'LightGBMRegressor'
    ])
View File
@@ -301,7 +301,9 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):
    df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC")

    metadata = {"pair": "ADA/BTC"}
    pair = "ADA/BTC"
    metadata = {"pair": pair}
    freqai.dk.pair = pair
    freqai.start_backtesting(df, metadata, freqai.dk)
    model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()]
@@ -324,6 +326,9 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):
    df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC")
    pair = "ADA/BTC"
    metadata = {"pair": pair}
    freqai.dk.pair = pair
    freqai.start_backtesting(df, metadata, freqai.dk)

    assert log_has_re(
@@ -331,13 +336,43 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog):
        caplog,
    )

    pair = "ETH/BTC"
    metadata = {"pair": pair}
    freqai.dk.pair = pair
    freqai.start_backtesting(df, metadata, freqai.dk)
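    # Backtesting a second pair should add a second prediction file to the
    # backtest-predictions folder, hence the updated count below.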
    path = (freqai.dd.full_path / freqai.dk.backtest_predictions_folder)
    prediction_files = [x for x in path.iterdir() if x.is_file()]
    assert len(prediction_files) == 1
    assert len(prediction_files) == 2

    shutil.rmtree(Path(freqai.dk.full_path))

def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog):
    freqai_conf.get("freqai", {}).update({"fit_live_predictions_candles": 10})
    strategy = get_patched_freqai_strategy(mocker, freqai_conf)
    exchange = get_patched_exchange(mocker, freqai_conf)
    strategy.dp = DataProvider(freqai_conf, exchange)
    strategy.freqai_info = freqai_conf.get("freqai", {})
    freqai = strategy.freqai
    freqai.live = False
    freqai.dk = FreqaiDataKitchen(freqai_conf)
    timerange = TimeRange.parse_timerange("20180128-20180130")
    freqai.dd.load_all_pair_histories(timerange, freqai.dk)
    sub_timerange = TimeRange.parse_timerange("20180129-20180130")
    corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk)
    df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC")
    freqai.dk.pair = "ADA/BTC"
    freqai.dk.full_df = df.fillna(0)

    assert "&-s_close_mean" not in freqai.dk.full_df.columns
    assert "&-s_close_std" not in freqai.dk.full_df.columns
    freqai.backtesting_fit_live_predictions(freqai.dk)
    assert "&-s_close_mean" in freqai.dk.full_df.columns
    assert "&-s_close_std" in freqai.dk.full_df.columns
    shutil.rmtree(Path(freqai.dk.full_path))
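
# A loose sketch of what backtesting_fit_live_predictions is assumed to do for
# these assertions to pass: compute live-style label statistics over the last
# fit_live_predictions_candles rows of full_df (hypothetical code, not the
# actual freqtrade implementation):
#
#     candles = freqai_info["fit_live_predictions_candles"]  # 10 in this test
#     window = dk.full_df["&-s_close"].tail(candles)
#     dk.full_df["&-s_close_mean"] = window.mean()
#     dk.full_df["&-s_close_std"] = window.std()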

def test_follow_mode(mocker, freqai_conf):
    freqai_conf.update({"timerange": "20180110-20180130"})
View File
@@ -1056,6 +1056,10 @@ def test_rpc_force_entry(mocker, default_conf, ticker, fee, limit_buy_order_open
    assert trade.pair == pair
    assert trade.open_rate == 0.0001

    with pytest.raises(RPCException,
                       match=r'Symbol does not exist or market is not active.'):
        rpc._rpc_force_entry('LTC/NOTHING', 0.0001)
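    # The guard exercised here presumably rejects pairs missing from the
    # exchange's active markets, along the lines of (hypothetical code):
    #     if pair not in exchange.get_markets(tradable_only=True):
    #         raise RPCException('Symbol does not exist or market is not active.')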
    # Test buy pair not with stakes
    with pytest.raises(RPCException,
                       match=r'Wrong pair selected. Only pairs with stake-currency.*'):