Merge branch 'develop' into dev-merge-rl
commit 81417cb795
@@ -1,4 +1,4 @@
-FROM python:3.10.6-slim-bullseye as base
+FROM python:3.10.7-slim-bullseye as base
 
 # Setup env
 ENV LANG C.UTF-8
Binary file not shown. (Image: 191 KiB before, 185 KiB after.)
@@ -107,7 +107,7 @@ Strategy arguments:
 
 ## Test your strategy with Backtesting
 
-Now you have good Buy and Sell strategies and some historic data, you want to test it against
+Now that you have good entry and exit strategies and some historic data, you want to test them against
 real data. This is what we call [backtesting](https://en.wikipedia.org/wiki/Backtesting).
 
 Backtesting will use the crypto-currencies (pairs) from your config file and load historical candle (OHLCV) data from `user_data/data/<exchange>` by default.
@@ -215,7 +215,7 @@ Sometimes your account has certain fee rebates (fee reductions starting with a c
 To account for this in backtesting, you can use the `--fee` command line option to supply this value to backtesting.
 This fee must be a ratio, and will be applied twice (once for trade entry, and once for trade exit).
 
-For example, if the buying and selling commission fee is 0.1% (i.e., 0.001 written as ratio), then you would run backtesting as the following:
+For example, if the commission fee per order is 0.1% (i.e., 0.001 written as a ratio), then you would run backtesting as follows:
 
 ```bash
 freqtrade backtesting --fee 0.001
 ```
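As a quick sanity check of the twice-applied fee, the following illustrative Python snippet (values invented for the example, not from the docs) shows the net effect on a single trade:

```python
stake = 100.0  # stake in quote currency (illustrative)
fee = 0.001    # the ratio passed via --fee

entry_fee = stake * fee              # charged once on entry
exit_value = stake * 1.02            # assume the trade exits 2% higher
exit_fee = exit_value * fee          # charged again on exit
net_profit = exit_value - stake - entry_fee - exit_fee
print(f"{net_profit:.3f}")           # 1.798 -> slightly below the raw 2% move
```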
@@ -252,41 +252,41 @@ The most important in the backtesting is to understand the result.
 A backtesting result will look like that:
 
 ```
-========================================================= BACKTESTING REPORT ==========================================================
+========================================================= BACKTESTING REPORT =========================================================
-| Pair | Buys | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins Draws Loss Win% |
+| Pair | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins Draws Loss Win% |
-|:---------|-------:|---------------:|---------------:|-----------------:|---------------:|:-------------|-------------------------:|
+|:---------|--------:|---------------:|---------------:|-----------------:|---------------:|:-------------|-------------------------:|
 | ADA/BTC | 35 | -0.11 | -3.88 | -0.00019428 | -1.94 | 4:35:00 | 14 0 21 40.0 |
 | ARK/BTC | 11 | -0.41 | -4.52 | -0.00022647 | -2.26 | 2:03:00 | 3 0 8 27.3 |
 | BTS/BTC | 32 | 0.31 | 9.78 | 0.00048938 | 4.89 | 5:05:00 | 18 0 14 56.2 |
 | DASH/BTC | 13 | -0.08 | -1.07 | -0.00005343 | -0.53 | 4:39:00 | 6 0 7 46.2 |
 | ENG/BTC | 18 | 1.36 | 24.54 | 0.00122807 | 12.27 | 2:50:00 | 8 0 10 44.4 |
 | EOS/BTC | 36 | 0.08 | 3.06 | 0.00015304 | 1.53 | 3:34:00 | 16 0 20 44.4 |
 | ETC/BTC | 26 | 0.37 | 9.51 | 0.00047576 | 4.75 | 6:14:00 | 11 0 15 42.3 |
 | ETH/BTC | 33 | 0.30 | 9.96 | 0.00049856 | 4.98 | 7:31:00 | 16 0 17 48.5 |
 | IOTA/BTC | 32 | 0.03 | 1.09 | 0.00005444 | 0.54 | 3:12:00 | 14 0 18 43.8 |
 | LSK/BTC | 15 | 1.75 | 26.26 | 0.00131413 | 13.13 | 2:58:00 | 6 0 9 40.0 |
 | LTC/BTC | 32 | -0.04 | -1.38 | -0.00006886 | -0.69 | 4:49:00 | 11 0 21 34.4 |
 | NANO/BTC | 17 | 1.26 | 21.39 | 0.00107058 | 10.70 | 1:55:00 | 10 0 7 58.5 |
 | NEO/BTC | 23 | 0.82 | 18.97 | 0.00094936 | 9.48 | 2:59:00 | 10 0 13 43.5 |
 | REQ/BTC | 9 | 1.17 | 10.54 | 0.00052734 | 5.27 | 3:47:00 | 4 0 5 44.4 |
 | XLM/BTC | 16 | 1.22 | 19.54 | 0.00097800 | 9.77 | 3:15:00 | 7 0 9 43.8 |
 | XMR/BTC | 23 | -0.18 | -4.13 | -0.00020696 | -2.07 | 5:30:00 | 12 0 11 52.2 |
 | XRP/BTC | 35 | 0.66 | 22.96 | 0.00114897 | 11.48 | 3:49:00 | 12 0 23 34.3 |
 | ZEC/BTC | 22 | -0.46 | -10.18 | -0.00050971 | -5.09 | 2:22:00 | 7 0 15 31.8 |
 | TOTAL | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 0 243 43.4 |
 ========================================================= EXIT REASON STATS ==========================================================
-| Exit Reason | Sells | Wins | Draws | Losses |
+| Exit Reason | Exits | Wins | Draws | Losses |
 |:-------------------|--------:|------:|-------:|--------:|
 | trailing_stop_loss | 205 | 150 | 0 | 55 |
 | stop_loss | 166 | 0 | 0 | 166 |
 | exit_signal | 56 | 36 | 0 | 20 |
 | force_exit | 2 | 0 | 0 | 2 |
 ====================================================== LEFT OPEN TRADES REPORT ======================================================
-| Pair | Buys | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Win Draw Loss Win% |
+| Pair | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Win Draw Loss Win% |
-|:---------|-------:|---------------:|---------------:|-----------------:|---------------:|:---------------|--------------------:|
+|:---------|---------:|---------------:|---------------:|-----------------:|---------------:|:---------------|--------------------:|
 | ADA/BTC | 1 | 0.89 | 0.89 | 0.00004434 | 0.44 | 6:00:00 | 1 0 0 100 |
 | LTC/BTC | 1 | 0.68 | 0.68 | 0.00003421 | 0.34 | 2:00:00 | 1 0 0 100 |
 | TOTAL | 2 | 0.78 | 1.57 | 0.00007855 | 0.78 | 4:00:00 | 2 0 0 100 |
 ================== SUMMARY METRICS ==================
 | Metric | Value |
 |-----------------------------+---------------------|
@@ -356,7 +356,7 @@ The column `Avg Profit %` shows the average profit for all trades made while the
 The column `Tot Profit %` shows instead the total profit % in relation to the starting balance.
 In the above results, we have a starting balance of 0.01 BTC and an absolute profit of 0.00762792 BTC - so the `Tot Profit %` will be `(0.00762792 / 0.01) * 100 ~= 76.2%`.
 
-Your strategy performance is influenced by your buy strategy, your exit strategy, and also by the `minimal_roi` and `stop_loss` you have set.
+Your strategy performance is influenced by your entry strategy, your exit strategy, and also by the `minimal_roi` and `stop_loss` you have set.
 
 For example, if your `minimal_roi` is only `"0": 0.01` you cannot expect the bot to make more profit than 1% (because it will exit every time a trade reaches 1%).
 
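To make the `minimal_roi` example concrete, this is roughly what such a setting looks like inside a strategy class (illustrative, matching the `"0": 0.01` case above):

```python
# In the strategy class: exit every trade once it reaches 1% profit,
# from minute 0 onwards - capping profit per trade at roughly 1%.
minimal_roi = {
    "0": 0.01,
}
```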
@@ -515,7 +515,7 @@ You can then load the trades to perform further analysis as shown in the [data a
 Since backtesting lacks some detailed information about what happens within a candle, it needs to take a few assumptions:
 
 - Exchange [trading limits](#trading-limits-in-backtesting) are respected
-- Buys happen at open-price
+- Entries happen at open-price
 - All orders are filled at the requested price (no slippage, no unfilled orders)
 - Exit-signal exits happen at open-price of the consecutive candle
 - Exit-signal is favored over Stoploss, because exit-signals are assumed to trigger on candle's open
@@ -612,11 +612,11 @@ There will be an additional table comparing win/losses of the different strategi
 Detailed output for all strategies one after the other will be available, so make sure to scroll up to see the details per strategy.
 
 ```
-=========================================================== STRATEGY SUMMARY =========================================================================
+=========================================================== STRATEGY SUMMARY ===========================================================================
-| Strategy | Buys | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins | Draws | Losses | Drawdown % |
+| Strategy | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins | Draws | Losses | Drawdown % |
-|:------------|-------:|---------------:|---------------:|-----------------:|---------------:|:---------------|------:|-------:|-------:|-----------:|
+|:------------|---------:|---------------:|---------------:|-----------------:|---------------:|:---------------|------:|-------:|-------:|-----------:|
 | Strategy1 | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 | 0 | 243 | 45.2 |
 | Strategy2 | 1487 | -0.13 | -197.58 | -0.00988917 | -98.79 | 4:43:00 | 662 | 0 | 825 | 241.68 |
 ```
 
 ## Next step
docs/faq.md (27 changed lines)
@@ -4,7 +4,7 @@
 
 Freqtrade supports spot trading only.
 
-### Can I open short positions?
+### Can my bot open short positions?
 
 Freqtrade can open short positions in futures markets.
 This requires the strategy to be made for this - and `"trading_mode": "futures"` in the configuration.
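A hypothetical configuration fragment for enabling futures mode might look as follows (shown as a Python dict; `margin_mode` is the companion setting typically required alongside `trading_mode`):

```python
# Illustrative excerpt of a freqtrade configuration for futures trading
config_fragment = {
    "trading_mode": "futures",
    "margin_mode": "isolated",  # futures trading uses isolated margin
}
```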
@@ -12,9 +12,9 @@ Please make sure to read the [relevant documentation page](leverage.md) first.
 
 In spot markets, you can in some cases use leveraged spot tokens, which reflect an inverted pair (eg. BTCUP/USD, BTCDOWN/USD, ETHBULL/USD, ETHBEAR/USD,...) which can be traded with Freqtrade.
 
-### Can I trade options or futures?
+### Can my bot trade options or futures?
 
-Futures trading is supported for selected exchanges.
+Futures trading is supported for selected exchanges. Please refer to the [documentation start page](index.md#supported-futures-exchanges-experimental) for an up-to-date list of supported exchanges.
 
 ## Beginner Tips & Tricks
 
@@ -22,6 +22,13 @@ Futures trading is supported for selected exchanges.
 
 ## Freqtrade common issues
 
+### Can freqtrade open multiple positions on the same pair in parallel?
+
+No. Freqtrade will only open one position per pair at a time.
+You can, however, use the [`adjust_trade_position()` callback](strategy-callbacks.md#adjust-trade-position) to adjust an open position.
+
+Backtesting provides an option for this in `--eps` - however this is only there to highlight "hidden" signals, and will not work in live mode.
+
 ### The bot does not start
 
 Running the bot with `freqtrade trade --config config.json` shows the output `freqtrade: command not found`.
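For context on the `adjust_trade_position()` callback referenced above, a minimal sketch is shown below; the 5% threshold and the stake logic are illustrative assumptions, not freqtrade defaults:

```python
from freqtrade.strategy import IStrategy


class MyStrategy(IStrategy):
    position_adjustment_enable = True  # required to activate the callback

    def adjust_trade_position(self, trade, current_time, current_rate,
                              current_profit, min_stake, max_stake, **kwargs):
        # Illustrative: add the minimum stake once the trade is 5% in profit.
        if current_profit > 0.05:
            return min_stake
        # Returning None leaves the position unchanged.
        return None
```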
@@ -30,7 +37,7 @@ This could be caused by the following reasons:
 
 * The virtual environment is not active.
   * Run `source .env/bin/activate` to activate the virtual environment.
-* The installation did not work correctly.
+* The installation did not complete successfully.
   * Please check the [Installation documentation](installation.md).
 
 ### I have waited 5 minutes, why hasn't the bot made any trades yet?
@@ -67,7 +74,7 @@ This is not a bot-problem, but will also happen while manual trading.
 While freqtrade can handle this (it'll sell 99 COIN), fees are often below the minimum tradable lot-size (you can only trade full COIN, not 0.9 COIN).
 Leaving the dust (0.9 COIN) on the exchange usually makes sense, as the next time freqtrade buys COIN, it'll eat into the remaining small balance, this time selling everything it bought, and therefore slowly reducing the dust balance (although it most likely will never reach exactly 0).
 
 Where possible (e.g. on binance), the use of the exchange's dedicated fee currency will fix this.
 On binance, it's sufficient to have BNB in your account, and have "Pay fees in BNB" enabled in your profile. Your BNB balance will slowly decline (as it's used to pay fees) - but you'll no longer encounter dust (Freqtrade will include the fees in the profit calculations).
 Other exchanges don't offer such possibilities, where it's simply something you'll have to accept or move to a different exchange.
 
@@ -109,7 +116,7 @@ This warning can point to one of the below problems:
 
 ### I'm getting the "RESTRICTED_MARKET" message in the log
 
 Currently known to happen for US Bittrex users.
 
 Read [the Bittrex section about restricted markets](exchanges.md#restricted-markets) for more information.
 
@@ -177,8 +184,8 @@ The GPU improvements would only apply to pandas-native calculations - or ones wr
 For hyperopt, freqtrade is using scikit-optimize, which is built on top of scikit-learn.
 Their statement about GPU support is [pretty clear](https://scikit-learn.org/stable/faq.html#will-you-add-gpu-support).
 
 GPUs are also only good at crunching numbers (floating point operations).
 For hyperopt, we need both number-crunching (find next parameters) and running python code (running backtesting).
 As such, GPUs are not too well suited for most parts of hyperopt.
 
 The benefit of using GPU would therefore be pretty slim - and will not justify the complexity introduced by trying to add GPU support.
@@ -219,9 +226,9 @@ already 8\*10^9\*10 evaluations. A rough total of 80 billion evaluations.
 Did you run 100 000 evaluations? Congrats, you've done roughly 1 / 100 000 th
 of the search space, assuming that the bot never tests the same parameters more than once.
 
 * The time it takes to run 1000 hyperopt epochs depends on things like: the available cpu, hard-disk, ram, timeframe, timerange, indicator settings, indicator count, the number of coins that hyperopt tests strategies on, and the resulting trade count - which can be 650 trades in a year or 100000 trades, depending on whether the strategy aims for big profits by trading rarely or for many low-profit trades.
 
 Example: 4% profit 650 times vs 0.3% profit a trade 10000 times in a year. If we assume you set the --timerange to 365 days.
 
 Example:
 `freqtrade --config config.json --strategy SampleStrategy --hyperopt SampleHyperopt -e 1000 --timerange 20190601-20200601`
docs/freqai.md (149 changed lines)
@@ -98,6 +98,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
 | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old. <br> Defaults set to 0, which means models never expire. <br> **Datatype:** Positive integer.
 | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training data set. <br> **Datatype:** Positive integer.
 | `follow_mode` | If true, this instance of FreqAI will look for models associated with `identifier` and load those for inferencing. A `follower` will **not** train new models. <br> **Datatype:** Boolean. Default: `False`.
+| `continual_learning` | If true, FreqAI will start training new models from the final state of the most recently trained model. <br> **Datatype:** Boolean. Default: `False`.
 | | **Feature parameters**
 | `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](#feature-engineering). <br> **Datatype:** Dictionary.
 | `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base asset feature set. <br> **Datatype:** List of timeframes (strings).
@@ -112,15 +113,17 @@ Mandatory parameters are marked as **Required**, which means that they are requi
 | `DI_threshold` | Activates the Dissimilarity Index for outlier detection when > 0. See details about how it works [here](#removing-outliers-with-the-dissimilarity-index). <br> **Datatype:** Positive float (typically < 1).
 | `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training data set, as well as from incoming data points. See details about how it works [here](#removing-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
 | `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](#removing-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.
 | `use_DBSCAN_to_remove_outliers` | Cluster data using DBSCAN to identify and remove outliers from training and prediction data. See details about how it works [here](#removing-outliers-with-dbscan). <br> **Datatype:** Boolean.
-| `outlier_protection_percentage` | If more than `outlier_protection_percentage` fraction of points are removed as outliers, FreqAI will log a warning message and ignore outlier detection while keeping the original dataset intact. <br> **Datatype:** float. Default: `30`
+| `inlier_metric_window` | If set, FreqAI will add the `inlier_metric` to the training feature set and set the lookback to be the `inlier_metric_window`. Details of how the `inlier_metric` is computed can be found [here](#using-the-inliermetric). <br> **Datatype:** Integer. Default: `0`.
-| `reverse_train_test_order` | If true, FreqAI will train on the latest data split and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, users should be careful to understand unorthodox nature of this parameter before employing it. <br> **Datatype:** bool. Default: False
+| `noise_standard_deviation` | If > 0, FreqAI adds noise to the training features. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. The value should be kept relative to the normalized space between -1 and 1. In other words, since data is always normalized between -1 and 1 in FreqAI, the user can expect a `noise_standard_deviation: 0.05` to see 32% of data randomly increased/decreased by more than 2.5% (i.e. the percent of data falling outside the first standard deviation). Good for preventing overfitting. <br> **Datatype:** Integer. Default: `0`.
+| `outlier_protection_percentage` | If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection while keeping the original dataset intact. If the outlier protection is triggered, no predictions will be made based on the training data. <br> **Datatype:** Float. Default: `30`.
+| `reverse_train_test_order` | If true, FreqAI will train on the latest data split and test on the historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, users should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. Default: `False`.
 | | **Data split parameters**
 | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
 | `test_size` | Fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
-| `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`. <br>
+| `shuffle` | Shuffle the training data points during training. Typically, for time-series forecasting, this is set to `False`. <br> **Datatype:** Boolean.
 | | **Model training parameters**
-| `model_training_parameters` | A flexible dictionary that includes all parameters available by the user selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, such as `PPO` from stable_baselines3, this dictionary can contain any parameter from that model. <br> **Datatype:** Dictionary
+| `model_training_parameters` | A flexible dictionary that includes all parameters available in the user-selected model library. For example, if the user uses `LightGBMRegressor`, this dictionary can contain any parameter available in the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If the user selects a different model, this dictionary can contain any parameter from that model. <br> **Datatype:** Dictionary.
 | `n_estimators` | The number of boosted trees to fit in regression. <br> **Datatype:** Integer.
 | `learning_rate` | Boosting learning rate during regression. <br> **Datatype:** Float.
 | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float.
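To show how several of the parameters above fit together, here is a hypothetical excerpt of a `freqai` configuration, written as a Python dict with illustrative values (grouping follows the table):

```python
# Hypothetical freqai configuration excerpt - values are illustrative only
freqai_config = {
    "continual_learning": True,            # warm-start from the last trained model
    "feature_parameters": {
        "DI_threshold": 0.9,               # enable Dissimilarity Index outlier checks
        "use_SVM_to_remove_outliers": True,
        "inlier_metric_window": 10,        # lookback for the inlier metric feature
        "noise_standard_deviation": 0.05,  # gaussian noise in normalized space
        "outlier_protection_percentage": 30,
    },
    "data_split_parameters": {"test_size": 0.25, "shuffle": False},
}
```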
@@ -289,8 +292,10 @@ The FreqAI strategy requires the user to include the following lines of code in
 
 Notice how the `populate_any_indicators()` is where the user adds their own features ([more information](#feature-engineering)) and labels ([more information](#setting-classifier-targets)). See a full example at `templates/FreqaiExampleStrategy.py`.
 
+*Important*: The `self.freqai.start()` function cannot be called outside of `populate_indicators()`.
+
 ### Setting the `startup_candle_count`
-Users need to take care to set the `startup_candle_count` in their strategy the same way they would for any normal Freqtrade strategy (see details [here](strategy-customization.md/#strategy-startup-period)). This value is used by Freqtrade to ensure that a sufficient amount of data is provided when calling on the `dataprovider` to avoid any NaNs at the beginning of the first training. Users can easily set this value by identifying the longest period (in candle units) that they pass to their indicator creation functions (e.g. talib functions). In the present example, the user would pass 20 to as this value (since it is the maximum value in their `indicators_periods_candles`).
+Users need to take care to set the `startup_candle_count` in their strategy the same way they would for any normal Freqtrade strategy (see details [here](strategy-customization.md#strategy-startup-period)). This value is used by Freqtrade to ensure that a sufficient amount of data is provided when calling on the `dataprovider` to avoid any NaNs at the beginning of the first training. Users can easily set this value by identifying the longest period (in candle units) that they pass to their indicator creation functions (e.g. talib functions). In the present example, the user would pass 20 as this value (since it is the maximum value in their `indicators_periods_candles`).
 
 !!! Note
     Typically it is best for users to be safe and multiply their expected `startup_candle_count` by 2. There are instances where the talib functions actually require more data than just the passed `period`. Anecdotally, multiplying the `startup_candle_count` by 2 always leads to a fully NaN-free training dataset. Look out for this log message to confirm that your data is clean:
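Applying the rule of thumb from the note above (longest indicator period of 20, doubled for safety), the strategy attribute would simply be:

```python
# 20 (maximum indicator period) * 2 safety factor, per the note above
startup_candle_count: int = 40
```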
@@ -525,10 +530,10 @@ and if a full `live_retrain_hours` has elapsed since the end of the loaded model
 The FreqAI backtesting module can be executed with the following command:
 
 ```bash
-freqtrade backtesting --strategy FreqaiExampleStrategy --config config_examples/config_freqai.example.json --freqaimodel LightGBMRegressor --timerange 20210501-20210701
+freqtrade backtesting --strategy FreqaiExampleStrategy --strategy-path freqtrade/templates --config config_examples/config_freqai.example.json --freqaimodel LightGBMRegressor --timerange 20210501-20210701
 ```
 
-Backtesting mode requires the user to have the data pre-downloaded (unlike in dry/live mode where FreqAI automatically downloads the necessary data). The user should be careful to consider that the time range of the downloaded data is more than the backtesting time range. This is because FreqAI needs data prior to the desired backtesting time range in order to train a model to be ready to make predictions on the first candle of the user-set backtesting time range. More details on how to calculate the data to download can be found [here](#deciding-the-sliding-training-window-and-backtesting-duration).
+Backtesting mode requires the user to have the data [pre-downloaded](#downloading-data-for-backtesting) (unlike in dry/live mode where FreqAI automatically downloads the necessary data). The user should be careful to ensure that the time range of the downloaded data is longer than the backtesting time range. This is because FreqAI needs data prior to the desired backtesting time range in order to train a model to be ready to make predictions on the first candle of the user-set backtesting time range. More details on how to calculate the data to download can be found [here](#deciding-the-sliding-training-window-and-backtesting-duration).
 
 If this command has never been executed with the existing config file, it will train a new model
 for each pair, for each backtesting window within the expanded `--timerange`.
@@ -542,6 +547,31 @@ for each pair, for each backtesting window within the expanded `--timerange`.
 
 ---
 
+### Hyperopt
+
+Users can hyperopt using the same command as for typical [hyperopt](hyperopt.md):
+
+```bash
+freqtrade hyperopt --hyperopt-loss SharpeHyperOptLoss --strategy FreqaiExampleStrategy --freqaimodel LightGBMRegressor --strategy-path freqtrade/templates --config config_examples/config_freqai.example.json --timerange 20220428-20220507
+```
+
+Users need to have the data pre-downloaded in the same fashion as if they were doing a FreqAI [backtest](#backtesting). In addition, users must consider some restrictions when trying to [Hyperopt](hyperopt.md) FreqAI strategies:
+
+- The `--analyze-per-epoch` hyperopt parameter is not compatible with FreqAI.
+- It's not possible to hyperopt indicators in the `populate_any_indicators()` function. This means that the user cannot optimize model parameters using hyperopt. Apart from this exception, it is possible to optimize all other [spaces](hyperopt.md#running-hyperopt-with-smaller-search-space).
+- The [Backtesting](#backtesting) instructions also apply to Hyperopt.
+
+The best method for combining hyperopt and FreqAI is to focus on hyperopting entry/exit thresholds and criteria. Users need to focus on hyperopting parameters that are not used in their FreqAI features. For example, users should not try to hyperopt rolling window lengths in their feature creation, or any part of their FreqAI config which changes predictions. In order to efficiently hyperopt the FreqAI strategy, FreqAI stores predictions as dataframes and reuses them. Hence the requirement to hyperopt entry/exit thresholds and criteria only.
+
+A good example of a hyperoptable parameter in FreqAI is a value for `DI_values` beyond which we consider data points outliers, and below which we consider them inliers:
+
+```python
+di_max = IntParameter(low=1, high=20, default=10, space='buy', optimize=True, load=True)
+dataframe['outlier'] = np.where(dataframe['DI_values'] > self.di_max.value/10, 1, 0)
+```
+
+This would help the user understand the appropriate Dissimilarity Index values for their particular parameter space.
+
 ### Deciding the size of the sliding training window and backtesting duration
 
 The user defines the backtesting timerange with the typical `--timerange` parameter in the
@@ -556,7 +586,7 @@ FreqAI will have trained 8 separate models at the end of `--timerange` (be
 Although fractional `backtest_period_days` is allowed, the user should be aware that the `--timerange` is divided by this value to determine the number of models that FreqAI will need to train in order to backtest the full range. For example, if the user wants to set a `--timerange` of 10 days, and asks for a `backtest_period_days` of 0.1, FreqAI will need to train 100 models per pair to complete the full backtest. Because of this, a true backtest of FreqAI adaptive training would take a *very* long time. The best way to fully test a model is to run it dry and let it constantly train. In this case, backtesting would take the exact same amount of time as a dry run.
 
 ### Downloading data for backtesting
-Live/dry instances will download the data automatically for the user, but users who wish to use backtesting functionality still need to download the necessary data using `download-data` (details [here](data-download/#data-downloading)). FreqAI users need to pay careful attention to understanding how much *additional* data needs to be downloaded to ensure that they have a sufficient amount of training data *before* the start of their backtesting timerange. The amount of additional data can be roughly estimated by taking subtracting `train_period_days` and the `startup_candle_count` ([details](#setting-the-startupcandlecount)) from the beginning of the desired backtesting timerange.
+Live/dry instances will download the data automatically for the user, but users who wish to use backtesting functionality still need to download the necessary data using `download-data` (details [here](data-download.md#data-downloading)). FreqAI users need to pay careful attention to understanding how much *additional* data needs to be downloaded to ensure that they have a sufficient amount of training data *before* the start of their backtesting timerange. The amount of additional data can be roughly estimated by moving the start date of the desired backtesting timerange backwards by `train_period_days` plus the `startup_candle_count` ([details](#setting-the-startupcandlecount)).
 
 As an example, if we wish to backtest the `--timerange` above of `20210501-20210701`, and the example config sets `train_period_days` to 15 with a startup candle count of 40 on a maximum `include_timeframes` of 1h, we would need 20210501 - 15 days - 40 * 1h / 24 hours = 20210414 (16.7 days earlier than the start of the desired backtesting timerange).
 
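The date arithmetic in that example can be double-checked with a few lines of Python (dates and periods taken from the example itself):

```python
from datetime import datetime, timedelta

backtest_start = datetime(2021, 5, 1)
train_period_days = 15
startup_candles, timeframe_hours = 40, 1  # 40 candles on the 1h timeframe

download_start = backtest_start - timedelta(
    days=train_period_days, hours=startup_candles * timeframe_hours)
print(download_start.date())  # 2021-04-14, ~16.7 days before the backtest start
```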
@@ -653,6 +683,18 @@ testing; the other points are used for training.
 
 The test data is used to evaluate the performance of the model after training. If the test score is high, the model is able to capture the behavior of the data well. If the test score is low, either the model does not capture the complexity of the data, the test data is significantly different from the train data, or a different model should be used.
 
+### Using the `inlier_metric`
+
+The `inlier_metric` is a metric aimed at quantifying how different a prediction data point is from the most recent historic data points.
+
+Users can set `inlier_metric_window` to set the lookback window. FreqAI will compute the distance between the present prediction point and each of the previous data points (a total of `inlier_metric_window` points).
+
+This function goes one step further - during training, it computes the `inlier_metric` for all training data points and builds Weibull distributions for each lookback point. The cumulative distribution function of the Weibull distribution is used to produce a quantile for each of the data points. The quantiles for each lookback point are averaged to create the `inlier_metric`.
+
+FreqAI adds this `inlier_metric` score to the training features! In other words, your model is trained to recognize how this temporal inlier metric is related to the user-set labels.
+
+This function does **not** remove outliers from the data set.
+
 ### Controlling the model learning process
 
 Model training parameters are unique to the machine learning library selected by the user. FreqAI allows the user to set any parameter for any library using the `model_training_parameters` dictionary in the user configuration file. The example configuration file (found in `config_examples/config_freqai.example.json`) shows some of the example parameters associated with `Catboost` and `LightGBM`, but the user can add any parameters available in those libraries.
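The Weibull-quantile construction described above can be sketched as follows; this is a rough illustration of the technique with toy data, not FreqAI's actual implementation:

```python
import numpy as np
from scipy.stats import weibull_min

rng = np.random.default_rng(42)
# toy data: distance from each point to each of its 10 previous points
distances = rng.gamma(2.0, 1.0, size=(500, 10))

quantiles = np.empty_like(distances)
for k in range(distances.shape[1]):
    # fit a Weibull distribution per lookback position ...
    shape, loc, scale = weibull_min.fit(distances[:, k], floc=0)
    # ... and map each distance to its CDF quantile
    quantiles[:, k] = weibull_min.cdf(distances[:, k], shape, loc=loc, scale=scale)

# average the quantiles across the lookback window to get one score per point
inlier_metric = quantiles.mean(axis=1)
```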
@@ -750,93 +792,6 @@ Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters
 
 FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html)) with `min_samples` ($N$) taken as double the number of user-defined features, and `eps` ($\varepsilon$) taken as the longest distance in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.
 
-## Reinforcement Learning
-
-Setting up and running a Reinforcement Learning model is as quick and simple as running a Regressor. Users can start training and trading live from example files using:
-
-```bash
-freqtrade trade --freqaimodel ReinforcementLearner --strategy ReinforcementLearningExample5ac --strategy-path freqtrade/freqai/example_strats --config config_examples/config_freqai-rl.example.json
-```
-
-As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
-
-```python
-class MyRLEnv(Base5ActionRLEnv):
-    """
-    User made custom environment. This class inherits from BaseEnvironment and gym.env.
-    Users can override any functions from those parent classes. Here is an example
-    of a user customized `calculate_reward()` function.
-    """
-
-    def calculate_reward(self, action):
-
-        # first, penalize if the action is not valid
-        if not self._is_valid(action):
-            return -2
-
-        pnl = self.get_unrealized_profit()
-        rew = np.sign(pnl) * (pnl + 1)
-        factor = 100
-
-        # reward agent for entering trades
-        if action in (Actions.Long_enter.value, Actions.Short_enter.value) \
-                and self._position == Positions.Neutral:
-            return 25
-        # discourage agent from not entering trades
-        if action == Actions.Neutral.value and self._position == Positions.Neutral:
-            return -1
-
-        max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
-        trade_duration = self._current_tick - self._last_trade_tick
-
-        if trade_duration <= max_trade_duration:
-            factor *= 1.5
-        elif trade_duration > max_trade_duration:
-            factor *= 0.5
-
-        # discourage sitting in position
-        if self._position in (Positions.Short, Positions.Long) and \
-                action == Actions.Neutral.value:
-            return -1 * trade_duration / max_trade_duration
-
-        # close long
-        if action == Actions.Long_exit.value and self._position == Positions.Long:
-            if pnl > self.profit_aim * self.rr:
-                factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
-            return float(rew * factor)
-
-        # close short
-        if action == Actions.Short_exit.value and self._position == Positions.Short:
-            if pnl > self.profit_aim * self.rr:
-                factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
-            return float(rew * factor)
-
-        return 0.
-```
-
-After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple (as shown in `example_strats/ReinforcementLearningExample5ac.py`). The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy.
-
-### Using Tensorboard
-
-Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer:
-
-```bash
-pip3 install tensorboard
-```
-
-Next, the user can activate Tensorboard with the following command:
-
-```bash
-cd freqtrade
-tensorboard --logdir user_data/models/unique-id
-```
-
-where `unique-id` is the `identifier` set in the `freqai` configuration file.
-
-![tensorboard](assets/tensorboard.jpg)
-
 ## Additional information
 
 ### Common pitfalls
@@ -860,5 +815,5 @@ Code review, software architecture brainstorming:
 @xmatthias
 
 Beta testing and bug reporting:
-@bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm
+@bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm,
 Juha Nykänen @suikula, Wagner Costa @wagnercosta
@@ -824,6 +824,8 @@ Options:
 - Merge the dataframe without lookahead bias
 - Forward-fill (optional)
 
+For a full sample, please refer to the [complete data provider example](#complete-data-provider-sample) below.
+
 All columns of the informative dataframe will be available on the returning dataframe in a renamed fashion:
 
 !!! Example "Column renaming"
@@ -90,7 +90,8 @@ Example configuration showing the different settings:
         "trailing_stop_loss": "on",
         "stop_loss": "on",
         "stoploss_on_exchange": "on",
-        "custom_exit": "silent"
+        "custom_exit": "silent",
+        "partial_exit": "on"
     },
     "entry_cancel": "silent",
     "exit_cancel": "on",
@@ -138,7 +139,7 @@ You can create your own keyboard in `config.json`:
     "enabled": true,
     "token": "your_telegram_token",
     "chat_id": "your_telegram_chat_id",
     "keyboard": [
         ["/daily", "/stats", "/balance", "/profit"],
         ["/status table", "/performance"],
         ["/reload_config", "/count", "/logs"]
@@ -225,16 +226,16 @@ Once all positions are sold, run `/stop` to completely stop the bot.
 For each open trade, the bot will send you the following message.
 Enter Tag is configurable via Strategy.
 
 > **Trade ID:** `123` `(since 1 days ago)`
 > **Current Pair:** CVC/BTC
 > **Direction:** Long
 > **Leverage:** 1.0
 > **Amount:** `26.64180098`
 > **Enter Tag:** Awesome Long Signal
 > **Open Rate:** `0.00007489`
 > **Current Rate:** `0.00007489`
 > **Current Profit:** `12.95%`
 > **Stoploss:** `0.00007389 (-0.02%)`
 
 ### /status table
 
@@ -261,26 +262,26 @@ current max

 Return a summary of your profit/loss and performance.

 > **ROI:** Close trades
 > ∙ `0.00485701 BTC (2.2%) (15.2 Σ%)`
 > ∙ `62.968 USD`
 > **ROI:** All trades
 > ∙ `0.00255280 BTC (1.5%) (6.43 Σ%)`
 > ∙ `33.095 EUR`
 >
 > **Total Trade Count:** `138`
 > **First Trade opened:** `3 days ago`
 > **Latest Trade opened:** `2 minutes ago`
 > **Avg. Duration:** `2:33:45`
 > **Best Performing:** `PAY/BTC: 50.23%`
 > **Trading volume:** `0.5 BTC`
 > **Profit factor:** `1.04`
 > **Max Drawdown:** `9.23% (0.01255 BTC)`

 The relative profit of `1.2%` is the average profit per trade.
 The relative profit of `15.2 Σ%` is based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`.
 Starting capital is either taken from the `available_capital` setting, or calculated by using current wallet size - profits.
 Profit Factor is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy.
 Max drawdown corresponds to the backtesting metric `Absolute Drawdown (Account)` - calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)`.

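For readers, a small self-contained sketch (toy numbers, not bot output) of the two metrics defined above - profit factor and `Absolute Drawdown (Account)`:

```python
# Toy per-trade profits in stake currency - illustrative values only.
profits = [0.004, -0.001, 0.0021, -0.0005]
starting_balance = 0.05

gross_profit = sum(p for p in profits if p > 0)
gross_loss = abs(sum(p for p in profits if p < 0))
profit_factor = gross_profit / gross_loss if gross_loss else float('inf')

# Absolute Drawdown (Account): distance from the equity high, measured
# against (drawdown high + starting balance), per the formula above.
equity = high = max_drawdown = 0.0
for p in profits:
    equity += p
    high = max(high, equity)
    max_drawdown = max(max_drawdown, (high - equity) / (high + starting_balance))

print(f"profit factor: {profit_factor:.2f}, max drawdown: {max_drawdown:.2%}")
```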
 ### /forceexit <trade_id>

@@ -309,27 +310,27 @@ Note that for this to work, `force_entry_enable` needs to be set to true.

 ### /performance

 Return the performance of each crypto-currency the bot has sold.
 > Performance:
 > 1. `RCN/BTC 0.003 BTC (57.77%) (1)`
 > 2. `PAY/BTC 0.0012 BTC (56.91%) (1)`
 > 3. `VIB/BTC 0.0011 BTC (47.07%) (1)`
 > 4. `SALT/BTC 0.0010 BTC (30.24%) (1)`
 > 5. `STORJ/BTC 0.0009 BTC (27.24%) (1)`
 > ...

 ### /balance

 Return the balance of all crypto-currencies you have on the exchange.

 > **Currency:** BTC
 > **Available:** 3.05890234
 > **Balance:** 3.05890234
 > **Pending:** 0.0

 > **Currency:** CVC
 > **Available:** 86.64180098
 > **Balance:** 86.64180098
 > **Pending:** 0.0

 ### /daily <n>

@@ -376,7 +377,7 @@ Month (count) Profit BTC Profit USD Profit %

 Shows the current whitelist

 > Using whitelist `StaticPairList` with 22 pairs
 > `IOTA/BTC, NEO/BTC, TRX/BTC, VET/BTC, ADA/BTC, ETC/BTC, NCASH/BTC, DASH/BTC, XRP/BTC, XVG/BTC, EOS/BTC, LTC/BTC, OMG/BTC, BTG/BTC, LSK/BTC, ZEC/BTC, HOT/BTC, IOTX/BTC, XMR/BTC, AST/BTC, XLM/BTC, NANO/BTC`

 ### /blacklist [pair]

@@ -386,7 +387,7 @@ If Pair is set, then this pair will be added to the pairlist.
 Also supports multiple pairs, separated by a space.
 Use `/reload_config` to reset the blacklist.

 > Using blacklist `StaticPairList` with 2 pairs
 > `DODGE/BTC`, `HOT/BTC`.

 ### /edge

@@ -455,8 +455,6 @@ AVAILABLE_CLI_OPTIONS = {
     '-t', '--timeframes',
     help='Specify which tickers to download. Space-separated list. '
     'Default: `1m 5m`.',
-    choices=['1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h',
-             '6h', '8h', '12h', '1d', '3d', '1w', '2w', '1M', '1y'],
     default=['1m', '5m'],
     nargs='+',
 ),
@@ -4,7 +4,7 @@ from typing import Any, Dict
 from sqlalchemy import func

 from freqtrade.configuration.config_setup import setup_utils_configuration
-from freqtrade.enums.runmode import RunMode
+from freqtrade.enums import RunMode


 logger = logging.getLogger(__name__)
@@ -84,6 +84,7 @@ def validate_config_consistency(conf: Dict[str, Any], preliminary: bool = False)
     _validate_protections(conf)
     _validate_unlimited_amount(conf)
     _validate_ask_orderbook(conf)
+    _validate_freqai_hyperopt(conf)
     validate_migrated_strategy_settings(conf)

     # validate configuration before returning

@@ -323,6 +324,14 @@ def _validate_pricing_rules(conf: Dict[str, Any]) -> None:
         del conf['ask_strategy']


+def _validate_freqai_hyperopt(conf: Dict[str, Any]) -> None:
+    freqai_enabled = conf.get('freqai', {}).get('enabled', False)
+    analyze_per_epoch = conf.get('analyze_per_epoch', False)
+    if analyze_per_epoch and freqai_enabled:
+        raise OperationalException(
+            'Using analyze-per-epoch parameter is not supported with a FreqAI strategy.')
+
+
 def _strategy_settings(conf: Dict[str, Any]) -> None:

     process_deprecated_setting(conf, None, 'use_sell_signal', None, 'use_exit_signal')
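A usage note: the toy config below (illustrative only) is exactly the combination the new check refuses, since analyze-per-epoch hyperopt runs are not supported together with FreqAI:

```python
conf = {'freqai': {'enabled': True}, 'analyze_per_epoch': True}
_validate_freqai_hyperopt(conf)  # raises OperationalException
```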
@@ -228,9 +228,9 @@ def _download_pair_history(pair: str, *,
     )

     logger.debug("Current Start: %s",
-                 f"{data.iloc[0]['date']:%Y-%m-%d %H:%M:%S}" if not data.empty else 'None')
+                 f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else 'None')
     logger.debug("Current End: %s",
-                 f"{data.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}" if not data.empty else 'None')
+                 f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else 'None')

     # Default since_ms to 30 days if nothing is given
     new_data = exchange.get_historic_ohlcv(pair=pair,

@@ -254,9 +254,9 @@ def _download_pair_history(pair: str, *,
         fill_missing=False, drop_incomplete=False)

     logger.debug("New Start: %s",
-                 f"{data.iloc[0]['date']:%Y-%m-%d %H:%M:%S}" if not data.empty else 'None')
+                 f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else 'None')
     logger.debug("New End: %s",
-                 f"{data.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}" if not data.empty else 'None')
+                 f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else 'None')

     data_handler.ohlcv_store(pair, timeframe, data=data, candle_type=candle_type)
     return True

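One subtlety worth showing: a constant used as an f-string format spec needs nested braces, otherwise the literal text `DATETIME_PRINT_FORMAT` would be treated as the spec. A quick self-contained illustration (the constant's value is restated here as an assumption):

```python
from datetime import datetime

# As defined in freqtrade.constants (restated here for the demo):
DATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S'

# Nested braces make the constant's value the format spec:
print(f"{datetime(2022, 8, 23):{DATETIME_PRINT_FORMAT}}")  # 2022-08-23 00:00:00
```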
@@ -205,7 +205,7 @@ class Exchange:
         logger.debug("Exchange object destroyed, closing async loop")
         if (self._api_async and inspect.iscoroutinefunction(self._api_async.close)
                 and self._api_async.session):
-            logger.info("Closing async ccxt session.")
+            logger.debug("Closing async ccxt session.")
             self.loop.run_until_complete(self._api_async.close())

     def validate_config(self, config):

@@ -446,6 +446,15 @@ class Exchange:
         contract_size = self.get_contract_size(pair)
         return contracts_to_amount(num_contracts, contract_size)

+    def amount_to_contract_precision(self, pair: str, amount: float) -> float:
+        """
+        Helper wrapper around amount_to_contract_precision
+        """
+        contract_size = self.get_contract_size(pair)
+
+        return amount_to_contract_precision(amount, self.get_precision_amount(pair),
+                                            self.precisionMode, contract_size)
+
     def set_sandbox(self, api: ccxt.Exchange, exchange_config: dict, name: str) -> None:
         if exchange_config.get('sandbox'):
             if api.urls.get('test'):
@ -2500,8 +2509,13 @@ class Exchange:
|
|||||||
cache=False,
|
cache=False,
|
||||||
drop_incomplete=False,
|
drop_incomplete=False,
|
||||||
)
|
)
|
||||||
funding_rates = candle_histories[funding_comb]
|
try:
|
||||||
mark_rates = candle_histories[mark_comb]
|
# we can't assume we always get histories - for example during exchange downtimes
|
||||||
|
funding_rates = candle_histories[funding_comb]
|
||||||
|
mark_rates = candle_histories[mark_comb]
|
||||||
|
except KeyError:
|
||||||
|
raise ExchangeError("Could not find funding rates.") from None
|
||||||
|
|
||||||
funding_mark_rates = self.combine_funding_and_mark(
|
funding_mark_rates = self.combine_funding_and_mark(
|
||||||
funding_rates=funding_rates, mark_rates=mark_rates)
|
funding_rates=funding_rates, mark_rates=mark_rates)
|
||||||
|
|
||||||
@ -2581,6 +2595,8 @@ class Exchange:
|
|||||||
:param is_short: trade direction
|
:param is_short: trade direction
|
||||||
:param amount: Trade amount
|
:param amount: Trade amount
|
||||||
:param open_date: Open date of the trade
|
:param open_date: Open date of the trade
|
||||||
|
:return: funding fee since open_date
|
||||||
|
:raies: ExchangeError if something goes wrong.
|
||||||
"""
|
"""
|
||||||
if self.trading_mode == TradingMode.FUTURES:
|
if self.trading_mode == TradingMode.FUTURES:
|
||||||
if self._config['dry_run']:
|
if self._config['dry_run']:
|
||||||
@@ -4,8 +4,7 @@ from typing import Dict, List, Optional, Tuple
 import ccxt

 from freqtrade.constants import BuySell
-from freqtrade.enums import MarginMode, TradingMode
-from freqtrade.enums.candletype import CandleType
+from freqtrade.enums import CandleType, MarginMode, TradingMode
 from freqtrade.exceptions import DDosProtection, OperationalException, TemporaryError
 from freqtrade.exchange import Exchange, date_minus_candles
 from freqtrade.exchange.common import retrier
@@ -21,12 +21,12 @@ class BaseClassifierModel(IFreqaiModel):
     """

     def train(
-        self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
+        self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
     ) -> Any:
         """
         Filter the training data and train a model to it. Train makes heavy use of the datakitchen
         for storing, saving, loading, and analyzing the data.
-        :param unfiltered_dataframe: Full dataframe for the current training period
+        :param unfiltered_df: Full dataframe for the current training period
         :param metadata: pair metadata from strategy.
         :return:
         :model: Trained model which can be used to inference (self.predict)

@@ -36,14 +36,14 @@ class BaseClassifierModel(IFreqaiModel):

         # filter the features requested by user in the configuration file and elegantly handle NaNs
         features_filtered, labels_filtered = dk.filter_features(
-            unfiltered_dataframe,
+            unfiltered_df,
             dk.training_features_list,
             dk.label_list,
             training_filter=True,
         )

-        start_date = unfiltered_dataframe["date"].iloc[0].strftime("%Y-%m-%d")
-        end_date = unfiltered_dataframe["date"].iloc[-1].strftime("%Y-%m-%d")
+        start_date = unfiltered_df["date"].iloc[0].strftime("%Y-%m-%d")
+        end_date = unfiltered_df["date"].iloc[-1].strftime("%Y-%m-%d")
         logger.info(f"-------------------- Training on data from {start_date} to "
                     f"{end_date}--------------------")
         # split data into train/test data.

@@ -61,32 +61,32 @@ class BaseClassifierModel(IFreqaiModel):
            f' features and {len(data_dictionary["train_features"])} data points'
         )

-        model = self.fit(data_dictionary)
+        model = self.fit(data_dictionary, dk)

         logger.info(f"--------------------done training {pair}--------------------")

         return model

     def predict(
-        self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False
+        self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
     ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
         """
         Filter the prediction features data and predict with it.
-        :param: unfiltered_dataframe: Full dataframe for the current backtest period.
+        :param: unfiltered_df: Full dataframe for the current backtest period.
         :return:
         :pred_df: dataframe containing the predictions
         :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
         data (NaNs) or felt uncertain about data (PCA and DI index)
         """

-        dk.find_features(unfiltered_dataframe)
-        filtered_dataframe, _ = dk.filter_features(
-            unfiltered_dataframe, dk.training_features_list, training_filter=False
+        dk.find_features(unfiltered_df)
+        filtered_df, _ = dk.filter_features(
+            unfiltered_df, dk.training_features_list, training_filter=False
         )
-        filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
-        dk.data_dictionary["prediction_features"] = filtered_dataframe
+        filtered_df = dk.normalize_data_from_metadata(filtered_df)
+        dk.data_dictionary["prediction_features"] = filtered_df

-        self.data_cleaning_predict(dk, filtered_dataframe)
+        self.data_cleaning_predict(dk, filtered_df)

         predictions = self.model.predict(dk.data_dictionary["prediction_features"])
         pred_df = DataFrame(predictions, columns=dk.label_list)
@@ -20,12 +20,12 @@ class BaseRegressionModel(IFreqaiModel):
     """

     def train(
-        self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
+        self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
     ) -> Any:
         """
         Filter the training data and train a model to it. Train makes heavy use of the datakitchen
         for storing, saving, loading, and analyzing the data.
-        :param unfiltered_dataframe: Full dataframe for the current training period
+        :param unfiltered_df: Full dataframe for the current training period
         :param metadata: pair metadata from strategy.
         :return:
         :model: Trained model which can be used to inference (self.predict)

@@ -35,14 +35,14 @@ class BaseRegressionModel(IFreqaiModel):

         # filter the features requested by user in the configuration file and elegantly handle NaNs
         features_filtered, labels_filtered = dk.filter_features(
-            unfiltered_dataframe,
+            unfiltered_df,
             dk.training_features_list,
             dk.label_list,
             training_filter=True,
         )

-        start_date = unfiltered_dataframe["date"].iloc[0].strftime("%Y-%m-%d")
-        end_date = unfiltered_dataframe["date"].iloc[-1].strftime("%Y-%m-%d")
+        start_date = unfiltered_df["date"].iloc[0].strftime("%Y-%m-%d")
+        end_date = unfiltered_df["date"].iloc[-1].strftime("%Y-%m-%d")
         logger.info(f"-------------------- Training on data from {start_date} to "
                     f"{end_date}--------------------")
         # split data into train/test data.

@@ -60,33 +60,33 @@ class BaseRegressionModel(IFreqaiModel):
            f' features and {len(data_dictionary["train_features"])} data points'
         )

-        model = self.fit(data_dictionary)
+        model = self.fit(data_dictionary, dk)

         logger.info(f"--------------------done training {pair}--------------------")

         return model

     def predict(
-        self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False
+        self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
     ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
         """
         Filter the prediction features data and predict with it.
-        :param: unfiltered_dataframe: Full dataframe for the current backtest period.
+        :param: unfiltered_df: Full dataframe for the current backtest period.
         :return:
         :pred_df: dataframe containing the predictions
         :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
         data (NaNs) or felt uncertain about data (PCA and DI index)
         """

-        dk.find_features(unfiltered_dataframe)
-        filtered_dataframe, _ = dk.filter_features(
-            unfiltered_dataframe, dk.training_features_list, training_filter=False
+        dk.find_features(unfiltered_df)
+        filtered_df, _ = dk.filter_features(
+            unfiltered_df, dk.training_features_list, training_filter=False
         )
-        filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
-        dk.data_dictionary["prediction_features"] = filtered_dataframe
+        filtered_df = dk.normalize_data_from_metadata(filtered_df)
+        dk.data_dictionary["prediction_features"] = filtered_df

         # optional additional data cleaning/analysis
-        self.data_cleaning_predict(dk, filtered_dataframe)
+        self.data_cleaning_predict(dk, filtered_df)

         predictions = self.model.predict(dk.data_dictionary["prediction_features"])
         pred_df = DataFrame(predictions, columns=dk.label_list)
@@ -17,12 +17,12 @@ class BaseTensorFlowModel(IFreqaiModel):
     """

     def train(
-        self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
+        self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
     ) -> Any:
         """
         Filter the training data and train a model to it. Train makes heavy use of the datakitchen
         for storing, saving, loading, and analyzing the data.
-        :param unfiltered_dataframe: Full dataframe for the current training period
+        :param unfiltered_df: Full dataframe for the current training period
         :param metadata: pair metadata from strategy.
         :return:
         :model: Trained model which can be used to inference (self.predict)

@@ -32,14 +32,14 @@ class BaseTensorFlowModel(IFreqaiModel):

         # filter the features requested by user in the configuration file and elegantly handle NaNs
         features_filtered, labels_filtered = dk.filter_features(
-            unfiltered_dataframe,
+            unfiltered_df,
             dk.training_features_list,
             dk.label_list,
             training_filter=True,
         )

-        start_date = unfiltered_dataframe["date"].iloc[0].strftime("%Y-%m-%d")
-        end_date = unfiltered_dataframe["date"].iloc[-1].strftime("%Y-%m-%d")
+        start_date = unfiltered_df["date"].iloc[0].strftime("%Y-%m-%d")
+        end_date = unfiltered_df["date"].iloc[-1].strftime("%Y-%m-%d")
         logger.info(f"-------------------- Training on data from {start_date} to "
                     f"{end_date}--------------------")
         # split data into train/test data.

@@ -57,7 +57,7 @@ class BaseTensorFlowModel(IFreqaiModel):
            f' features and {len(data_dictionary["train_features"])} data points'
         )

-        model = self.fit(data_dictionary)
+        model = self.fit(data_dictionary, dk)

         logger.info(f"--------------------done training {pair}--------------------")

freqtrade/freqai/base_models/FreqaiMultiOutputRegressor.py (new file, 65 lines)
@@ -0,0 +1,65 @@
+from joblib import Parallel
+from sklearn.multioutput import MultiOutputRegressor, _fit_estimator
+from sklearn.utils.fixes import delayed
+from sklearn.utils.validation import has_fit_parameter
+
+
+class FreqaiMultiOutputRegressor(MultiOutputRegressor):
+
+    def fit(self, X, y, sample_weight=None, fit_params=None):
+        """Fit the model to data, separately for each output variable.
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
+            Multi-output targets. An indicator matrix turns on multilabel
+            estimation.
+        sample_weight : array-like of shape (n_samples,), default=None
+            Sample weights. If `None`, then samples are equally weighted.
+            Only supported if the underlying regressor supports sample
+            weights.
+        fit_params : A list of dicts for the fit_params
+            Parameters passed to the ``estimator.fit`` method of each step.
+            Each dict may contain same or different values (e.g. different
+            eval_sets or init_models)
+            .. versionadded:: 0.23
+        Returns
+        -------
+        self : object
+            Returns a fitted instance.
+        """
+
+        if not hasattr(self.estimator, "fit"):
+            raise ValueError("The base estimator should implement a fit method")
+
+        y = self._validate_data(X="no_validation", y=y, multi_output=True)
+
+        if y.ndim == 1:
+            raise ValueError(
+                "y must have at least two dimensions for "
+                "multi-output regression but has only one."
+            )
+
+        if sample_weight is not None and not has_fit_parameter(
+            self.estimator, "sample_weight"
+        ):
+            raise ValueError("Underlying estimator does not support sample weights.")
+
+        if not fit_params:
+            # one empty kwargs dict per output, so the ** unpacking below is valid
+            fit_params = [{}] * y.shape[1]
+
+        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
+            delayed(_fit_estimator)(
+                self.estimator, X, y[:, i], sample_weight, **fit_params[i]
+            )
+            for i in range(y.shape[1])
+        )
+
+        if hasattr(self.estimators_[0], "n_features_in_"):
+            self.n_features_in_ = self.estimators_[0].n_features_in_
+        if hasattr(self.estimators_[0], "feature_names_in_"):
+            self.feature_names_in_ = self.estimators_[0].feature_names_in_
+
+        return
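Illustrative usage of the new class (assumptions: a plain sklearn estimator and random toy data). The point of the subclass is the `fit_params` list - one dict of fit keyword arguments per output column:

```python
import numpy as np
from sklearn.linear_model import LinearRegression

X = np.random.rand(100, 4)
y = np.random.rand(100, 2)                       # two labels -> two estimators

model = FreqaiMultiOutputRegressor(estimator=LinearRegression())
# One fit-params dict per output column; empty here, but this is where
# per-label eval_sets or init_models would be passed.
model.fit(X, y, fit_params=[{}, {}])
print(model.predict(X).shape)                    # (100, 2)
```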
@@ -76,6 +76,8 @@ class FreqaiDataDrawer:
             self.full_path / f"follower_dictionary-{self.follower_name}.json"
         )
         self.historic_predictions_path = Path(self.full_path / "historic_predictions.pkl")
+        self.historic_predictions_bkp_path = Path(
+            self.full_path / "historic_predictions.backup.pkl")
         self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json")
         self.follow_mode = follow_mode
         if follow_mode:

@@ -119,13 +121,21 @@ class FreqaiDataDrawer:
         """
         exists = self.historic_predictions_path.is_file()
         if exists:
-            with open(self.historic_predictions_path, "rb") as fp:
-                self.historic_predictions = cloudpickle.load(fp)
-            logger.info(
-                f"Found existing historic predictions at {self.full_path}, but beware "
-                "that statistics may be inaccurate if the bot has been offline for "
-                "an extended period of time."
-            )
+            try:
+                with open(self.historic_predictions_path, "rb") as fp:
+                    self.historic_predictions = cloudpickle.load(fp)
+                logger.info(
+                    f"Found existing historic predictions at {self.full_path}, but beware "
+                    "that statistics may be inaccurate if the bot has been offline for "
+                    "an extended period of time."
+                )
+            except EOFError:
+                logger.warning(
+                    'Historical prediction file was corrupted. Trying to load backup file.')
+                with open(self.historic_predictions_bkp_path, "rb") as fp:
+                    self.historic_predictions = cloudpickle.load(fp)
+                logger.warning('FreqAI successfully loaded the backup historical predictions file.')
+
         elif not self.follow_mode:
             logger.info("Could not find existing historic_predictions, starting from scratch")
         else:

@@ -143,6 +153,9 @@ class FreqaiDataDrawer:
         with open(self.historic_predictions_path, "wb") as fp:
             cloudpickle.dump(self.historic_predictions, fp, protocol=cloudpickle.DEFAULT_PROTOCOL)

+        # create a backup
+        shutil.copy(self.historic_predictions_path, self.historic_predictions_bkp_path)
+
     def save_drawer_to_disk(self):
         """
         Save data drawer full of all pair model metadata in present model folder.
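The failure mode the new code guards against, demonstrated in isolation (hypothetical file name, demo only): a truncated pickle raises `EOFError` on load, which is when the `.backup` copy would be read instead:

```python
import cloudpickle

path = "historic_predictions.pkl"          # hypothetical path for the demo
with open(path, "wb") as fp:
    cloudpickle.dump({"BTC/USDT": []}, fp)
open(path, "wb").close()                   # simulate corruption: truncate to 0 bytes
try:
    with open(path, "rb") as fp:
        cloudpickle.load(fp)
except EOFError:
    print("corrupted - fall back to the backup file")
```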
@@ -1,7 +1,8 @@
 import copy
-import datetime
 import logging
 import shutil
+from datetime import datetime, timezone
+from math import cos, sin
 from pathlib import Path
 from typing import Any, Dict, List, Tuple

@@ -9,6 +10,7 @@ import numpy as np
 import numpy.typing as npt
 import pandas as pd
 from pandas import DataFrame
+from scipy import stats
 from sklearn import linear_model
 from sklearn.cluster import DBSCAN
 from sklearn.metrics.pairwise import pairwise_distances
@ -182,7 +184,7 @@ class FreqaiDataKitchen:
|
|||||||
|
|
||||||
def filter_features(
|
def filter_features(
|
||||||
self,
|
self,
|
||||||
unfiltered_dataframe: DataFrame,
|
unfiltered_df: DataFrame,
|
||||||
training_feature_list: List,
|
training_feature_list: List,
|
||||||
label_list: List = list(),
|
label_list: List = list(),
|
||||||
training_filter: bool = True,
|
training_filter: bool = True,
|
||||||
@ -193,31 +195,35 @@ class FreqaiDataKitchen:
|
|||||||
0s in the prediction dataset. However, prediction dataset do_predict will reflect any
|
0s in the prediction dataset. However, prediction dataset do_predict will reflect any
|
||||||
row that had a NaN and will shield user from that prediction.
|
row that had a NaN and will shield user from that prediction.
|
||||||
:params:
|
:params:
|
||||||
:unfiltered_dataframe: the full dataframe for the present training period
|
:unfiltered_df: the full dataframe for the present training period
|
||||||
:training_feature_list: list, the training feature list constructed by
|
:training_feature_list: list, the training feature list constructed by
|
||||||
self.build_feature_list() according to user specified parameters in the configuration file.
|
self.build_feature_list() according to user specified parameters in the configuration file.
|
||||||
:labels: the labels for the dataset
|
:labels: the labels for the dataset
|
||||||
:training_filter: boolean which lets the function know if it is training data or
|
:training_filter: boolean which lets the function know if it is training data or
|
||||||
prediction data to be filtered.
|
prediction data to be filtered.
|
||||||
:returns:
|
:returns:
|
||||||
:filtered_dataframe: dataframe cleaned of NaNs and only containing the user
|
:filtered_df: dataframe cleaned of NaNs and only containing the user
|
||||||
requested feature set.
|
requested feature set.
|
||||||
:labels: labels cleaned of NaNs.
|
:labels: labels cleaned of NaNs.
|
||||||
"""
|
"""
|
||||||
filtered_dataframe = unfiltered_dataframe.filter(training_feature_list, axis=1)
|
filtered_df = unfiltered_df.filter(training_feature_list, axis=1)
|
||||||
filtered_dataframe = filtered_dataframe.replace([np.inf, -np.inf], np.nan)
|
filtered_df = filtered_df.replace([np.inf, -np.inf], np.nan)
|
||||||
|
|
||||||
drop_index = pd.isnull(filtered_dataframe).any(1) # get the rows that have NaNs,
|
drop_index = pd.isnull(filtered_df).any(1) # get the rows that have NaNs,
|
||||||
drop_index = drop_index.replace(True, 1).replace(False, 0) # pep8 requirement.
|
drop_index = drop_index.replace(True, 1).replace(False, 0) # pep8 requirement.
|
||||||
if (training_filter):
|
if (training_filter):
|
||||||
|
const_cols = list((filtered_df.nunique() == 1).loc[lambda x: x].index)
|
||||||
|
if const_cols:
|
||||||
|
filtered_df = filtered_df.filter(filtered_df.columns.difference(const_cols))
|
||||||
|
logger.warning(f"Removed features {const_cols} with constant values.")
|
||||||
# we don't care about total row number (total no. datapoints) in training, we only care
|
# we don't care about total row number (total no. datapoints) in training, we only care
|
||||||
# about removing any row with NaNs
|
# about removing any row with NaNs
|
||||||
# if labels has multiple columns (user wants to train multiple modelEs), we detect here
|
# if labels has multiple columns (user wants to train multiple modelEs), we detect here
|
||||||
labels = unfiltered_dataframe.filter(label_list, axis=1)
|
labels = unfiltered_df.filter(label_list, axis=1)
|
||||||
drop_index_labels = pd.isnull(labels).any(1)
|
drop_index_labels = pd.isnull(labels).any(1)
|
||||||
drop_index_labels = drop_index_labels.replace(True, 1).replace(False, 0)
|
drop_index_labels = drop_index_labels.replace(True, 1).replace(False, 0)
|
||||||
dates = unfiltered_dataframe['date']
|
dates = unfiltered_df['date']
|
||||||
filtered_dataframe = filtered_dataframe[
|
filtered_df = filtered_df[
|
||||||
(drop_index == 0) & (drop_index_labels == 0)
|
(drop_index == 0) & (drop_index_labels == 0)
|
||||||
] # dropping values
|
] # dropping values
|
||||||
labels = labels[
|
labels = labels[
|
||||||
@ -227,13 +233,13 @@ class FreqaiDataKitchen:
|
|||||||
(drop_index == 0) & (drop_index_labels == 0)
|
(drop_index == 0) & (drop_index_labels == 0)
|
||||||
]
|
]
|
||||||
logger.info(
|
logger.info(
|
||||||
f"dropped {len(unfiltered_dataframe) - len(filtered_dataframe)} training points"
|
f"dropped {len(unfiltered_df) - len(filtered_df)} training points"
|
||||||
f" due to NaNs in populated dataset {len(unfiltered_dataframe)}."
|
f" due to NaNs in populated dataset {len(unfiltered_df)}."
|
||||||
)
|
)
|
||||||
if (1 - len(filtered_dataframe) / len(unfiltered_dataframe)) > 0.1 and self.live:
|
if (1 - len(filtered_df) / len(unfiltered_df)) > 0.1 and self.live:
|
||||||
worst_indicator = str(unfiltered_dataframe.count().idxmin())
|
worst_indicator = str(unfiltered_df.count().idxmin())
|
||||||
logger.warning(
|
logger.warning(
|
||||||
f" {(1 - len(filtered_dataframe)/len(unfiltered_dataframe)) * 100:.0f} percent "
|
f" {(1 - len(filtered_df)/len(unfiltered_df)) * 100:.0f} percent "
|
||||||
" of training data dropped due to NaNs, model may perform inconsistent "
|
" of training data dropped due to NaNs, model may perform inconsistent "
|
||||||
f"with expectations. Verify {worst_indicator}"
|
f"with expectations. Verify {worst_indicator}"
|
||||||
)
|
)
|
||||||
@ -242,9 +248,9 @@ class FreqaiDataKitchen:
|
|||||||
else:
|
else:
|
||||||
# we are backtesting so we need to preserve row number to send back to strategy,
|
# we are backtesting so we need to preserve row number to send back to strategy,
|
||||||
# so now we use do_predict to avoid any prediction based on a NaN
|
# so now we use do_predict to avoid any prediction based on a NaN
|
||||||
drop_index = pd.isnull(filtered_dataframe).any(1)
|
drop_index = pd.isnull(filtered_df).any(1)
|
||||||
self.data["filter_drop_index_prediction"] = drop_index
|
self.data["filter_drop_index_prediction"] = drop_index
|
||||||
filtered_dataframe.fillna(0, inplace=True)
|
filtered_df.fillna(0, inplace=True)
|
||||||
# replacing all NaNs with zeros to avoid issues in 'prediction', but any prediction
|
# replacing all NaNs with zeros to avoid issues in 'prediction', but any prediction
|
||||||
# that was based on a single NaN is ultimately protected from buys with do_predict
|
# that was based on a single NaN is ultimately protected from buys with do_predict
|
||||||
drop_index = ~drop_index
|
drop_index = ~drop_index
|
||||||
@ -253,11 +259,11 @@ class FreqaiDataKitchen:
|
|||||||
logger.info(
|
logger.info(
|
||||||
"dropped %s of %s prediction data points due to NaNs.",
|
"dropped %s of %s prediction data points due to NaNs.",
|
||||||
len(self.do_predict) - self.do_predict.sum(),
|
len(self.do_predict) - self.do_predict.sum(),
|
||||||
len(filtered_dataframe),
|
len(filtered_df),
|
||||||
)
|
)
|
||||||
labels = []
|
labels = []
|
||||||
|
|
||||||
return filtered_dataframe, labels
|
return filtered_df, labels
|
||||||
|
|
||||||
def build_data_dictionary(
|
def build_data_dictionary(
|
||||||
self,
|
self,
|
||||||
@ -360,7 +366,7 @@ class FreqaiDataKitchen:
|
|||||||
|
|
||||||
def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame:
|
def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame:
|
||||||
"""
|
"""
|
||||||
Normalize a set of data using the mean and standard deviation from
|
Denormalize a set of data using the mean and standard deviation from
|
||||||
the associated training data.
|
the associated training data.
|
||||||
:param df: Dataframe of predictions to be denormalized
|
:param df: Dataframe of predictions to be denormalized
|
||||||
"""
|
"""
|
||||||
@ -399,7 +405,7 @@ class FreqaiDataKitchen:
|
|||||||
config_timerange = TimeRange.parse_timerange(self.config["timerange"])
|
config_timerange = TimeRange.parse_timerange(self.config["timerange"])
|
||||||
if config_timerange.stopts == 0:
|
if config_timerange.stopts == 0:
|
||||||
config_timerange.stopts = int(
|
config_timerange.stopts = int(
|
||||||
datetime.datetime.now(tz=datetime.timezone.utc).timestamp()
|
datetime.now(tz=timezone.utc).timestamp()
|
||||||
)
|
)
|
||||||
timerange_train = copy.deepcopy(full_timerange)
|
timerange_train = copy.deepcopy(full_timerange)
|
||||||
timerange_backtest = copy.deepcopy(full_timerange)
|
timerange_backtest = copy.deepcopy(full_timerange)
|
||||||
@ -416,8 +422,8 @@ class FreqaiDataKitchen:
|
|||||||
timerange_train.stopts = timerange_train.startts + train_period_days
|
timerange_train.stopts = timerange_train.startts + train_period_days
|
||||||
|
|
||||||
first = False
|
first = False
|
||||||
start = datetime.datetime.utcfromtimestamp(timerange_train.startts)
|
start = datetime.fromtimestamp(timerange_train.startts, tz=timezone.utc)
|
||||||
stop = datetime.datetime.utcfromtimestamp(timerange_train.stopts)
|
stop = datetime.fromtimestamp(timerange_train.stopts, tz=timezone.utc)
|
||||||
tr_training_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d"))
|
tr_training_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d"))
|
||||||
tr_training_list_timerange.append(copy.deepcopy(timerange_train))
|
tr_training_list_timerange.append(copy.deepcopy(timerange_train))
|
||||||
|
|
||||||
@ -430,8 +436,8 @@ class FreqaiDataKitchen:
|
|||||||
if timerange_backtest.stopts > config_timerange.stopts:
|
if timerange_backtest.stopts > config_timerange.stopts:
|
||||||
timerange_backtest.stopts = config_timerange.stopts
|
timerange_backtest.stopts = config_timerange.stopts
|
||||||
|
|
||||||
start = datetime.datetime.utcfromtimestamp(timerange_backtest.startts)
|
start = datetime.fromtimestamp(timerange_backtest.startts, tz=timezone.utc)
|
||||||
stop = datetime.datetime.utcfromtimestamp(timerange_backtest.stopts)
|
stop = datetime.fromtimestamp(timerange_backtest.stopts, tz=timezone.utc)
|
||||||
tr_backtesting_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d"))
|
tr_backtesting_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d"))
|
||||||
tr_backtesting_list_timerange.append(copy.deepcopy(timerange_backtest))
|
tr_backtesting_list_timerange.append(copy.deepcopy(timerange_backtest))
|
||||||
|
|
||||||
@ -451,13 +457,35 @@ class FreqaiDataKitchen:
|
|||||||
it is sliced down to just the present training period.
|
it is sliced down to just the present training period.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
start = datetime.datetime.fromtimestamp(timerange.startts, tz=datetime.timezone.utc)
|
start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc)
|
||||||
stop = datetime.datetime.fromtimestamp(timerange.stopts, tz=datetime.timezone.utc)
|
stop = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc)
|
||||||
df = df.loc[df["date"] >= start, :]
|
df = df.loc[df["date"] >= start, :]
|
||||||
df = df.loc[df["date"] <= stop, :]
|
if not self.live:
|
||||||
|
df = df.loc[df["date"] < stop, :]
|
||||||
|
|
||||||
return df
|
return df
|
||||||
|
|
||||||
|
def remove_training_from_backtesting(
|
||||||
|
self
|
||||||
|
) -> DataFrame:
|
||||||
|
"""
|
||||||
|
Function which takes the backtesting time range and
|
||||||
|
remove training data from dataframe, keeping only the
|
||||||
|
startup_candle_count candles
|
||||||
|
"""
|
||||||
|
startup_candle_count = self.config.get('startup_candle_count', 0)
|
||||||
|
tf = self.config['timeframe']
|
||||||
|
tr = self.config["timerange"]
|
||||||
|
|
||||||
|
backtesting_timerange = TimeRange.parse_timerange(tr)
|
||||||
|
if startup_candle_count > 0 and backtesting_timerange:
|
||||||
|
backtesting_timerange.subtract_start(timeframe_to_seconds(tf) * startup_candle_count)
|
||||||
|
|
||||||
|
start = datetime.fromtimestamp(backtesting_timerange.startts, tz=timezone.utc)
|
||||||
|
df = self.return_dataframe
|
||||||
|
df = df.loc[df["date"] >= start, :]
|
||||||
|
return df
|
||||||
|
|
||||||
def principal_component_analysis(self) -> None:
|
def principal_component_analysis(self) -> None:
|
||||||
"""
|
"""
|
||||||
Performs Principal Component Analysis on the data for dimensionality reduction
|
Performs Principal Component Analysis on the data for dimensionality reduction
|
||||||
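The arithmetic behind the new helper above, for a concrete (assumed) setting: with a 1h timeframe and `startup_candle_count: 300`, the start of the backtesting range moves back by 300 candles before the training data is trimmed:

```python
timeframe_seconds = 3600         # '1h', as timeframe_to_seconds would return
startup_candle_count = 300       # assumed strategy setting
shift = timeframe_seconds * startup_candle_count
print(shift, "seconds ==", shift / 86400, "days")  # 1080000 seconds == 12.5 days
```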
@ -652,8 +680,6 @@ class FreqaiDataKitchen:
|
|||||||
is an outlier.
|
is an outlier.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from math import cos, sin
|
|
||||||
|
|
||||||
if predict:
|
if predict:
|
||||||
if not self.data['DBSCAN_eps']:
|
if not self.data['DBSCAN_eps']:
|
||||||
return
|
return
|
||||||
@ -746,6 +772,111 @@ class FreqaiDataKitchen:
|
|||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
|
def compute_inlier_metric(self, set_='train') -> None:
|
||||||
|
"""
|
||||||
|
|
||||||
|
Compute inlier metric from backwards distance distributions.
|
||||||
|
This metric defines how well features from a timepoint fit
|
||||||
|
into previous timepoints.
|
||||||
|
"""
|
||||||
|
|
||||||
|
no_prev_pts = self.freqai_config["feature_parameters"]["inlier_metric_window"]
|
||||||
|
|
||||||
|
if set_ == 'train':
|
||||||
|
compute_df = copy.deepcopy(self.data_dictionary['train_features'])
|
||||||
|
elif set_ == 'test':
|
||||||
|
compute_df = copy.deepcopy(self.data_dictionary['test_features'])
|
||||||
|
else:
|
||||||
|
compute_df = copy.deepcopy(self.data_dictionary['prediction_features'])
|
||||||
|
|
||||||
|
compute_df_reindexed = compute_df.reindex(
|
||||||
|
index=np.flip(compute_df.index)
|
||||||
|
)
|
||||||
|
|
||||||
|
pairwise = pd.DataFrame(
|
||||||
|
np.triu(
|
||||||
|
pairwise_distances(compute_df_reindexed, n_jobs=self.thread_count)
|
||||||
|
),
|
||||||
|
columns=compute_df_reindexed.index,
|
||||||
|
index=compute_df_reindexed.index
|
||||||
|
)
|
||||||
|
pairwise = pairwise.round(5)
|
||||||
|
|
||||||
|
column_labels = [
|
||||||
|
'{}{}'.format('d', i) for i in range(1, no_prev_pts + 1)
|
||||||
|
]
|
||||||
|
distances = pd.DataFrame(
|
||||||
|
columns=column_labels, index=compute_df.index
|
||||||
|
)
|
||||||
|
|
||||||
|
for index in compute_df.index[no_prev_pts:]:
|
||||||
|
current_row = pairwise.loc[[index]]
|
||||||
|
current_row_no_zeros = current_row.loc[
|
||||||
|
:, (current_row != 0).any(axis=0)
|
||||||
|
]
|
||||||
|
distances.loc[[index]] = current_row_no_zeros.iloc[
|
||||||
|
:, :no_prev_pts
|
||||||
|
]
|
||||||
|
distances = distances.replace([np.inf, -np.inf], np.nan)
|
||||||
|
drop_index = pd.isnull(distances).any(1)
|
||||||
|
distances = distances[drop_index == 0]
|
||||||
|
|
||||||
|
inliers = pd.DataFrame(index=distances.index)
|
||||||
|
for key in distances.keys():
|
||||||
|
current_distances = distances[key].dropna()
|
||||||
|
fit_params = stats.weibull_min.fit(current_distances)
|
||||||
|
quantiles = stats.weibull_min.cdf(current_distances, *fit_params)
|
||||||
|
|
||||||
|
df_inlier = pd.DataFrame(
|
||||||
|
{key: quantiles}, index=distances.index
|
||||||
|
)
|
||||||
|
inliers = pd.concat(
|
||||||
|
[inliers, df_inlier], axis=1
|
||||||
|
)
|
||||||
|
|
||||||
|
inlier_metric = pd.DataFrame(
|
||||||
|
data=inliers.sum(axis=1) / no_prev_pts,
|
||||||
|
columns=['inlier_metric'],
|
||||||
|
index=compute_df.index
|
||||||
|
)
|
||||||
|
|
||||||
|
inlier_metric = (2 * (inlier_metric - inlier_metric.min()) /
|
||||||
|
(inlier_metric.max() - inlier_metric.min()) - 1)
|
||||||
|
|
||||||
|
if set_ in ('train', 'test'):
|
||||||
|
inlier_metric = inlier_metric.iloc[no_prev_pts:]
|
||||||
|
compute_df = compute_df.iloc[no_prev_pts:]
|
||||||
|
self.remove_beginning_points_from_data_dict(set_, no_prev_pts)
|
||||||
|
self.data_dictionary[f'{set_}_features'] = pd.concat(
|
||||||
|
[compute_df, inlier_metric], axis=1)
|
||||||
|
else:
|
||||||
|
self.data_dictionary['prediction_features'] = pd.concat(
|
||||||
|
[compute_df, inlier_metric], axis=1)
|
||||||
|
self.data_dictionary['prediction_features'].fillna(0, inplace=True)
|
||||||
|
|
||||||
|
logger.info('Inlier metric computed and added to features.')
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
def remove_beginning_points_from_data_dict(self, set_='train', no_prev_pts: int = 10):
|
||||||
|
features = self.data_dictionary[f'{set_}_features']
|
||||||
|
weights = self.data_dictionary[f'{set_}_weights']
|
||||||
|
labels = self.data_dictionary[f'{set_}_labels']
|
||||||
|
self.data_dictionary[f'{set_}_weights'] = weights[no_prev_pts:]
|
||||||
|
self.data_dictionary[f'{set_}_features'] = features.iloc[no_prev_pts:]
|
||||||
|
self.data_dictionary[f'{set_}_labels'] = labels.iloc[no_prev_pts:]
|
||||||
|
|
||||||
|
def add_noise_to_training_features(self) -> None:
|
||||||
|
"""
|
||||||
|
Add noise to train features to reduce the risk of overfitting.
|
||||||
|
"""
|
||||||
|
mu = 0 # no shift
|
||||||
|
sigma = self.freqai_config["feature_parameters"]["noise_standard_deviation"]
|
||||||
|
compute_df = self.data_dictionary['train_features']
|
||||||
|
noise = np.random.normal(mu, sigma, [compute_df.shape[0], compute_df.shape[1]])
|
||||||
|
self.data_dictionary['train_features'] += noise
|
||||||
|
return
|
||||||
|
|
||||||
def find_features(self, dataframe: DataFrame) -> None:
|
def find_features(self, dataframe: DataFrame) -> None:
|
||||||
"""
|
"""
|
||||||
Find features in the strategy provided dataframe
|
Find features in the strategy provided dataframe
|
||||||
@ -848,6 +979,7 @@ class FreqaiDataKitchen:
|
|||||||
to_keep = [col for col in dataframe.columns if not col.startswith("&")]
|
to_keep = [col for col in dataframe.columns if not col.startswith("&")]
|
||||||
self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1)
|
self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1)
|
||||||
|
|
||||||
|
self.return_dataframe = self.remove_training_from_backtesting()
|
||||||
self.full_df = DataFrame()
|
self.full_df = DataFrame()
|
||||||
|
|
||||||
return
|
return
|
||||||
@ -871,14 +1003,14 @@ class FreqaiDataKitchen:
@@ -871,14 +1003,14 @@ class FreqaiDataKitchen:
                 "Please indicate the end date of your desired backtesting. "
                 "timerange.")
             # backtest_timerange.stopts = int(
-            #     datetime.datetime.now(tz=datetime.timezone.utc).timestamp()
+            #     datetime.now(tz=timezone.utc).timestamp()
             # )

         backtest_timerange.startts = (
             backtest_timerange.startts - backtest_period_days * SECONDS_IN_DAY
         )
-        start = datetime.datetime.utcfromtimestamp(backtest_timerange.startts)
-        stop = datetime.datetime.utcfromtimestamp(backtest_timerange.stopts)
+        start = datetime.fromtimestamp(backtest_timerange.startts, tz=timezone.utc)
+        stop = datetime.fromtimestamp(backtest_timerange.stopts, tz=timezone.utc)
         full_timerange = start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d")

         self.full_path = Path(

@@ -904,7 +1036,7 @@ class FreqaiDataKitchen:
         :return:
         bool = If the model is expired or not.
         """
-        time = datetime.datetime.now(tz=datetime.timezone.utc).timestamp()
+        time = datetime.now(tz=timezone.utc).timestamp()
         elapsed_time = (time - trained_timestamp) / 3600  # hours
         max_time = self.freqai_config.get("expiration_hours", 0)
         if max_time > 0:

@@ -916,7 +1048,7 @@ class FreqaiDataKitchen:
         self, trained_timestamp: int
     ) -> Tuple[bool, TimeRange, TimeRange]:

-        time = datetime.datetime.now(tz=datetime.timezone.utc).timestamp()
+        time = datetime.now(tz=timezone.utc).timestamp()
         trained_timerange = TimeRange()
         data_load_timerange = TimeRange()

@ -1094,7 +1226,6 @@ class FreqaiDataKitchen:
|
|||||||
def save_backtesting_prediction(
|
def save_backtesting_prediction(
|
||||||
self, append_df: DataFrame
|
self, append_df: DataFrame
|
||||||
) -> None:
|
) -> None:
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Save prediction dataframe from backtesting to h5 file format
|
Save prediction dataframe from backtesting to h5 file format
|
||||||
:param append_df: dataframe for backtesting period
|
:param append_df: dataframe for backtesting period
|
||||||
@ -1108,7 +1239,6 @@ class FreqaiDataKitchen:
|
|||||||
def get_backtesting_prediction(
|
def get_backtesting_prediction(
|
||||||
self
|
self
|
||||||
) -> DataFrame:
|
) -> DataFrame:
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Get prediction dataframe from h5 file format
|
Get prediction dataframe from h5 file format
|
||||||
"""
|
"""
|
||||||
|
@ -1,13 +1,12 @@
|
|||||||
# import contextlib
|
|
||||||
import datetime
|
|
||||||
import logging
|
import logging
|
||||||
import shutil
|
import shutil
|
||||||
import threading
|
import threading
|
||||||
import time
|
import time
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
|
from datetime import datetime, timezone
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from threading import Lock
|
from threading import Lock
|
||||||
from typing import Any, Dict, Optional, Tuple
|
from typing import Any, Dict, List, Optional, Tuple
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
@ -15,6 +14,7 @@ from numpy.typing import NDArray
|
|||||||
from pandas import DataFrame
|
from pandas import DataFrame
|
||||||
|
|
||||||
from freqtrade.configuration import TimeRange
|
from freqtrade.configuration import TimeRange
|
||||||
|
from freqtrade.constants import DATETIME_PRINT_FORMAT
|
||||||
from freqtrade.enums import RunMode
|
from freqtrade.enums import RunMode
|
||||||
from freqtrade.exceptions import OperationalException
|
from freqtrade.exceptions import OperationalException
|
||||||
from freqtrade.exchange import timeframe_to_seconds
|
from freqtrade.exchange import timeframe_to_seconds
|
||||||
@ -27,13 +27,6 @@ pd.options.mode.chained_assignment = None
|
|||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def threaded(fn):
|
|
||||||
def wrapper(*args, **kwargs):
|
|
||||||
threading.Thread(target=fn, args=args, kwargs=kwargs).start()
|
|
||||||
|
|
||||||
return wrapper
|
|
||||||
|
|
||||||
|
|
||||||
class IFreqaiModel(ABC):
|
class IFreqaiModel(ABC):
|
||||||
"""
|
"""
|
||||||
Class containing all tools for training and prediction in the strategy.
|
Class containing all tools for training and prediction in the strategy.
|
||||||
@ -66,7 +59,6 @@ class IFreqaiModel(ABC):
|
|||||||
"data_split_parameters", {})
|
"data_split_parameters", {})
|
||||||
self.model_training_parameters: Dict[str, Any] = config.get("freqai", {}).get(
|
self.model_training_parameters: Dict[str, Any] = config.get("freqai", {}).get(
|
||||||
"model_training_parameters", {})
|
"model_training_parameters", {})
|
||||||
self.feature_parameters = config.get("freqai", {}).get("feature_parameters")
|
|
||||||
self.retrain = False
|
self.retrain = False
|
||||||
self.first = True
|
self.first = True
|
||||||
self.set_full_path()
|
self.set_full_path()
|
||||||
@@ -77,11 +69,14 @@ class IFreqaiModel(ABC):
         self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode)
         self.identifier: str = self.freqai_info.get("identifier", "no_id_provided")
         self.scanning = False
+        self.ft_params = self.freqai_info["feature_parameters"]
         self.keras: bool = self.freqai_info.get("keras", False)
-        if self.keras and self.freqai_info.get("feature_parameters", {}).get("DI_threshold", 0):
-            self.freqai_info["feature_parameters"]["DI_threshold"] = 0
+        if self.keras and self.ft_params.get("DI_threshold", 0):
+            self.ft_params["DI_threshold"] = 0
             logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
         self.CONV_WIDTH = self.freqai_info.get("conv_width", 2)
+        if self.ft_params.get("inlier_metric_window", 0):
+            self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2
         self.pair_it = 0
         self.pair_it_train = 0
         self.total_pairs = len(self.config.get("exchange", {}).get("pair_whitelist"))
@@ -93,6 +88,16 @@ class IFreqaiModel(ABC):
         self.begin_time: float = 0
         self.begin_time_train: float = 0
         self.base_tf_seconds = timeframe_to_seconds(self.config['timeframe'])
+        self.continual_learning = self.freqai_info.get('continual_learning', False)
+
+        self._threads: List[threading.Thread] = []
+        self._stop_event = threading.Event()
+
+    def __getstate__(self):
+        """
+        Return an empty state to be pickled in hyperopt
+        """
+        return ({})
         self.strategy: Optional[IStrategy] = None
 
     def assert_config(self, config: Dict[str, Any]) -> None:
@@ -148,15 +153,34 @@ class IFreqaiModel(ABC):
         self.model = None
         self.dk = None
 
-    @threaded
-    def start_scanning(self, strategy: IStrategy) -> None:
+    def shutdown(self):
+        """
+        Cleans up threads on Shutdown, set stop event. Join threads to wait
+        for current training iteration.
+        """
+        logger.info("Stopping FreqAI")
+        self._stop_event.set()
+
+        logger.info("Waiting on Training iteration")
+        for _thread in self._threads:
+            _thread.join()
+
+    def start_scanning(self, *args, **kwargs) -> None:
+        """
+        Start `self._start_scanning` in a separate thread
+        """
+        _thread = threading.Thread(target=self._start_scanning, args=args, kwargs=kwargs)
+        self._threads.append(_thread)
+        _thread.start()
+
+    def _start_scanning(self, strategy: IStrategy) -> None:
         """
         Function designed to constantly scan pairs for retraining on a separate thread (intracandle)
         to improve model youth. This function is agnostic to data preparation/collection/storage,
         it simply trains on what ever data is available in the self.dd.
         :param strategy: IStrategy = The user defined strategy class
         """
-        while 1:
+        while not self._stop_event.is_set():
             time.sleep(1)
             for pair in self.config.get("exchange", {}).get("pair_whitelist"):
 
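The hunk above replaces the fire-and-forget `@threaded` decorator with threads the class tracks itself: `start_scanning()` records the `Thread` handle, the scan loop polls a `threading.Event`, and `shutdown()` sets the event and joins. A minimal standalone sketch of the same pattern, using only the standard library (the class and method names here are illustrative, not freqtrade's):

```python
import threading
import time
from typing import List


class Scanner:
    def __init__(self) -> None:
        self._threads: List[threading.Thread] = []
        self._stop_event = threading.Event()

    def start_scanning(self) -> None:
        # Keep the handle so shutdown() can join the thread later.
        thread = threading.Thread(target=self._scan)
        self._threads.append(thread)
        thread.start()

    def _scan(self) -> None:
        # Polling an Event instead of `while 1` makes the loop stoppable.
        while not self._stop_event.is_set():
            time.sleep(1)

    def shutdown(self) -> None:
        self._stop_event.set()      # ask the loop to exit
        for thread in self._threads:
            thread.join()           # wait for the current iteration to finish


scanner = Scanner()
scanner.start_scanning()
scanner.shutdown()
```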
@@ -175,7 +199,7 @@ class IFreqaiModel(ABC):
 
                 if retrain:
                     self.train_timer('start')
-                    self.train_model_in_series(
+                    self.extract_data_and_train_model(
                         new_trained_timerange, pair, strategy, dk, data_load_timerange
                     )
                     self.train_timer('stop')
@@ -215,12 +239,12 @@ class IFreqaiModel(ABC):
             dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe)
 
             trained_timestamp = tr_train
-            tr_train_startts_str = datetime.datetime.utcfromtimestamp(tr_train.startts).strftime(
-                "%Y-%m-%d %H:%M:%S"
-            )
-            tr_train_stopts_str = datetime.datetime.utcfromtimestamp(tr_train.stopts).strftime(
-                "%Y-%m-%d %H:%M:%S"
-            )
+            tr_train_startts_str = datetime.fromtimestamp(
+                tr_train.startts,
+                tz=timezone.utc).strftime(DATETIME_PRINT_FORMAT)
+            tr_train_stopts_str = datetime.fromtimestamp(
+                tr_train.stopts,
+                tz=timezone.utc).strftime(DATETIME_PRINT_FORMAT)
             logger.info(
                 f"Training {metadata['pair']}, {self.pair_it}/{self.total_pairs} pairs"
                 f" from {tr_train_startts_str} to {tr_train_stopts_str}, {train_it}/{total_trains} "
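The rewrite above swaps the naive `utcfromtimestamp()` for an explicitly timezone-aware conversion. For a UTC timestamp both render the same wall-clock time; only the `tzinfo` differs. A quick check (the literal format string stands in for freqtrade's `DATETIME_PRINT_FORMAT` constant):

```python
from datetime import datetime, timezone

ts = 1662000000  # arbitrary epoch timestamp

naive = datetime.utcfromtimestamp(ts)                # tzinfo is None
aware = datetime.fromtimestamp(ts, tz=timezone.utc)  # tzinfo is UTC

print(naive.strftime("%Y-%m-%d %H:%M:%S"))  # 2022-09-01 02:40:00
print(aware.strftime("%Y-%m-%d %H:%M:%S"))  # 2022-09-01 02:40:00, but unambiguous
```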
@@ -405,24 +429,30 @@ class IFreqaiModel(ABC):
 
     def data_cleaning_train(self, dk: FreqaiDataKitchen) -> None:
         """
-        Base data cleaning method for train
-        Any function inside this method should drop training data points from the filtered_dataframe
-        based on user decided logic. See FreqaiDataKitchen::use_SVM_to_remove_outliers() for an
-        example of how outlier data points are dropped from the dataframe used for training.
+        Base data cleaning method for train.
+        Functions here improve/modify the input data by identifying outliers,
+        computing additional metrics, adding noise, reducing dimensionality etc.
         """
 
-        if self.freqai_info["feature_parameters"].get(
+        ft_params = self.freqai_info["feature_parameters"]
+
+        if ft_params.get('inlier_metric_window', 0):
+            dk.compute_inlier_metric(set_='train')
+            if self.freqai_info["data_split_parameters"]["test_size"] > 0:
+                dk.compute_inlier_metric(set_='test')
+
+        if ft_params.get(
             "principal_component_analysis", False
         ):
             dk.principal_component_analysis()
 
-        if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False):
+        if ft_params.get("use_SVM_to_remove_outliers", False):
             dk.use_SVM_to_remove_outliers(predict=False)
 
-        if self.freqai_info["feature_parameters"].get("DI_threshold", 0):
+        if ft_params.get("DI_threshold", 0):
             dk.data["avg_mean_dist"] = dk.compute_distances()
 
-        if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False):
+        if ft_params.get("use_DBSCAN_to_remove_outliers", False):
             if dk.pair in self.dd.old_DBSCAN_eps:
                 eps = self.dd.old_DBSCAN_eps[dk.pair]
             else:
@@ -430,29 +460,31 @@ class IFreqaiModel(ABC):
             dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps)
             self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps']
 
+        if self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0):
+            dk.add_noise_to_training_features()
+
     def data_cleaning_predict(self, dk: FreqaiDataKitchen, dataframe: DataFrame) -> None:
         """
         Base data cleaning method for predict.
-        These functions each modify dk.do_predict, which is a dataframe with equal length
-        to the number of candles coming from and returning to the strategy. Inside do_predict,
-        1 allows prediction and < 0 signals to the strategy that the model is not confident in
-        the prediction.
-        See FreqaiDataKitchen::remove_outliers() for an example
-        of how the do_predict vector is modified. do_predict is ultimately passed back to strategy
-        for buy signals.
+        Functions here are complementary to the functions of data_cleaning_train.
         """
-        if self.freqai_info["feature_parameters"].get(
+        ft_params = self.freqai_info["feature_parameters"]
+
+        if ft_params.get('inlier_metric_window', 0):
+            dk.compute_inlier_metric(set_='predict')
+
+        if ft_params.get(
             "principal_component_analysis", False
         ):
-            dk.pca_transform(dataframe)
+            dk.pca_transform(self.dk.data_dictionary['prediction_features'])
 
-        if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False):
+        if ft_params.get("use_SVM_to_remove_outliers", False):
             dk.use_SVM_to_remove_outliers(predict=True)
 
-        if self.freqai_info["feature_parameters"].get("DI_threshold", 0):
+        if ft_params.get("DI_threshold", 0):
             dk.check_if_pred_in_training_spaces()
 
-        if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False):
+        if ft_params.get("use_DBSCAN_to_remove_outliers", False):
             dk.use_DBSCAN_to_remove_outliers(predict=True)
 
     def model_exists(
@@ -488,7 +520,7 @@ class IFreqaiModel(ABC):
             Path(self.full_path, Path(self.config["config_files"][0]).name),
         )
 
-    def train_model_in_series(
+    def extract_data_and_train_model(
         self,
         new_trained_timerange: TimeRange,
         pair: str,
@@ -580,7 +612,7 @@ class IFreqaiModel(ABC):
 
        # # for keras type models, the conv_window needs to be prepended so
        # # viewing is correct in frequi
-        if self.freqai_info.get('keras', False):
+        if self.freqai_info.get('keras', False) or self.ft_params.get('inlier_metric_window', 0):
            n_lost_points = self.freqai_info.get('conv_width', 2)
            zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))),
                                 columns=hist_preds_df.columns)
@@ -646,21 +678,30 @@ class IFreqaiModel(ABC):
             self.train_time = 0
             return
 
+    def get_init_model(self, pair: str) -> Any:
+        if pair not in self.dd.model_dictionary or not self.continual_learning:
+            init_model = None
+        else:
+            init_model = self.dd.model_dictionary[pair]
+
+        return init_model
+
     # Following methods which are overridden by user made prediction models.
     # See freqai/prediction_models/CatboostPredictionModel.py for an example.
 
     @abstractmethod
-    def train(self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen) -> Any:
+    def train(self, unfiltered_df: DataFrame, pair: str,
+              dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         Filter the training data and train a model to it. Train makes heavy use of the datahandler
         for storing, saving, loading, and analyzing the data.
-        :param unfiltered_dataframe: Full dataframe for the current training period
+        :param unfiltered_df: Full dataframe for the current training period
         :param metadata: pair metadata from strategy.
         :return: Trained model which can be used to inference (self.predict)
         """
 
     @abstractmethod
-    def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any:
+    def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         Most regressors use the same function names and arguments e.g. user
         can drop in LGBMRegressor in place of CatBoostRegressor and all data
@@ -673,11 +714,11 @@ class IFreqaiModel(ABC):
 
     @abstractmethod
     def predict(
-        self, dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = True
+        self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
    ) -> Tuple[DataFrame, NDArray[np.int_]]:
         """
         Filter the prediction features data and predict with it.
-        :param unfiltered_dataframe: Full dataframe for the current backtest period.
+        :param unfiltered_df: Full dataframe for the current backtest period.
         :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
         :param first: boolean = whether this is the first prediction or not.
         :return:
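`continual_learning` hinges on the `get_init_model()` helper added above: each model file's `fit()` (see the hunks that follow) now asks for the last model trained for the pair and, when one exists, resumes training from it instead of starting cold. The lookup reduced to a sketch, with a plain dict standing in for the data drawer's `model_dictionary`:

```python
from typing import Any, Dict, Optional

model_dictionary: Dict[str, Any] = {}  # stand-in for self.dd.model_dictionary
continual_learning = True              # stand-in for self.continual_learning


def get_init_model(pair: str) -> Optional[Any]:
    # Cold start when the feature is off or no model was stored for the pair.
    if pair not in model_dictionary or not continual_learning:
        return None
    return model_dictionary[pair]


print(get_init_model("BTC/USDT"))  # None on the first training iteration
```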
@@ -3,7 +3,8 @@ from typing import Any, Dict
 
 from catboost import CatBoostClassifier, Pool
 
-from freqtrade.freqai.prediction_models.BaseClassifierModel import BaseClassifierModel
+from freqtrade.freqai.base_models.BaseClassifierModel import BaseClassifierModel
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 
 
 logger = logging.getLogger(__name__)
@@ -16,7 +17,7 @@ class CatboostClassifier(BaseClassifierModel):
     has its own DataHandler where data is held, saved, loaded, and managed.
     """
 
-    def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any:
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
         :params:
@@ -36,6 +37,8 @@ class CatboostClassifier(BaseClassifierModel):
             **self.model_training_parameters,
         )
 
-        cbr.fit(train_data)
+        init_model = self.get_init_model(dk.pair)
+
+        cbr.fit(train_data, init_model=init_model)
 
         return cbr
@@ -1,10 +1,10 @@
-import gc
 import logging
 from typing import Any, Dict
 
 from catboost import CatBoostRegressor, Pool
 
-from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 
 
 logger = logging.getLogger(__name__)
@@ -17,7 +17,7 @@ class CatboostRegressor(BaseRegressionModel):
     has its own DataHandler where data is held, saved, loaded, and managed.
     """
 
-    def fit(self, data_dictionary: Dict[str, Any], pair: str = '') -> Any:
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
         :param data_dictionary: the dictionary constructed by DataHandler to hold
@@ -38,16 +38,13 @@ class CatboostRegressor(BaseRegressionModel):
             weight=data_dictionary["test_weights"],
         )
 
+        init_model = self.get_init_model(dk.pair)
+
         model = CatBoostRegressor(
             allow_writing_files=False,
             **self.model_training_parameters,
         )
 
-        model.fit(X=train_data, eval_set=test_data)
+        model.fit(X=train_data, eval_set=test_data, init_model=init_model)
 
-        # some evidence that catboost pools have memory leaks:
-        # https://github.com/catboost/catboost/issues/1835
-        del train_data, test_data
-        gc.collect()
-
         return model
@@ -1,10 +1,11 @@
 import logging
 from typing import Any, Dict
 
-from catboost import CatBoostRegressor  # , Pool
-from sklearn.multioutput import MultiOutputRegressor
+from catboost import CatBoostRegressor, Pool
 
-from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.base_models.FreqaiMultiOutputRegressor import FreqaiMultiOutputRegressor
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 
 
 logger = logging.getLogger(__name__)
@@ -17,7 +18,7 @@ class CatboostRegressorMultiTarget(BaseRegressionModel):
     has its own DataHandler where data is held, saved, loaded, and managed.
     """
 
-    def fit(self, data_dictionary: Dict) -> Any:
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
         :param data_dictionary: the dictionary constructed by DataHandler to hold
@@ -31,14 +32,37 @@ class CatboostRegressorMultiTarget(BaseRegressionModel):
 
         X = data_dictionary["train_features"]
         y = data_dictionary["train_labels"]
-        eval_set = (data_dictionary["test_features"], data_dictionary["test_labels"])
         sample_weight = data_dictionary["train_weights"]
 
-        model = MultiOutputRegressor(estimator=cbr)
-        model.fit(X=X, y=y, sample_weight=sample_weight)  # , eval_set=eval_set)
+        eval_sets = [None] * y.shape[1]
+
         if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
-            train_score = model.score(X, y)
-            test_score = model.score(*eval_set)
-            logger.info(f"Train score {train_score}, Test score {test_score}")
+            eval_sets = [None] * data_dictionary['test_labels'].shape[1]
+
+            for i in range(data_dictionary['test_labels'].shape[1]):
+                eval_sets[i] = Pool(
+                    data=data_dictionary["test_features"],
+                    label=data_dictionary["test_labels"].iloc[:, i],
+                    weight=data_dictionary["test_weights"],
+                )
+
+        init_model = self.get_init_model(dk.pair)
+
+        if init_model:
+            init_models = init_model.estimators_
+        else:
+            init_models = [None] * y.shape[1]
+
+        fit_params = []
+        for i in range(len(eval_sets)):
+            fit_params.append(
+                {'eval_set': eval_sets[i], 'init_model': init_models[i]})
+
+        model = FreqaiMultiOutputRegressor(estimator=cbr)
+        thread_training = self.freqai_info.get('multitarget_parallel_training', False)
+        if thread_training:
+            model.n_jobs = y.shape[1]
+        model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params)
 
         return model
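sklearn's stock `MultiOutputRegressor.fit()` cannot forward a different `eval_set` or `init_model` to each underlying estimator, which is why the multi-target hunks build a `fit_params` list with one dict per label column and hand it to the new `FreqaiMultiOutputRegressor`. A simplified sketch of that idea, under stated assumptions (the class name is hypothetical, not freqtrade's implementation; `y` is a pandas DataFrame; the wrapped estimator accepts `sample_weight`):

```python
from sklearn.base import clone
from sklearn.multioutput import MultiOutputRegressor


class PerTargetFitParamsRegressor(MultiOutputRegressor):
    # Illustrative only: fit one clone per target column, forwarding a
    # per-target dict of fit parameters (eval_set, init_model, ...).
    def fit(self, X, y, sample_weight=None, fit_params=None):
        if fit_params is None:
            fit_params = [{}] * y.shape[1]
        self.estimators_ = [
            clone(self.estimator).fit(X, y.iloc[:, i],
                                      sample_weight=sample_weight, **fit_params[i])
            for i in range(y.shape[1])
        ]
        return self
```

Setting `model.n_jobs = y.shape[1]` in the hunks above parallelises exactly this per-target loop when `multitarget_parallel_training` is enabled.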
@@ -3,7 +3,8 @@ from typing import Any, Dict
 
 from lightgbm import LGBMClassifier
 
-from freqtrade.freqai.prediction_models.BaseClassifierModel import BaseClassifierModel
+from freqtrade.freqai.base_models.BaseClassifierModel import BaseClassifierModel
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 
 
 logger = logging.getLogger(__name__)
@@ -16,7 +17,7 @@ class LightGBMClassifier(BaseClassifierModel):
     has its own DataHandler where data is held, saved, loaded, and managed.
     """
 
-    def fit(self, data_dictionary: Dict) -> Any:
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
         :params:
@@ -35,9 +36,11 @@ class LightGBMClassifier(BaseClassifierModel):
         y = data_dictionary["train_labels"].to_numpy()[:, 0]
         train_weights = data_dictionary["train_weights"]
 
+        init_model = self.get_init_model(dk.pair)
+
         model = LGBMClassifier(**self.model_training_parameters)
 
         model.fit(X=X, y=y, eval_set=eval_set, sample_weight=train_weights,
-                  eval_sample_weight=[test_weights])
+                  eval_sample_weight=[test_weights], init_model=init_model)
 
         return model
@@ -3,7 +3,8 @@ from typing import Any, Dict
 
 from lightgbm import LGBMRegressor
 
-from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 
 
 logger = logging.getLogger(__name__)
@@ -16,7 +17,7 @@ class LightGBMRegressor(BaseRegressionModel):
     has its own DataHandler where data is held, saved, loaded, and managed.
     """
 
-    def fit(self, data_dictionary: Dict) -> Any:
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         Most regressors use the same function names and arguments e.g. user
         can drop in LGBMRegressor in place of CatBoostRegressor and all data
@@ -35,9 +36,11 @@ class LightGBMRegressor(BaseRegressionModel):
         y = data_dictionary["train_labels"]
         train_weights = data_dictionary["train_weights"]
 
+        init_model = self.get_init_model(dk.pair)
+
         model = LGBMRegressor(**self.model_training_parameters)
 
         model.fit(X=X, y=y, eval_set=eval_set, sample_weight=train_weights,
-                  eval_sample_weight=[eval_weights])
+                  eval_sample_weight=[eval_weights], init_model=init_model)
 
         return model
@@ -2,9 +2,10 @@ import logging
 from typing import Any, Dict
 
 from lightgbm import LGBMRegressor
-from sklearn.multioutput import MultiOutputRegressor
 
-from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.base_models.FreqaiMultiOutputRegressor import FreqaiMultiOutputRegressor
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 
 
 logger = logging.getLogger(__name__)
@@ -17,7 +18,7 @@ class LightGBMRegressorMultiTarget(BaseRegressionModel):
     has its own DataHandler where data is held, saved, loaded, and managed.
     """
 
-    def fit(self, data_dictionary: Dict) -> Any:
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
         :param data_dictionary: the dictionary constructed by DataHandler to hold
@@ -28,12 +29,36 @@ class LightGBMRegressorMultiTarget(BaseRegressionModel):
 
         X = data_dictionary["train_features"]
         y = data_dictionary["train_labels"]
-        eval_set = (data_dictionary["test_features"], data_dictionary["test_labels"])
         sample_weight = data_dictionary["train_weights"]
 
-        model = MultiOutputRegressor(estimator=lgb)
-        model.fit(X=X, y=y, sample_weight=sample_weight)  # , eval_set=eval_set)
-        train_score = model.score(X, y)
-        test_score = model.score(*eval_set)
-        logger.info(f"Train score {train_score}, Test score {test_score}")
+        eval_weights = None
+        eval_sets = [None] * y.shape[1]
+
+        if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
+            eval_weights = [data_dictionary["test_weights"]]
+            eval_sets = [(None, None)] * data_dictionary['test_labels'].shape[1]  # type: ignore
+            for i in range(data_dictionary['test_labels'].shape[1]):
+                eval_sets[i] = (  # type: ignore
+                    data_dictionary["test_features"],
+                    data_dictionary["test_labels"].iloc[:, i]
+                )
+
+        init_model = self.get_init_model(dk.pair)
+        if init_model:
+            init_models = init_model.estimators_
+        else:
+            init_models = [None] * y.shape[1]
+
+        fit_params = []
+        for i in range(len(eval_sets)):
+            fit_params.append(
+                {'eval_set': eval_sets[i], 'eval_sample_weight': eval_weights,
+                 'init_model': init_models[i]})
+
+        model = FreqaiMultiOutputRegressor(estimator=lgb)
+        thread_training = self.freqai_info.get('multitarget_parallel_training', False)
+        if thread_training:
+            model.n_jobs = y.shape[1]
+        model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params)
 
         return model
freqtrade/freqai/prediction_models/XGBoostRegressor.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+import logging
+from typing import Any, Dict
+
+from xgboost import XGBRegressor
+
+from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
+
+
+logger = logging.getLogger(__name__)
+
+
+class XGBoostRegressor(BaseRegressionModel):
+    """
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
+    """
+
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
+        """
+        User sets up the training and test data to fit their desired model here
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
+        """
+
+        X = data_dictionary["train_features"]
+        y = data_dictionary["train_labels"]
+
+        if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0:
+            eval_set = None
+        else:
+            eval_set = [(data_dictionary["test_features"], data_dictionary["test_labels"])]
+            eval_weights = [data_dictionary['test_weights']]
+
+        sample_weight = data_dictionary["train_weights"]
+
+        xgb_model = self.get_init_model(dk.pair)
+
+        model = XGBRegressor(**self.model_training_parameters)
+
+        model.fit(X=X, y=y, sample_weight=sample_weight, eval_set=eval_set,
+                  sample_weight_eval_set=eval_weights, xgb_model=xgb_model)
+
+        return model
freqtrade/freqai/prediction_models/XGBoostRegressorMultiTarget.py (new file, 63 lines)
@@ -0,0 +1,63 @@
+import logging
+from typing import Any, Dict
+
+from xgboost import XGBRegressor
+
+from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
+from freqtrade.freqai.base_models.FreqaiMultiOutputRegressor import FreqaiMultiOutputRegressor
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
+
+
+logger = logging.getLogger(__name__)
+
+
+class XGBoostRegressorMultiTarget(BaseRegressionModel):
+    """
+    User created prediction model. The class needs to override three necessary
+    functions, predict(), train(), fit(). The class inherits ModelHandler which
+    has its own DataHandler where data is held, saved, loaded, and managed.
+    """
+
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
+        """
+        User sets up the training and test data to fit their desired model here
+        :param data_dictionary: the dictionary constructed by DataHandler to hold
+        all the training and test data/labels.
+        """
+
+        xgb = XGBRegressor(**self.model_training_parameters)
+
+        X = data_dictionary["train_features"]
+        y = data_dictionary["train_labels"]
+        sample_weight = data_dictionary["train_weights"]
+
+        eval_weights = None
+        eval_sets = [None] * y.shape[1]
+
+        if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
+            eval_weights = [data_dictionary["test_weights"]]
+            for i in range(data_dictionary['test_labels'].shape[1]):
+                eval_sets[i] = [(  # type: ignore
+                    data_dictionary["test_features"],
+                    data_dictionary["test_labels"].iloc[:, i]
+                )]
+
+        init_model = self.get_init_model(dk.pair)
+        if init_model:
+            init_models = init_model.estimators_
+        else:
+            init_models = [None] * y.shape[1]
+
+        fit_params = []
+        for i in range(len(eval_sets)):
+            fit_params.append(
+                {'eval_set': eval_sets[i], 'sample_weight_eval_set': eval_weights,
+                 'xgb_model': init_models[i]})
+
+        model = FreqaiMultiOutputRegressor(estimator=xgb)
+        thread_training = self.freqai_info.get('multitarget_parallel_training', False)
+        if thread_training:
+            model.n_jobs = y.shape[1]
+        model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params)
+
+        return model
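XGBoost's scikit-learn wrapper spells warm starting `xgb_model` rather than LightGBM/CatBoost's `init_model`, which is why the two new files pass `get_init_model()`'s result under that name. A standalone sketch with synthetic data (shapes and parameters are illustrative):

```python
import numpy as np
from xgboost import XGBRegressor

rng = np.random.default_rng(42)
X = rng.random((200, 5))
y = rng.random(200)

first = XGBRegressor(n_estimators=50)
first.fit(X, y)

# Resume boosting from the previous booster instead of starting cold.
second = XGBRegressor(n_estimators=50)
second.fit(X, y, xgb_model=first.get_booster())
```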
@@ -142,15 +142,20 @@ class FreqtradeBot(LoggingMixin):
         :return: None
         """
         logger.info('Cleaning up modules ...')
+        try:
+            # Wrap db activities in shutdown to avoid problems if database is gone,
+            # and raises further exceptions.
+            if self.config['cancel_open_orders_on_exit']:
+                self.cancel_all_open_orders()
 
-        if self.config['cancel_open_orders_on_exit']:
-            self.cancel_all_open_orders()
+            self.check_for_open_trades()
 
-        self.check_for_open_trades()
+        finally:
+            self.strategy.ft_bot_cleanup()
 
         self.rpc.cleanup()
         Trade.commit()
         self.exchange.close()
 
     def startup(self) -> None:
         """
@@ -276,16 +281,17 @@ class FreqtradeBot(LoggingMixin):
     def update_funding_fees(self):
         if self.trading_mode == TradingMode.FUTURES:
             trades = Trade.get_open_trades()
-            for trade in trades:
-                funding_fees = self.exchange.get_funding_fees(
-                    pair=trade.pair,
-                    amount=trade.amount,
-                    is_short=trade.is_short,
-                    open_date=trade.open_date_utc
-                )
-                trade.funding_fees = funding_fees
-        else:
-            return 0.0
+            try:
+                for trade in trades:
+                    funding_fees = self.exchange.get_funding_fees(
+                        pair=trade.pair,
+                        amount=trade.amount,
+                        is_short=trade.is_short,
+                        open_date=trade.date_last_filled_utc
+                    )
+                    trade.funding_fees = funding_fees
+            except ExchangeError:
+                logger.warning("Could not update funding fees for open trades.")
 
     def startup_backpopulate_precision(self):
 
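The same defensive shape recurs in the funding-fee hunks that follow: keep a safe default, attempt the exchange call, and degrade to a warning on `ExchangeError` so a flaky endpoint cannot abort trade handling. Reduced to its skeleton (the exception class and fetch function here are stand-ins, not freqtrade's):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class ExchangeError(Exception):
    """Stand-in for freqtrade.exceptions.ExchangeError."""


def get_funding_fees() -> float:
    raise ExchangeError("funding endpoint unavailable")  # simulate an outage


funding_fees = 0.0  # safe default, used when the call below fails
try:
    funding_fees = get_funding_fees()
except ExchangeError:
    logger.warning("Could not update funding fees for open trades.")

print(funding_fees)  # 0.0 - the bot keeps running
```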
@@ -578,7 +584,9 @@ class FreqtradeBot(LoggingMixin):
 
         if stake_amount is not None and stake_amount < 0.0:
             # We should decrease our position
-            amount = abs(float(FtPrecise(stake_amount) / FtPrecise(current_exit_rate)))
+            amount = self.exchange.amount_to_contract_precision(
+                trade.pair,
+                abs(float(FtPrecise(stake_amount) / FtPrecise(current_exit_rate))))
             if amount > trade.amount:
                 # This is currently ineffective as remaining would become < min tradable
                 # Fixing this would require checking for 0.0 there -
@@ -587,9 +595,14 @@ class FreqtradeBot(LoggingMixin):
                     f"Adjusting amount to trade.amount as it is higher. {amount} > {trade.amount}")
                 amount = trade.amount
 
+            if amount == 0.0:
+                logger.info("Amount to sell is 0.0 due to exchange limits - not selling.")
+                return
+
             remaining = (trade.amount - amount) * current_exit_rate
             if remaining < min_exit_stake:
-                logger.info(f'Remaining amount of {remaining} would be too small.')
+                logger.info(f"Remaining amount of {remaining} would be smaller "
+                            f"than the minimum of {min_exit_stake}.")
                 return
 
             self.execute_trade_exit(trade, current_exit_rate, exit_check=ExitCheckTuple(
@@ -659,14 +672,12 @@ class FreqtradeBot(LoggingMixin):
         if not stake_amount:
             return False
 
-        if pos_adjust:
-            logger.info(f"Position adjust: about to create a new order for {pair} with stake: "
-                        f"{stake_amount} for {trade}")
-        else:
-            logger.info(
-                f"{name} signal found: about create a new trade for {pair} with stake_amount: "
-                f"{stake_amount} ...")
+        msg = (f"Position adjust: about to create a new order for {pair} with stake: "
+               f"{stake_amount} for {trade}" if pos_adjust
+               else
+               f"{name} signal found: about create a new trade for {pair} with stake_amount: "
+               f"{stake_amount} ...")
+        logger.info(msg)
 
         amount = (stake_amount / enter_limit_requested) * leverage
         order_type = ordertype or self.strategy.order_types['entry']
 
@@ -726,10 +737,16 @@ class FreqtradeBot(LoggingMixin):
         fee = self.exchange.get_fee(symbol=pair, taker_or_maker='maker')
         base_currency = self.exchange.get_pair_base_currency(pair)
         open_date = datetime.now(timezone.utc)
-        funding_fees = self.exchange.get_funding_fees(
-            pair=pair, amount=amount, is_short=is_short, open_date=open_date)
+
         # This is a new trade
         if trade is None:
+            funding_fees = 0.0
+            try:
+                funding_fees = self.exchange.get_funding_fees(
+                    pair=pair, amount=amount, is_short=is_short, open_date=open_date)
+            except ExchangeError:
+                logger.warning("Could not find funding fee.")
+
             trade = Trade(
                 pair=pair,
                 base_currency=base_currency,
@@ -906,7 +923,7 @@ class FreqtradeBot(LoggingMixin):
             'stake_amount': trade.stake_amount,
             'stake_currency': self.config['stake_currency'],
             'fiat_currency': self.config.get('fiat_display_currency', None),
-            'amount': order.safe_amount_after_fee,
+            'amount': order.safe_amount_after_fee if fill else order.amount,
             'open_date': trade.open_date or datetime.utcnow(),
             'current_rate': current_rate,
             'sub_trade': sub_trade,
@@ -1480,12 +1497,16 @@ class FreqtradeBot(LoggingMixin):
         :param exit_check: CheckTuple with signal and reason
         :return: True if it succeeds False
         """
-        trade.funding_fees = self.exchange.get_funding_fees(
-            pair=trade.pair,
-            amount=trade.amount,
-            is_short=trade.is_short,
-            open_date=trade.open_date_utc,
-        )
+        try:
+            trade.funding_fees = self.exchange.get_funding_fees(
+                pair=trade.pair,
+                amount=trade.amount,
+                is_short=trade.is_short,
+                open_date=trade.date_last_filled_utc,
+            )
+        except ExchangeError:
+            logger.warning("Could not update funding fee.")
+
         exit_type = 'exit'
         exit_reason = exit_tag or exit_check.exit_reason
         if exit_check.exit_type in (
@@ -537,7 +537,11 @@ class Backtesting:
             return pos_trade
 
         if stake_amount is not None and stake_amount < 0.0:
-            amount = abs(stake_amount) / current_rate
+            amount = amount_to_contract_precision(
+                abs(stake_amount) / current_rate, trade.amount_precision,
+                self.precision_mode, trade.contract_size)
+            if amount == 0.0:
+                return trade
             if amount > trade.amount:
                 # This is currently ineffective as remaining would become < min tradable
                 amount = trade.amount
@@ -686,7 +690,7 @@ class Backtesting:
                     self.futures_data[trade.pair],
                     amount=trade.amount,
                     is_short=trade.is_short,
-                    open_date=trade.open_date_utc,
+                    open_date=trade.date_last_filled_utc,
                     close_date=exit_candle_time,
                 )
 
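Both the live path (`self.exchange.amount_to_contract_precision(...)`) and the backtest path above now round a partial-exit amount to what the exchange can actually fill, then bail out when rounding produced zero. Hypothetical numbers showing why the zero guard matters (a simplified truncation, not freqtrade's helper):

```python
from math import floor


def amount_to_precision(amount: float, step: float) -> float:
    # Truncate to a multiple of the exchange's amount step.
    return floor(amount / step) * step


# The exchange only fills whole contracts (step = 1.0), but the
# position-adjustment logic asked to sell 0.7 of one contract.
amount = amount_to_precision(0.7, 1.0)
print(amount)  # 0.0 -> without the guard, a zero-amount order would be sent
```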
@@ -421,9 +421,10 @@ class Hyperopt:
         preprocessed = self.backtesting.strategy.advise_all_indicators(data)
 
         # Trim startup period from analyzed dataframe to get correct dates for output.
-        processed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
-        self.min_date, self.max_date = get_timerange(processed)
-        return processed
+        trimmed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
+        self.min_date, self.max_date = get_timerange(trimmed)
+        # Real trimming will happen as part of backtesting.
+        return preprocessed
 
     def prepare_hyperopt_data(self) -> None:
         HyperoptStateContainer.set_state(HyperoptState.DATALOAD)
@@ -75,7 +75,8 @@ def _get_line_floatfmt(stake_currency: str) -> List[str]:
             '.2f', 'd', 's', 's']
 
 
-def _get_line_header(first_column: str, stake_currency: str, direction: str = 'Buys') -> List[str]:
+def _get_line_header(first_column: str, stake_currency: str,
+                     direction: str = 'Entries') -> List[str]:
     """
     Generate header lines (goes in line with _generate_result_line())
     """
@@ -642,7 +643,7 @@ def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_curr
     if (tag_type == "enter_tag"):
         headers = _get_line_header("TAG", stake_currency)
     else:
-        headers = _get_line_header("TAG", stake_currency, 'Sells')
+        headers = _get_line_header("TAG", stake_currency, 'Exits')
     floatfmt = _get_line_floatfmt(stake_currency)
     output = [
         [
@@ -212,17 +212,18 @@ def migrate_orders_table(engine, table_back_name: str, cols_order: List):
     ft_fee_base = get_column_def(cols_order, 'ft_fee_base', 'null')
     average = get_column_def(cols_order, 'average', 'null')
     stop_price = get_column_def(cols_order, 'stop_price', 'null')
+    funding_fee = get_column_def(cols_order, 'funding_fee', '0.0')
 
     # sqlite does not support literals for booleans
     with engine.begin() as connection:
         connection.execute(text(f"""
             insert into orders (id, ft_trade_id, ft_order_side, ft_pair, ft_is_open, order_id,
             status, symbol, order_type, side, price, amount, filled, average, remaining, cost,
-            stop_price, order_date, order_filled_date, order_update_date, ft_fee_base)
+            stop_price, order_date, order_filled_date, order_update_date, ft_fee_base, funding_fee)
             select id, ft_trade_id, ft_order_side, ft_pair, ft_is_open, order_id,
             status, symbol, order_type, side, price, amount, filled, {average} average, remaining,
             cost, {stop_price} stop_price, order_date, order_filled_date,
-            order_update_date, {ft_fee_base} ft_fee_base
+            order_update_date, {ft_fee_base} ft_fee_base, {funding_fee} funding_fee
             from {table_back_name}
             """))
 
@@ -307,9 +308,10 @@ def check_migrate(engine, decl_base, previous_tables) -> None:
     # Check if migration necessary
     # Migrates both trades and orders table!
     # if ('orders' not in previous_tables
-    #     or not has_column(cols_orders, 'stop_price')):
+    #     or not has_column(cols_orders, 'funding_fee')):
     migrating = False
-    if not has_column(cols_trades, 'contract_size'):
+    # if not has_column(cols_trades, 'contract_size'):
+    if not has_column(cols_orders, 'funding_fee'):
         migrating = True
         logger.info(f"Running database migration for trades - "
                     f"backup: {table_back_name}, {order_table_bak_name}")
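The migration keeps to the file's existing idiom: derive a default expression for each possibly-missing column, copy the backup table with `INSERT ... SELECT`, and gate the whole migration on a `has_column` check. The core trick in miniature, against an in-memory SQLite database (stdlib only; table and column layout here is illustrative):

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE orders_bak (id INTEGER, price FLOAT)")
con.execute("INSERT INTO orders_bak VALUES (1, 100.0)")

# The new schema gains funding_fee; old rows get the default expression,
# mirroring get_column_def(cols_order, 'funding_fee', '0.0').
con.execute("CREATE TABLE orders (id INTEGER, price FLOAT, funding_fee FLOAT)")
funding_fee = "0.0"
con.execute(f"""
    insert into orders (id, price, funding_fee)
    select id, price, {funding_fee} funding_fee from orders_bak
""")
print(con.execute("SELECT * FROM orders").fetchall())  # [(1, 100.0, 0.0)]
```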
@@ -65,6 +65,8 @@ class Order(_DECL_BASE):
     order_filled_date = Column(DateTime, nullable=True)
     order_update_date = Column(DateTime, nullable=True)
 
+    funding_fee = Column(Float, nullable=True)
+
     ft_fee_base = Column(Float, nullable=True)
 
     @property
@@ -72,6 +74,13 @@ class Order(_DECL_BASE):
         """ Order-date with UTC timezoneinfo"""
         return self.order_date.replace(tzinfo=timezone.utc)
 
+    @property
+    def order_filled_utc(self) -> Optional[datetime]:
+        """ last order-date with UTC timezoneinfo"""
+        return (
+            self.order_filled_date.replace(tzinfo=timezone.utc) if self.order_filled_date else None
+        )
+
     @property
     def safe_price(self) -> float:
         return self.average or self.price
@@ -119,6 +128,10 @@ class Order(_DECL_BASE):
             self.ft_is_open = True
         if self.status in NON_OPEN_EXCHANGE_STATES:
             self.ft_is_open = False
+            if self.trade:
+                # Assign funding fee up to this point
+                # (represents the funding fee since the last order)
+                self.funding_fee = self.trade.funding_fees
         if (order.get('filled', 0.0) or 0.0) > 0:
             self.order_filled_date = datetime.now(timezone.utc)
         self.order_update_date = datetime.now(timezone.utc)
@@ -179,6 +192,10 @@ class Order(_DECL_BASE):
         self.remaining = 0
         self.status = 'closed'
         self.ft_is_open = False
+        # Assign funding fees to Order.
+        # Assumes backtesting will use date_last_filled_utc to calculate future funding fees.
+        self.funding_fee = trade.funding_fees
+
         if (self.ft_order_side == trade.entry_side):
             trade.open_rate = self.price
             trade.recalc_trade_from_orders()
@@ -346,6 +363,15 @@ class LocalTrade():
         else:
             return self.amount
 
+    @property
+    def date_last_filled_utc(self) -> datetime:
+        """ Date of the last filled order"""
+        orders = self.select_filled_orders()
+        if not orders:
+            return self.open_date_utc
+        return max([self.open_date_utc,
+                    max(o.order_filled_utc for o in orders if o.order_filled_utc)])
+
     @property
     def open_date_utc(self):
         return self.open_date.replace(tzinfo=timezone.utc)
@@ -843,10 +869,14 @@ class LocalTrade():
         close_profit = 0.0
         close_profit_abs = 0.0
         profit = None
-        for o in self.orders:
+        # Reset funding fees
+        self.funding_fees = 0.0
+        funding_fees = 0.0
+        ordercount = len(self.orders) - 1
+        for i, o in enumerate(self.orders):
             if o.ft_is_open or not o.filled:
                 continue
+            funding_fees += (o.funding_fee or 0.0)
             tmp_amount = FtPrecise(o.safe_amount_after_fee)
             tmp_price = FtPrecise(o.safe_price)
 
@@ -861,7 +891,11 @@ class LocalTrade():
             avg_price = current_stake / current_amount
 
             if is_exit:
-                # Process partial exits
+                # Process exits
+                if i == ordercount and is_closing:
+                    # Apply funding fees only to the last closing order
+                    self.funding_fees = funding_fees
+
                 exit_rate = o.safe_price
                 exit_amount = o.safe_amount_after_fee
                 profit = self.calc_profit(rate=exit_rate, amount=exit_amount,
@@ -871,6 +905,7 @@ class LocalTrade():
                     exit_rate, amount=exit_amount, open_rate=avg_price)
             else:
                 total_stake = total_stake + self._calc_open_trade_value(tmp_amount, price)
+                self.funding_fees = funding_fees
 
         if close_profit:
             self.close_profit = close_profit
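Because each filled order now carries the funding fee accrued since the previous one, replaying the orders and summing `o.funding_fee` reconstructs the running total, and the sum is only pinned to the trade on the final closing order. The arithmetic in miniature (toy numbers):

```python
# Funding fee snapshot on each filled order (since the previous order).
order_funding_fees = [0.0, -0.25, -0.5, -0.25]

funding_fees = 0.0
ordercount = len(order_funding_fees) - 1
for i, fee in enumerate(order_funding_fees):
    funding_fees += fee
    if i == ordercount:  # last (closing) order: apply the accumulated total
        trade_funding_fees = funding_fees

print(trade_funding_fees)  # -1.0
```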
@@ -1,7 +1,7 @@
 import logging
 from typing import Any, Dict
 
-from freqtrade.enums.rpcmessagetype import RPCMessageType
+from freqtrade.enums import RPCMessageType
 from freqtrade.rpc import RPC
 from freqtrade.rpc.webhook import Webhook
 
@@ -261,11 +261,15 @@ class RPC:
                     profit_str += f" ({fiat_profit:.2f})"
                     fiat_profit_sum = fiat_profit if isnan(fiat_profit_sum) \
                         else fiat_profit_sum + fiat_profit
+                open_order = (trade.select_order_by_order_id(
+                    trade.open_order_id) if trade.open_order_id else None)
+
                 detail_trade = [
                     f'{trade.id} {direction_str}',
-                    trade.pair + ('*' if (trade.open_order_id is not None
-                                          and trade.close_rate_requested is None) else '')
-                    + ('**' if (trade.close_rate_requested is not None) else ''),
+                    trade.pair + ('*' if (open_order
+                                          and open_order.ft_order_side == trade.entry_side) else '')
+                    + ('**' if (open_order and
+                                open_order.ft_order_side == trade.exit_side is not None) else ''),
                     shorten_date(arrow.get(trade.open_date).humanize(only_distance=True)),
                     profit_str
                 ]
@@ -6,6 +6,7 @@ This module manage Telegram communication
 import json
 import logging
 import re
+from copy import deepcopy
 from dataclasses import dataclass
 from datetime import date, datetime, timedelta
 from functools import partial
@@ -374,7 +375,7 @@ class Telegram(RPCHandler):
             message += f"\n*Duration:* `{msg['duration']} ({msg['duration_min']:.1f} min)`"
         return message

-    def compose_message(self, msg: Dict[str, Any], msg_type: RPCMessageType) -> str:
+    def compose_message(self, msg: Dict[str, Any], msg_type: RPCMessageType) -> Optional[str]:
         if msg_type in [RPCMessageType.ENTRY, RPCMessageType.ENTRY_FILL]:
             message = self._format_entry_msg(msg)

@@ -411,7 +412,8 @@ class Telegram(RPCHandler):
         elif msg_type == RPCMessageType.STRATEGY_MSG:
             message = f"{msg['msg']}"
         else:
-            raise NotImplementedError(f"Unknown message type: {msg_type}")
+            logger.debug("Unknown message type: %s", msg_type)
+            return None
         return message

     def send_msg(self, msg: Dict[str, Any]) -> None:
@@ -438,9 +440,9 @@ class Telegram(RPCHandler):
             # Notification disabled
             return

-        message = self.compose_message(msg, msg_type)
-        self._send_msg(message, disable_notification=(noti == 'silent'))
+        message = self.compose_message(deepcopy(msg), msg_type)
+        if message:
+            self._send_msg(message, disable_notification=(noti == 'silent'))

     def _get_sell_emoji(self, msg):
         """

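The Telegram changes above replace a hard `NotImplementedError` with a debug log plus `None` return, and `send_msg` now guards on the result, so an unknown RPC message type no longer bubbles an exception out of the messaging path. A reduced sketch of the pattern (names and message types here are illustrative, not freqtrade's full API):

```python
# Reduced sketch of the "log and return None" pattern adopted above.
import logging
from copy import deepcopy
from typing import Any, Dict, Optional

logger = logging.getLogger(__name__)


def compose_message(msg: Dict[str, Any], msg_type: Any) -> Optional[str]:
    if msg_type == 'entry':
        return f"Entering {msg['pair']}"
    if msg_type == 'exit':
        return f"Exiting {msg['pair']}"
    # Unknown types are logged instead of raising NotImplementedError
    logger.debug("Unknown message type: %s", msg_type)
    return None


def send_msg(msg: Dict[str, Any]) -> None:
    # deepcopy shields the caller's dict from any mutation while formatting
    message = compose_message(deepcopy(msg), msg.get('type'))
    if message:
        print(message)


send_msg({'type': 'entry', 'pair': 'ETH/BTC'})  # prints a message
send_msg({'type': None})                        # logged and swallowed
```
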
@@ -12,9 +12,8 @@ from pandas import DataFrame

 from freqtrade.constants import ListPairsWithTimeframes
 from freqtrade.data.dataprovider import DataProvider
-from freqtrade.enums import (CandleType, ExitCheckTuple, ExitType, SignalDirection, SignalTagType,
-                             SignalType, TradingMode)
-from freqtrade.enums.runmode import RunMode
+from freqtrade.enums import (CandleType, ExitCheckTuple, ExitType, RunMode, SignalDirection,
+                             SignalTagType, SignalType, TradingMode)
 from freqtrade.exceptions import OperationalException, StrategyError
 from freqtrade.exchange import timeframe_to_minutes, timeframe_to_next_date, timeframe_to_seconds
 from freqtrade.persistence import Order, PairLocks, Trade
@@ -169,6 +168,10 @@ class IStrategy(ABC, HyperStrategyMixin):
                     raise OperationalException(
                         'freqAI is not enabled. '
                         'Please enable it in your config to use this strategy.')

+                def shutdown(self, *args, **kwargs):
+                    pass
+
             self.freqai = DummyClass()  # type: ignore

     def ft_bot_start(self, **kwargs) -> None:
@@ -182,6 +185,12 @@ class IStrategy(ABC, HyperStrategyMixin):

         self.ft_load_hyper_params(self.config.get('runmode') == RunMode.HYPEROPT)

+    def ft_bot_cleanup(self) -> None:
+        """
+        Clean up FreqAI and child threads
+        """
+        self.freqai.shutdown()
+
     @abstractmethod
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         """

@@ -7,7 +7,7 @@ from abc import ABC, abstractmethod
 from contextlib import suppress
 from typing import Any, Optional, Sequence, Union

-from freqtrade.enums.hyperoptstate import HyperoptState
+from freqtrade.enums import HyperoptState
 from freqtrade.optimize.hyperopt_tools import HyperoptStateContainer

@@ -1,3 +1,5 @@
+from typing import Optional
+
 import pandas as pd

 from freqtrade.exchange import timeframe_to_minutes
@@ -6,7 +8,8 @@ from freqtrade.exchange import timeframe_to_minutes
 def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,
                            timeframe: str, timeframe_inf: str, ffill: bool = True,
                            append_timeframe: bool = True,
-                           date_column: str = 'date') -> pd.DataFrame:
+                           date_column: str = 'date',
+                           suffix: Optional[str] = None) -> pd.DataFrame:
     """
     Correctly merge informative samples to the original dataframe, avoiding lookahead bias.

@@ -28,6 +31,8 @@ def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,
     :param ffill: Forwardfill missing values - optional but usually required
     :param append_timeframe: Rename columns by appending timeframe.
     :param date_column: A custom date column name.
+    :param suffix: A string suffix to add at the end of the informative columns. If specified,
+                   append_timeframe must be false.
     :return: Merged dataframe
     :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe
     """
@@ -50,10 +55,16 @@ def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,

     # Rename columns to be unique
     date_merge = 'date_merge'
-    if append_timeframe:
+    if suffix and append_timeframe:
+        raise ValueError("You can not specify `append_timeframe` as True and a `suffix`.")
+    elif append_timeframe:
         date_merge = f'date_merge_{timeframe_inf}'
         informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns]

+    elif suffix:
+        date_merge = f'date_merge_{suffix}'
+        informative.columns = [f"{col}_{suffix}" for col in informative.columns]
+
     # Combine the 2 dataframes
     # all indicators on the informative sample MUST be calculated before this point
     if ffill:

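With the change above, `merge_informative_pair` gains a `suffix` argument as a mutually exclusive alternative to `append_timeframe`. A usage sketch with small synthetic frames (the column values are throwaway; only the renaming behaviour matters):

```python
# Usage sketch for the new `suffix` argument of merge_informative_pair.
import numpy as np
import pandas as pd

from freqtrade.strategy import merge_informative_pair

# Synthetic 15m and 1h frames with just enough columns for the merge.
idx_15m = pd.date_range('2022-09-01', periods=16, freq='15min', tz='UTC')
idx_1h = pd.date_range('2022-09-01', periods=4, freq='1h', tz='UTC')
df = pd.DataFrame({'date': idx_15m, 'open': np.arange(16.0), 'close': np.arange(16.0)})
inf_df = pd.DataFrame({'date': idx_1h, 'open': np.arange(4.0), 'close': np.arange(4.0)})

merged = merge_informative_pair(df, inf_df, '15m', '1h',
                                append_timeframe=False, suffix="suf")
print('open_suf' in merged.columns)   # True - suffix instead of timeframe
print('open_1h' in merged.columns)    # False

# Passing a suffix while append_timeframe stays True raises:
# ValueError: You can not specify `append_timeframe` as True and a `suffix`.
```
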
@@ -6,9 +6,7 @@ import talib.abstract as ta
 from pandas import DataFrame
 from technical import qtpylib

-from freqtrade.exchange import timeframe_to_prev_date
-from freqtrade.persistence import Trade
-from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair
+from freqtrade.strategy import CategoricalParameter, IStrategy, merge_informative_pair


 logger = logging.getLogger(__name__)
@@ -31,9 +29,6 @@ class FreqaiExampleStrategy(IStrategy):
         "main_plot": {},
         "subplots": {
             "prediction": {"prediction": {"color": "blue"}},
-            "target_roi": {
-                "target_roi": {"color": "brown"},
-            },
             "do_predict": {
                 "do_predict": {"color": "brown"},
             },
@@ -47,10 +42,10 @@ class FreqaiExampleStrategy(IStrategy):
     startup_candle_count: int = 40
     can_short = False

-    linear_roi_offset = DecimalParameter(
-        0.00, 0.02, default=0.005, space="sell", optimize=False, load=True
-    )
-    max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True)
+    std_dev_multiplier_buy = CategoricalParameter(
+        [0.75, 1, 1.25, 1.5, 1.75], default=1.25, space="buy", optimize=True)
+    std_dev_multiplier_sell = CategoricalParameter(
+        [0.1, 0.25, 0.4], space="sell", default=0.2, optimize=True)

     def informative_pairs(self):
         whitelist_pairs = self.dp.current_whitelist()
@@ -92,12 +87,10 @@ class FreqaiExampleStrategy(IStrategy):
             t = int(t)
             informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
             informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
-            informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
+            informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, timeperiod=t)
             informative[f"%-{coin}sma-period_{t}"] = ta.SMA(informative, timeperiod=t)
             informative[f"%-{coin}ema-period_{t}"] = ta.EMA(informative, timeperiod=t)

-            informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
-
             bollinger = qtpylib.bollinger_bands(
                 qtpylib.typical_price(informative), window=t, stds=2.2
             )
@@ -189,21 +182,26 @@ class FreqaiExampleStrategy(IStrategy):
         # `populate_any_indicators()` for each training period.

         dataframe = self.freqai.start(dataframe, metadata, self)
-        dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
-        dataframe["sell_roi"] = dataframe["&-s_close_mean"] - dataframe["&-s_close_std"] * 1.25
+        for val in self.std_dev_multiplier_buy.range:
+            dataframe[f'target_roi_{val}'] = dataframe["&-s_close_mean"] + \
+                dataframe["&-s_close_std"] * val
+        for val in self.std_dev_multiplier_sell.range:
+            dataframe[f'sell_roi_{val}'] = dataframe["&-s_close_mean"] - \
+                dataframe["&-s_close_std"] * val
         return dataframe

     def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:

-        enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"]]
+        enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"]
+                                 > df[f"target_roi_{self.std_dev_multiplier_buy.value}"]]

         if enter_long_conditions:
             df.loc[
                 reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
             ] = (1, "long")

-        enter_short_conditions = [df["do_predict"] == 1, df["&-s_close"] < df["sell_roi"]]
+        enter_short_conditions = [df["do_predict"] == 1, df["&-s_close"]
+                                  < df[f"sell_roi_{self.std_dev_multiplier_sell.value}"]]

         if enter_short_conditions:
             df.loc[
@@ -213,11 +211,13 @@ class FreqaiExampleStrategy(IStrategy):
         return df

     def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
-        exit_long_conditions = [df["do_predict"] == 1, df["&-s_close"] < df["sell_roi"] * 0.25]
+        exit_long_conditions = [df["do_predict"] == 1, df["&-s_close"] <
+                                df[f"sell_roi_{self.std_dev_multiplier_sell.value}"] * 0.25]
         if exit_long_conditions:
             df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1

-        exit_short_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"] * 0.25]
+        exit_short_conditions = [df["do_predict"] == 1, df["&-s_close"] >
+                                 df[f"target_roi_{self.std_dev_multiplier_buy.value}"] * 0.25]
         if exit_short_conditions:
             df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1

@@ -226,83 +226,6 @@ class FreqaiExampleStrategy(IStrategy):
     def get_ticker_indicator(self):
         return int(self.config["timeframe"][:-1])

-    def custom_exit(
-        self, pair: str, trade: Trade, current_time, current_rate, current_profit, **kwargs
-    ):
-
-        dataframe, _ = self.dp.get_analyzed_dataframe(pair=pair, timeframe=self.timeframe)
-
-        trade_date = timeframe_to_prev_date(self.config["timeframe"], trade.open_date_utc)
-        trade_candle = dataframe.loc[(dataframe["date"] == trade_date)]
-
-        if trade_candle.empty:
-            return None
-        trade_candle = trade_candle.squeeze()
-
-        follow_mode = self.config.get("freqai", {}).get("follow_mode", False)
-
-        if not follow_mode:
-            pair_dict = self.freqai.dd.pair_dict
-        else:
-            pair_dict = self.freqai.dd.follower_dict
-
-        entry_tag = trade.enter_tag
-
-        if (
-            "prediction" + entry_tag not in pair_dict[pair]
-            or pair_dict[pair]['extras']["prediction" + entry_tag] == 0
-        ):
-            pair_dict[pair]['extras']["prediction" + entry_tag] = abs(trade_candle["&-s_close"])
-            if not follow_mode:
-                self.freqai.dd.save_drawer_to_disk()
-            else:
-                self.freqai.dd.save_follower_dict_to_disk()
-
-        roi_price = pair_dict[pair]['extras']["prediction" + entry_tag]
-        roi_time = self.max_roi_time_long.value
-
-        roi_decay = roi_price * (
-            1 - ((current_time - trade.open_date_utc).seconds) / (roi_time * 60)
-        )
-        if roi_decay < 0:
-            roi_decay = self.linear_roi_offset.value
-        else:
-            roi_decay += self.linear_roi_offset.value
-
-        if current_profit > roi_decay:
-            return "roi_custom_win"
-
-        if current_profit < -roi_decay:
-            return "roi_custom_loss"
-
-    def confirm_trade_exit(
-        self,
-        pair: str,
-        trade: Trade,
-        order_type: str,
-        amount: float,
-        rate: float,
-        time_in_force: str,
-        exit_reason: str,
-        current_time,
-        **kwargs,
-    ) -> bool:
-
-        entry_tag = trade.enter_tag
-        follow_mode = self.config.get("freqai", {}).get("follow_mode", False)
-        if not follow_mode:
-            pair_dict = self.freqai.dd.pair_dict
-        else:
-            pair_dict = self.freqai.dd.follower_dict
-
-        pair_dict[pair]['extras']["prediction" + entry_tag] = 0
-        if not follow_mode:
-            self.freqai.dd.save_drawer_to_disk()
-        else:
-            self.freqai.dd.save_follower_dict_to_disk()
-
-        return True
-
     def confirm_trade_entry(
         self,
         pair: str,

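The example-strategy rework above hinges on freqtrade parameter objects exposing both `.value` and `.range`: while hyperopting, `.range` yields every candidate value, so one `target_roi_{val}` / `sell_roi_{val}` column is precomputed per candidate, and each epoch then reads the column matching its active `.value`. An illustrative stand-in for that behaviour (not freqtrade's actual parameter class):

```python
# Illustrative stand-in showing why the loops above iterate `.range`.
class FakeCategoricalParameter:
    def __init__(self, categories, default, in_hyperopt=False):
        self.categories = categories
        self.value = default
        self.in_hyperopt = in_hyperopt

    @property
    def range(self):
        # All candidates while hyperopting, just the active value otherwise.
        return self.categories if self.in_hyperopt else [self.value]


buy_mult = FakeCategoricalParameter([0.75, 1, 1.25, 1.5, 1.75], default=1.25)
print([f"target_roi_{v}" for v in buy_mult.range])
# ['target_roi_1.25'] - a normal backtest computes a single column

buy_mult.in_hyperopt = True
print([f"target_roi_{v}" for v in buy_mult.range])
# all five columns exist, so every hyperopt epoch finds its column precomputed
```
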
@@ -135,7 +135,7 @@ class FreqaiExampleHybridStrategy(IStrategy):
             t = int(t)
             informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
             informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
-            informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
+            informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, timeperiod=t)
             informative[f"%-{coin}sma-period_{t}"] = ta.SMA(informative, timeperiod=t)
             informative[f"%-{coin}ema-period_{t}"] = ta.EMA(informative, timeperiod=t)
             informative[f"%-{coin}roc-period_{t}"] = ta.ROC(informative, timeperiod=t)

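Both example strategies above switch `ta.ADX(informative, window=t)` to `timeperiod=t`. TA-Lib's abstract interface keys the lookback on `timeperiod`; an unrecognised keyword such as `window` appears to be silently ignored (which is presumably why the bug went unnoticed), leaving ADX at its default period of 14. A quick check, assuming TA-Lib is installed:

```python
# Quick check of the keyword fix: `window` is not a TA-Lib parameter name,
# so the default timeperiod (14) applies; `timeperiod` actually takes effect.
import numpy as np
import pandas as pd
import talib.abstract as ta

rng = np.random.default_rng(42)
close = 100 + rng.normal(0, 1, 100).cumsum()
df = pd.DataFrame({'high': close + 1, 'low': close - 1, 'close': close})

adx_default = ta.ADX(df)                 # timeperiod=14
adx_window = ta.ADX(df, window=20)       # 'window' has no effect
adx_correct = ta.ADX(df, timeperiod=20)  # period really is 20

print(adx_default.equals(adx_window))    # True: window was ignored
print(adx_default.equals(adx_correct))   # False
```
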
@@ -10,7 +10,7 @@ flake8==5.0.4
 flake8-tidy-imports==4.8.0
 mypy==0.971
 pre-commit==2.20.0
-pytest==7.1.2
+pytest==7.1.3
 pytest-asyncio==0.19.0
 pytest-cov==3.0.0
 pytest-mock==3.8.2

@@ -6,6 +6,7 @@ scikit-learn==1.1.2
 joblib==1.1.0
 catboost==1.0.6; platform_machine != 'aarch64'
 lightgbm==3.3.2
+xgboost==1.6.2
 torch==1.12.1
 stable-baselines3==1.6.0
 gym==0.21.0

@@ -1,22 +1,22 @@
 numpy==1.23.2
-pandas==1.4.3
+pandas==1.4.4
 pandas-ta==0.3.14b

-ccxt==1.92.84
+ccxt==1.93.3
 # Pin cryptography for now due to rust build errors with piwheels
 cryptography==37.0.4
 aiohttp==3.8.1
 SQLAlchemy==1.4.40
-python-telegram-bot==13.13
+python-telegram-bot==13.14
-arrow==1.2.2
+arrow==1.2.3
 cachetools==4.2.2
 requests==2.28.1
 urllib3==1.26.12
-jsonschema==4.14.0
+jsonschema==4.15.0
 TA-Lib==0.4.24
 technical==1.3.0
 tabulate==0.8.10
-pycoingecko==2.2.0
+pycoingecko==3.0.0
 jinja2==3.1.2
 tables==3.7.0
 blosc==1.10.6
@@ -34,17 +34,17 @@ orjson==3.8.0
 sdnotify==0.3.2

 # API Server
-fastapi==0.81.0
+fastapi==0.82.0
 uvicorn==0.18.3
 pyjwt==2.4.0
 aiofiles==0.8.0
-psutil==5.9.1
+psutil==5.9.2

 # Support for colorized terminal output
 colorama==0.4.5
 # Building config files interactively
 questionary==1.10.0
-prompt-toolkit==3.0.30
+prompt-toolkit==3.0.31
 # Extensions to datetime library
 python-dateutil==2.8.2

@@ -13,7 +13,7 @@ from pandas import DataFrame
 from pandas.testing import assert_frame_equal

 from freqtrade.configuration import TimeRange
-from freqtrade.constants import AVAILABLE_DATAHANDLERS
+from freqtrade.constants import AVAILABLE_DATAHANDLERS, DATETIME_PRINT_FORMAT
 from freqtrade.data.converter import ohlcv_to_dataframe
 from freqtrade.data.history.hdf5datahandler import HDF5DataHandler
 from freqtrade.data.history.history_utils import (_download_pair_history, _download_trades_history,
@@ -386,7 +386,7 @@ def test_load_partial_missing(testdatadir, caplog) -> None:
     assert td != len(data['UNITTEST/BTC'])
     start_real = data['UNITTEST/BTC'].iloc[0, 0]
     assert log_has(f'UNITTEST/BTC, spot, 5m, '
-                   f'data starts at {start_real.strftime("%Y-%m-%d %H:%M:%S")}',
+                   f'data starts at {start_real.strftime(DATETIME_PRINT_FORMAT)}',
                    caplog)
     # Make sure we start fresh - test missing data at end
     caplog.clear()
@@ -401,7 +401,7 @@ def test_load_partial_missing(testdatadir, caplog) -> None:
     # Shift endtime with +5 - as last candle is dropped (partial candle)
     end_real = arrow.get(data['UNITTEST/BTC'].iloc[-1, 0]).shift(minutes=5)
     assert log_has(f'UNITTEST/BTC, spot, 5m, '
-                   f'data ends at {end_real.strftime("%Y-%m-%d %H:%M:%S")}',
+                   f'data ends at {end_real.strftime(DATETIME_PRINT_FORMAT)}',
                    caplog)

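The test updates above swap the hard-coded `strftime` pattern for the shared `DATETIME_PRINT_FORMAT` constant; judging from the literal it replaces, the constant resolves to `"%Y-%m-%d %H:%M:%S"`. For instance:

```python
# DATETIME_PRINT_FORMAT replaces the literal "%Y-%m-%d %H:%M:%S" above.
from datetime import datetime, timezone

from freqtrade.constants import DATETIME_PRINT_FORMAT

stamp = datetime(2022, 9, 10, 12, 30, tzinfo=timezone.utc)
print(stamp.strftime(DATETIME_PRINT_FORMAT))  # 2022-09-10 12:30:00
```
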
@@ -267,13 +267,8 @@ class TestCCXTExchange():
         now = datetime.now(timezone.utc) - timedelta(minutes=(timeframe_to_minutes(timeframe) * 2))
         assert exchange.klines(pair_tf).iloc[-1]['date'] >= timeframe_to_prev_date(timeframe, now)

-    def test_ccxt__async_get_candle_history(self, exchange):
-        exchange, exchangename = exchange
-        # For some weired reason, this test returns random lengths for bittrex.
-        if not exchange._ft_has['ohlcv_has_history'] or exchangename == 'bittrex':
-            return
-        pair = EXCHANGES[exchangename]['pair']
-        timeframe = EXCHANGES[exchangename]['timeframe']
+    def ccxt__async_get_candle_history(self, exchange, exchangename, pair, timeframe):
         candle_type = CandleType.SPOT
         timeframe_ms = timeframe_to_msecs(timeframe)
         now = timeframe_to_prev_date(
@@ -299,6 +294,24 @@ class TestCCXTExchange():
         assert len(candles) >= min(candle_count, candle_count1)
         assert candles[0][0] == since_ms or (since_ms + timeframe_ms)

+    def test_ccxt__async_get_candle_history(self, exchange):
+        exchange, exchangename = exchange
+        # For some weired reason, this test returns random lengths for bittrex.
+        if not exchange._ft_has['ohlcv_has_history'] or exchangename in ('bittrex', 'gateio'):
+            return
+        pair = EXCHANGES[exchangename]['pair']
+        timeframe = EXCHANGES[exchangename]['timeframe']
+        self.ccxt__async_get_candle_history(exchange, exchangename, pair, timeframe)
+
+    def test_ccxt__async_get_candle_history_futures(self, exchange_futures):
+        exchange, exchangename = exchange_futures
+        if not exchange:
+            # exchange_futures only returns values for supported exchanges
+            return
+        pair = EXCHANGES[exchangename].get('futures_pair', EXCHANGES[exchangename]['pair'])
+        timeframe = EXCHANGES[exchangename]['timeframe']
+        self.ccxt__async_get_candle_history(exchange, exchangename, pair, timeframe)
+
     def test_ccxt_fetch_funding_rate_history(self, exchange_futures):
         exchange, exchangename = exchange_futures
         if not exchange:

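The refactor above relies on pytest's collection rules: only methods whose names start with `test_` are collected, so the shared body moves into a plain `ccxt__async_get_candle_history` helper, and two thin wrappers feed it spot and futures parameters. A generic sketch of the pattern (class and method names here are illustrative):

```python
# Generic sketch of the shared-helper pattern used above: pytest collects
# only `test_`-prefixed methods, so the common body is never run directly.
class TestCandleHistory:
    def _check_candle_history(self, exchangename: str, pair: str) -> None:
        # shared assertions for spot and futures would live here
        assert '/' in pair

    def test_spot(self):
        self._check_candle_history('binance', 'BTC/USDT')

    def test_futures(self):
        self._check_candle_history('binance', 'BTC/USDT:USDT')
```
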
@@ -11,8 +11,9 @@ import pytest
 from pandas import DataFrame

 from freqtrade.enums import CandleType, MarginMode, TradingMode
-from freqtrade.exceptions import (DDosProtection, DependencyException, InvalidOrderException,
-                                  OperationalException, PricingError, TemporaryError)
+from freqtrade.exceptions import (DDosProtection, DependencyException, ExchangeError,
+                                  InvalidOrderException, OperationalException, PricingError,
+                                  TemporaryError)
 from freqtrade.exchange import (Binance, Bittrex, Exchange, Kraken, amount_to_precision,
                                 date_minus_candles, market_is_active, price_to_precision,
                                 timeframe_to_minutes, timeframe_to_msecs, timeframe_to_next_date,
@@ -4179,17 +4180,24 @@ def test__fetch_and_calculate_funding_fees(
     type(api_mock).has = PropertyMock(return_value={'fetchOHLCV': True})
     type(api_mock).has = PropertyMock(return_value={'fetchFundingRateHistory': True})

-    exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange)
+    ex = get_patched_exchange(mocker, default_conf, api_mock, id=exchange)
     mocker.patch('freqtrade.exchange.Exchange.timeframes', PropertyMock(
         return_value=['1h', '4h', '8h']))
-    funding_fees = exchange._fetch_and_calculate_funding_fees(
+    funding_fees = ex._fetch_and_calculate_funding_fees(
         pair='ADA/USDT', amount=amount, is_short=True, open_date=d1, close_date=d2)
     assert pytest.approx(funding_fees) == expected_fees
     # Fees for Longs are inverted
-    funding_fees = exchange._fetch_and_calculate_funding_fees(
+    funding_fees = ex._fetch_and_calculate_funding_fees(
         pair='ADA/USDT', amount=amount, is_short=False, open_date=d1, close_date=d2)
     assert pytest.approx(funding_fees) == -expected_fees

+    # Return empty "refresh_latest"
+    mocker.patch("freqtrade.exchange.Exchange.refresh_latest_ohlcv", return_value={})
+    ex = get_patched_exchange(mocker, default_conf, api_mock, id=exchange)
+    with pytest.raises(ExchangeError, match="Could not find funding rates."):
+        ex._fetch_and_calculate_funding_fees(
+            pair='ADA/USDT', amount=amount, is_short=False, open_date=d1, close_date=d2)
+

 @pytest.mark.parametrize('exchange,expected_fees', [
     ('binance', -0.0009140999999999999),
@@ -4456,6 +4464,39 @@ def test__amount_to_contracts(
     assert result_amount == param_amount


+@pytest.mark.parametrize('pair,amount,expected_spot,expected_fut', [
+    # Contract size of 0.01
+    ('ADA/USDT:USDT', 40, 40, 40),
+    ('ADA/USDT:USDT', 10.4445555, 10.4, 10.444),
+    ('LTC/ETH', 30, 30, 30),
+    ('LTC/USD', 30, 30, 30),
+    # contract size of 10
+    ('ETH/USDT:USDT', 10.111, 10.1, 10),
+    ('ETH/USDT:USDT', 10.188, 10.1, 10),
+    ('ETH/USDT:USDT', 10.988, 10.9, 10),
+])
+def test_amount_to_contract_precision(
+    mocker,
+    default_conf,
+    pair,
+    amount,
+    expected_spot,
+    expected_fut,
+):
+    api_mock = MagicMock()
+    default_conf['trading_mode'] = 'spot'
+    default_conf['margin_mode'] = 'isolated'
+    exchange = get_patched_exchange(mocker, default_conf, api_mock)
+
+    result_size = exchange.amount_to_contract_precision(pair, amount)
+    assert result_size == expected_spot
+
+    default_conf['trading_mode'] = 'futures'
+    exchange = get_patched_exchange(mocker, default_conf, api_mock)
+    result_size = exchange.amount_to_contract_precision(pair, amount)
+    assert result_size == expected_fut
+
+
 @pytest.mark.parametrize('exchange_name,open_rate,is_short,trading_mode,margin_mode', [
     # Bittrex
     ('bittrex', 2.0, False, 'spot', None),

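The new `test_amount_to_contract_precision` cases encode the expected rounding: in futures mode the amount is converted into contracts, the contract count is truncated to the market's amount precision, and the result converted back, so with a contract size of 0.01 an amount of 10.4445555 becomes 10.444 rather than the spot-mode 10.4. A simplified sketch of that arithmetic (the real implementation looks contract size and precision up per market via ccxt; spot mode applies amount precision directly, which the sketch approximates as contract size 1):

```python
# Simplified sketch of the contract-precision rounding the new test exercises.
import math


def to_contract_precision(amount: float, contract_size: float,
                          precision: float = 0.1) -> float:
    """Convert amount -> contracts, truncate to precision, convert back."""
    contracts = amount / contract_size
    truncated = math.floor(contracts / precision) * precision
    # round() strips float residue such as 10.444000000000001
    return round(truncated * contract_size, 12)


print(to_contract_precision(10.4445555, 0.01))  # 10.444 (futures, size 0.01)
print(to_contract_precision(10.4445555, 1.0))   # 10.4   (spot-like, size 1)
```
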
@@ -4,8 +4,7 @@ from unittest.mock import MagicMock, PropertyMock

 import pytest

-from freqtrade.enums import MarginMode, TradingMode
-from freqtrade.enums.candletype import CandleType
+from freqtrade.enums import CandleType, MarginMode, TradingMode
 from freqtrade.exchange.exchange import timeframe_to_minutes
 from tests.conftest import get_mock_coro, get_patched_exchange, log_has
 from tests.exchange.test_exchange import ccxt_exceptionhandlers

@@ -81,6 +81,37 @@ def get_patched_freqaimodel(mocker, freqaiconf):
     return freqaimodel


+def make_unfiltered_dataframe(mocker, freqai_conf):
+    freqai_conf.update({"timerange": "20180110-20180130"})
+
+    strategy = get_patched_freqai_strategy(mocker, freqai_conf)
+    exchange = get_patched_exchange(mocker, freqai_conf)
+    strategy.dp = DataProvider(freqai_conf, exchange)
+    strategy.freqai_info = freqai_conf.get("freqai", {})
+    freqai = strategy.freqai
+    freqai.live = True
+    freqai.dk = FreqaiDataKitchen(freqai_conf)
+    freqai.dk.pair = "ADA/BTC"
+    data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
+    freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk)
+
+    freqai.dd.pair_dict = MagicMock()
+
+    new_timerange = TimeRange.parse_timerange("20180120-20180130")
+
+    corr_dataframes, base_dataframes = freqai.dd.get_base_and_corr_dataframes(
+        data_load_timerange, freqai.dk.pair, freqai.dk
+    )
+
+    unfiltered_dataframe = freqai.dk.use_strategy_to_populate_indicators(
+        strategy, corr_dataframes, base_dataframes, freqai.dk.pair
+    )
+
+    unfiltered_dataframe = freqai.dk.slice_dataframe(new_timerange, unfiltered_dataframe)
+
+    return freqai, unfiltered_dataframe
+
+
 def make_data_dictionary(mocker, freqai_conf):
     freqai_conf.update({"timerange": "20180110-20180130"})

@@ -92,12 +123,11 @@ def make_data_dictionary(mocker, freqai_conf):
     freqai.live = True
     freqai.dk = FreqaiDataKitchen(freqai_conf)
     freqai.dk.pair = "ADA/BTC"
-    timerange = TimeRange.parse_timerange("20180110-20180130")
-    freqai.dd.load_all_pair_histories(timerange, freqai.dk)
+    data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
+    freqai.dd.load_all_pair_histories(data_load_timerange, freqai.dk)

     freqai.dd.pair_dict = MagicMock()

-    data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")

     corr_dataframes, base_dataframes = freqai.dd.get_base_and_corr_dataframes(

@@ -1,12 +1,13 @@
-import datetime
 import shutil
+from datetime import datetime, timedelta, timezone
 from pathlib import Path

 import pytest

 from freqtrade.exceptions import OperationalException
 from tests.conftest import log_has_re
-from tests.freqai.conftest import get_patched_data_kitchen, make_data_dictionary
+from tests.freqai.conftest import (get_patched_data_kitchen, make_data_dictionary,
+                                   make_unfiltered_dataframe)


 @pytest.mark.parametrize(
@@ -56,16 +57,13 @@ def test_split_timerange(
     shutil.rmtree(Path(dk.full_path))


-@pytest.mark.parametrize(
-    "timestamp, expected",
-    [
-        (datetime.datetime.now(tz=datetime.timezone.utc).timestamp() - 7200, True),
-        (datetime.datetime.now(tz=datetime.timezone.utc).timestamp(), False),
-    ],
-)
-def test_check_if_model_expired(mocker, freqai_conf, timestamp, expected):
+def test_check_if_model_expired(mocker, freqai_conf):
     dk = get_patched_data_kitchen(mocker, freqai_conf)
-    assert dk.check_if_model_expired(timestamp) == expected
+    now = datetime.now(tz=timezone.utc).timestamp()
+    assert dk.check_if_model_expired(now) is False
+    now = (datetime.now(tz=timezone.utc) - timedelta(hours=2)).timestamp()
+    assert dk.check_if_model_expired(now) is True
     shutil.rmtree(Path(dk.full_path))

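The import change at the top of this test module trades the `import datetime` module reference for `from datetime import datetime, timedelta, timezone`, which shortens the timestamp construction used by the rewritten expiry test. Both spellings are equivalent:

```python
# The two equivalent timestamp spellings the import change trades between.
import datetime as dt
from datetime import datetime, timedelta, timezone

old_style = dt.datetime.now(tz=dt.timezone.utc).timestamp()
new_style = datetime.now(tz=timezone.utc).timestamp()
two_hours_ago = (datetime.now(tz=timezone.utc) - timedelta(hours=2)).timestamp()

assert abs(old_style - new_style) < 1.0    # same instant, give or take
print(new_style - two_hours_ago)           # ~7200 seconds
```
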
@@ -74,7 +72,7 @@ def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog):
     # freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1})
     freqai.dk.use_DBSCAN_to_remove_outliers(predict=False)
     assert log_has_re(
-        "DBSCAN found eps of 2.42.",
+        "DBSCAN found eps of 2.36.",
         caplog,
     )

@@ -83,7 +81,7 @@ def test_compute_distances(mocker, freqai_conf):
     freqai = make_data_dictionary(mocker, freqai_conf)
     freqai_conf['freqai']['feature_parameters'].update({"DI_threshold": 1})
     avg_mean_dist = freqai.dk.compute_distances()
-    assert round(avg_mean_dist, 2) == 2.56
+    assert round(avg_mean_dist, 2) == 2.54


 def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, caplog):
@@ -91,6 +89,75 @@ def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf,
     freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 0.1})
     freqai.dk.use_SVM_to_remove_outliers(predict=False)
     assert log_has_re(
-        "SVM detected 8.46%",
+        "SVM detected 8.09%",
         caplog,
     )

+
+def test_compute_inlier_metric(mocker, freqai_conf, caplog):
+    freqai = make_data_dictionary(mocker, freqai_conf)
+    freqai_conf['freqai']['feature_parameters'].update({"inlier_metric_window": 10})
+    freqai.dk.compute_inlier_metric(set_='train')
+    assert log_has_re(
+        "Inlier metric computed and added to features.",
+        caplog,
+    )
+
+
+def test_add_noise_to_training_features(mocker, freqai_conf):
+    freqai = make_data_dictionary(mocker, freqai_conf)
+    freqai_conf['freqai']['feature_parameters'].update({"noise_standard_deviation": 0.1})
+    freqai.dk.add_noise_to_training_features()
+
+
+def test_remove_beginning_points_from_data_dict(mocker, freqai_conf):
+    freqai = make_data_dictionary(mocker, freqai_conf)
+    freqai.dk.remove_beginning_points_from_data_dict(set_='train')
+
+
+def test_principal_component_analysis(mocker, freqai_conf, caplog):
+    freqai = make_data_dictionary(mocker, freqai_conf)
+    freqai.dk.principal_component_analysis()
+    assert log_has_re(
+        "reduced feature dimension by",
+        caplog,
+    )
+
+
+def test_normalize_data(mocker, freqai_conf):
+    freqai = make_data_dictionary(mocker, freqai_conf)
+    data_dict = freqai.dk.data_dictionary
+    freqai.dk.normalize_data(data_dict)
+    assert len(freqai.dk.data) == 56
+
+
+def test_filter_features(mocker, freqai_conf):
+    freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf)
+    freqai.dk.find_features(unfiltered_dataframe)
+
+    filtered_df, labels = freqai.dk.filter_features(
+        unfiltered_dataframe,
+        freqai.dk.training_features_list,
+        freqai.dk.label_list,
+        training_filter=True,
+    )
+
+    assert len(filtered_df.columns) == 26
+
+
+def test_make_train_test_datasets(mocker, freqai_conf):
+    freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf)
+    freqai.dk.find_features(unfiltered_dataframe)
+
+    features_filtered, labels_filtered = freqai.dk.filter_features(
+        unfiltered_dataframe,
+        freqai.dk.training_features_list,
+        freqai.dk.label_list,
+        training_filter=True,
+    )
+
+    data_dictionary = freqai.dk.make_train_test_datasets(features_filtered, labels_filtered)
+
+    assert data_dictionary
+    assert len(data_dictionary) == 7
+    assert len(data_dictionary['train_features'].index) == 1916

@@ -17,8 +17,18 @@ def is_arm() -> bool:
     return "arm" in machine or "aarch64" in machine


-def test_train_model_in_series_LightGBM(mocker, freqai_conf):
+@pytest.mark.parametrize('model', [
+    'LightGBMRegressor',
+    'XGBoostRegressor',
+    'CatboostRegressor',
+])
+def test_extract_data_and_train_model_Regressors(mocker, freqai_conf, model):
+    if is_arm() and model == 'CatboostRegressor':
+        pytest.skip("CatBoost is not supported on ARM")
+
+    freqai_conf.update({"freqaimodel": model})
     freqai_conf.update({"timerange": "20180110-20180130"})
+    freqai_conf.update({"strategy": "freqai_test_strat"})

     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
@@ -35,7 +45,8 @@ def test_train_model_in_series_LightGBM(mocker, freqai_conf):
     data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")

-    freqai.train_model_in_series(new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
+    freqai.extract_data_and_train_model(
+        new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)

     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file()
@@ -45,10 +56,18 @@ def test_train_model_in_series_LightGBM(mocker, freqai_conf):
     shutil.rmtree(Path(freqai.dk.full_path))


-def test_train_model_in_series_LightGBMMultiModel(mocker, freqai_conf):
+@pytest.mark.parametrize('model', [
+    'LightGBMRegressorMultiTarget',
+    'XGBoostRegressorMultiTarget',
+    'CatboostRegressorMultiTarget',
+])
+def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model):
+    if is_arm() and model == 'CatboostRegressorMultiTarget':
+        pytest.skip("CatBoost is not supported on ARM")
+
     freqai_conf.update({"timerange": "20180110-20180130"})
     freqai_conf.update({"strategy": "freqai_test_multimodel_strat"})
-    freqai_conf.update({"freqaimodel": "LightGBMRegressorMultiTarget"})
+    freqai_conf.update({"freqaimodel": model})
     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
     strategy.dp = DataProvider(freqai_conf, exchange)
@@ -64,7 +83,8 @@ def test_train_model_in_series_LightGBMMultiModel(mocker, freqai_conf):
     data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")

-    freqai.train_model_in_series(new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
+    freqai.extract_data_and_train_model(
+        new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)

     assert len(freqai.dk.label_list) == 2
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file()

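The per-model test trio is collapsed into parametrized tests above; CatBoost is skipped at runtime on ARM rather than via a `skipif` decorator, since only one of the parametrized cases needs skipping. A minimal sketch of the pattern (illustrative model names; `is_arm` mirrors the helper in the test file):

```python
# Sketch of the parametrize-plus-conditional-skip pattern adopted above.
import platform

import pytest


def is_arm() -> bool:
    machine = platform.machine()
    return "arm" in machine or "aarch64" in machine


@pytest.mark.parametrize('model', [
    'LightGBMRegressor',
    'XGBoostRegressor',
    'CatboostRegressor',
])
def test_train(model):
    if is_arm() and model == 'CatboostRegressor':
        pytest.skip("CatBoost is not supported on ARM")
    # training assertions would follow here
    assert model
```
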
@@ -76,75 +96,17 @@ def test_train_model_in_series_LightGBMMultiModel(mocker, freqai_conf):
     shutil.rmtree(Path(freqai.dk.full_path))


-@pytest.mark.skipif(is_arm(), reason="no ARM for Catboost ...")
-def test_train_model_in_series_Catboost(mocker, freqai_conf):
-    freqai_conf.update({"timerange": "20180110-20180130"})
-    freqai_conf.update({"freqaimodel": "CatboostRegressor"})
-    # freqai_conf.get('freqai', {}).update(
-    #     {'model_training_parameters': {"n_estimators": 100, "verbose": 0}})
-    strategy = get_patched_freqai_strategy(mocker, freqai_conf)
-    exchange = get_patched_exchange(mocker, freqai_conf)
-    strategy.dp = DataProvider(freqai_conf, exchange)
-
-    strategy.freqai_info = freqai_conf.get("freqai", {})
-    freqai = strategy.freqai
-    freqai.live = True
-    freqai.dk = FreqaiDataKitchen(freqai_conf)
-    timerange = TimeRange.parse_timerange("20180110-20180130")
-    freqai.dd.load_all_pair_histories(timerange, freqai.dk)
-
-    freqai.dd.pair_dict = MagicMock()
-
-    data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
-    new_timerange = TimeRange.parse_timerange("20180120-20180130")
-
-    freqai.train_model_in_series(new_timerange, "ADA/BTC",
-                                 strategy, freqai.dk, data_load_timerange)
-
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").exists()
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists()
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").exists()
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").exists()
-
-    shutil.rmtree(Path(freqai.dk.full_path))
-
-
-@pytest.mark.skipif(is_arm(), reason="no ARM for Catboost ...")
-def test_train_model_in_series_CatboostClassifier(mocker, freqai_conf):
-    freqai_conf.update({"timerange": "20180110-20180130"})
-    freqai_conf.update({"freqaimodel": "CatboostClassifier"})
+@pytest.mark.parametrize('model', [
+    'LightGBMClassifier',
+    'CatboostClassifier',
+])
+def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
+    if is_arm() and model == 'CatboostClassifier':
+        pytest.skip("CatBoost is not supported on ARM")
+
+    freqai_conf.update({"freqaimodel": model})
     freqai_conf.update({"strategy": "freqai_test_classifier"})
-    strategy = get_patched_freqai_strategy(mocker, freqai_conf)
-    exchange = get_patched_exchange(mocker, freqai_conf)
-    strategy.dp = DataProvider(freqai_conf, exchange)
-
-    strategy.freqai_info = freqai_conf.get("freqai", {})
-    freqai = strategy.freqai
-    freqai.live = True
-    freqai.dk = FreqaiDataKitchen(freqai_conf)
-    timerange = TimeRange.parse_timerange("20180110-20180130")
-    freqai.dd.load_all_pair_histories(timerange, freqai.dk)
-
-    freqai.dd.pair_dict = MagicMock()
-
-    data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
-    new_timerange = TimeRange.parse_timerange("20180120-20180130")
-
-    freqai.train_model_in_series(new_timerange, "ADA/BTC",
-                                 strategy, freqai.dk, data_load_timerange)
-
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").exists()
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists()
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").exists()
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").exists()
-
-    shutil.rmtree(Path(freqai.dk.full_path))
-
-
-def test_train_model_in_series_LightGBMClassifier(mocker, freqai_conf):
     freqai_conf.update({"timerange": "20180110-20180130"})
-    freqai_conf.update({"freqaimodel": "LightGBMClassifier"})
-    freqai_conf.update({"strategy": "freqai_test_classifier"})
     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
     strategy.dp = DataProvider(freqai_conf, exchange)
@@ -161,8 +123,8 @@ def test_train_model_in_series_LightGBMClassifier(mocker, freqai_conf):
     data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")

-    freqai.train_model_in_series(new_timerange, "ADA/BTC",
+    freqai.extract_data_and_train_model(new_timerange, "ADA/BTC",
                                  strategy, freqai.dk, data_load_timerange)

     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").exists()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists()
@@ -296,7 +258,8 @@ def test_follow_mode(mocker, freqai_conf):
     data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")

-    freqai.train_model_in_series(new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
+    freqai.extract_data_and_train_model(
+        new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)

     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file()
@@ -345,7 +308,8 @@ def test_principal_component_analysis(mocker, freqai_conf):
     data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")

-    freqai.train_model_in_series(new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
+    freqai.extract_data_and_train_model(
+        new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)

     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_pca_object.pkl")

@@ -40,14 +40,14 @@ def test_text_table_bt_results():
     )

     result_str = (
-        '| Pair | Buys | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % |'
-        ' Avg Duration | Win Draw Loss Win% |\n'
-        '|---------+--------+----------------+----------------+------------------+----------------+'
-        '----------------+-------------------------|\n'
-        '| ETH/BTC | 3 | 8.33 | 25.00 | 0.50000000 | 12.50 |'
-        ' 0:20:00 | 2 0 1 66.7 |\n'
-        '| TOTAL | 3 | 8.33 | 25.00 | 0.50000000 | 12.50 |'
-        ' 0:20:00 | 2 0 1 66.7 |'
+        '| Pair | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | '
+        'Tot Profit % | Avg Duration | Win Draw Loss Win% |\n'
+        '|---------+-----------+----------------+----------------+------------------+'
+        '----------------+----------------+-------------------------|\n'
+        '| ETH/BTC | 3 | 8.33 | 25.00 | 0.50000000 | '
+        '12.50 | 0:20:00 | 2 0 1 66.7 |\n'
+        '| TOTAL | 3 | 8.33 | 25.00 | 0.50000000 | '
+        '12.50 | 0:20:00 | 2 0 1 66.7 |'
     )

     pair_results = generate_pair_metrics(['ETH/BTC'], stake_currency='BTC',
@@ -402,13 +402,13 @@ def test_text_table_strategy(testdatadir):
     bt_res_data_comparison = bt_res_data.pop('strategy_comparison')

     result_str = (
-        '| Strategy | Buys | Avg Profit % | Cum Profit % | Tot Profit BTC |'
+        '| Strategy | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC |'
         ' Tot Profit % | Avg Duration | Win Draw Loss Win% | Drawdown |\n'
-        '|----------------+--------+----------------+----------------+------------------+'
+        '|----------------+-----------+----------------+----------------+------------------+'
         '----------------+----------------+-------------------------+-----------------------|\n'
         '| StrategyTestV2 | 179 | 0.08 | 14.39 | 0.02608550 |'
         ' 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |\n'
         '| TestStrategy | 179 | 0.08 | 14.39 | 0.02608550 |'
         ' 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |'
     )

|
|||||||
|
|
||||||
|
|
||||||
def test_send_msg_unknown_type(default_conf, mocker) -> None:
|
def test_send_msg_unknown_type(default_conf, mocker) -> None:
|
||||||
telegram, _, _ = get_telegram_testobject(mocker, default_conf)
|
telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
|
||||||
with pytest.raises(NotImplementedError, match=r'Unknown message type: None'):
|
telegram.send_msg({
|
||||||
telegram.send_msg({
|
'type': None,
|
||||||
'type': None,
|
})
|
||||||
})
|
msg_mock.call_count == 0
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize('message_type,enter,enter_signal,leverage', [
|
@pytest.mark.parametrize('message_type,enter,enter_signal,leverage', [
|
||||||
|
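The reworked test above sends an unknown message type straight through `send_msg` and verifies that the handler ignores it by checking the mock's call count (note that a bare `msg_mock.call_count == 0` comparison is a no-op, hence the `assert`). A minimal, self-contained sketch of the same pattern; the `dispatch` helper and its names are illustrative, not freqtrade code:

```python
# Sketch: asserting that a mock was NOT called (illustrative names only).
from unittest.mock import MagicMock

send = MagicMock()

def dispatch(msg: dict) -> None:
    # Messages without a known type are silently dropped.
    if msg.get('type') is not None:
        send(msg)

dispatch({'type': None})
assert send.call_count == 0  # without `assert`, the comparison result is discarded
```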
@@ -11,8 +11,7 @@ from pandas import DataFrame
 from freqtrade.configuration import TimeRange
 from freqtrade.data.dataprovider import DataProvider
 from freqtrade.data.history import load_data
-from freqtrade.enums import ExitCheckTuple, ExitType, SignalDirection
-from freqtrade.enums.hyperoptstate import HyperoptState
+from freqtrade.enums import ExitCheckTuple, ExitType, HyperoptState, SignalDirection
 from freqtrade.exceptions import OperationalException, StrategyError
 from freqtrade.optimize.hyperopt_tools import HyperoptStateContainer
 from freqtrade.optimize.space import SKDecimal
@@ -117,6 +117,29 @@ def test_merge_informative_pair_lower():
     merge_informative_pair(data, informative, '1h', '15m', ffill=True)


+def test_merge_informative_pair_suffix():
+    data = generate_test_data('15m', 20)
+    informative = generate_test_data('1h', 20)
+
+    result = merge_informative_pair(data, informative, '15m', '1h',
+                                    append_timeframe=False, suffix="suf")
+
+    assert 'date' in result.columns
+    assert result['date'].equals(data['date'])
+    assert 'date_suf' in result.columns
+
+    assert 'open_suf' in result.columns
+    assert 'open_1h' not in result.columns
+
+
+def test_merge_informative_pair_suffix_append_timeframe():
+    data = generate_test_data('15m', 20)
+    informative = generate_test_data('1h', 20)
+
+    with pytest.raises(ValueError, match=r"You can not specify `append_timeframe` .*"):
+        merge_informative_pair(data, informative, '15m', '1h', suffix="suf")
+
+
 def test_stoploss_from_open():
     open_price_ranges = [
         [0.01, 1.00, 30],
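The two new tests pin down the `suffix` option of `merge_informative_pair`: with `append_timeframe=False`, informative columns come back as `<column>_<suffix>` instead of `<column>_<timeframe>`, and passing `suffix` while `append_timeframe` stays `True` raises a `ValueError`. A rough usage sketch under those assumptions, with hand-built frames standing in for the `generate_test_data` helper:

```python
# Sketch of the suffix behaviour exercised above; column names follow the tests.
import pandas as pd

from freqtrade.strategy import merge_informative_pair

df = pd.DataFrame({
    'date': pd.date_range('2022-01-01', periods=8, freq='15min', tz='UTC'),
    'open': range(8),
})
informative = pd.DataFrame({
    'date': pd.date_range('2022-01-01', periods=2, freq='1h', tz='UTC'),
    'open': [100, 101],
})

# Informative columns arrive as 'date_suf' / 'open_suf' rather than 'date_1h' / 'open_1h'.
merged = merge_informative_pair(df, informative, '15m', '1h',
                                append_timeframe=False, suffix='suf')
assert 'open_suf' in merged.columns and 'open_1h' not in merged.columns
```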
@@ -506,7 +506,7 @@ def test_create_trades_multiple_trades(


 def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker,
-                               limit_buy_order_usdt_open) -> None:
+                               limit_buy_order_usdt_open, caplog) -> None:
     patch_RPCManager(mocker)
     patch_exchange(mocker)
     default_conf_usdt['max_open_trades'] = 4
@@ -515,6 +515,7 @@ def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker,
         fetch_ticker=ticker_usdt,
         create_order=MagicMock(return_value=limit_buy_order_usdt_open),
         get_fee=fee,
+        get_funding_fees=MagicMock(side_effect=ExchangeError()),
     )
     freqtrade = FreqtradeBot(default_conf_usdt)
     patch_get_signal(freqtrade)
@@ -522,6 +523,7 @@ def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker,
     # Create 2 existing trades
     freqtrade.execute_entry('ETH/USDT', default_conf_usdt['stake_amount'])
     freqtrade.execute_entry('NEO/BTC', default_conf_usdt['stake_amount'])
+    assert log_has("Could not find funding fee.", caplog)

     assert len(Trade.get_open_trades()) == 2
     # Change order_id for new orders
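Several of the hunks above and below share one testing pattern: the mocked exchange method is given `side_effect=ExchangeError()`, and the test then asserts, via pytest's `caplog` fixture (wrapped by freqtrade's `log_has` helper), that the bot logged the failure rather than raising. A self-contained sketch of that pattern; `fetch_funding_fee`, `update_funding`, and the local `ExchangeError` are illustrative stand-ins, not freqtrade APIs:

```python
# Sketch: "mock raises -> code logs instead of crashing", verified via caplog.
import logging
from unittest.mock import MagicMock

logger = logging.getLogger(__name__)

class ExchangeError(Exception):
    """Stand-in for freqtrade.exceptions.ExchangeError."""

def update_funding(exchange) -> None:
    try:
        exchange.fetch_funding_fee()
    except ExchangeError:
        logger.warning("Could not find funding fee.")

def test_update_funding_logs_on_error(caplog):
    exchange = MagicMock()
    exchange.fetch_funding_fee.side_effect = ExchangeError()
    with caplog.at_level(logging.WARNING):
        update_funding(exchange)
    assert "Could not find funding fee." in caplog.text
```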
@@ -3655,6 +3657,7 @@ def test_may_execute_trade_exit_after_stoploss_on_exchange_hit(
     assert trade.exit_reason == ExitType.STOPLOSS_ON_EXCHANGE.value
     assert rpc_mock.call_count == 3
     assert rpc_mock.call_args_list[0][0][0]['type'] == RPCMessageType.ENTRY
+    assert rpc_mock.call_args_list[0][0][0]['amount'] > 20
     assert rpc_mock.call_args_list[1][0][0]['type'] == RPCMessageType.ENTRY_FILL
     assert rpc_mock.call_args_list[2][0][0]['type'] == RPCMessageType.EXIT_FILL

@@ -3665,7 +3668,7 @@ def test_may_execute_trade_exit_after_stoploss_on_exchange_hit(
     (True, 29.70297029, 2.2, 2.3, -8.63762376, -0.1443212, 'loss'),
 ])
 def test_execute_trade_exit_market_order(
-    default_conf_usdt, ticker_usdt, fee, is_short, current_rate, amount,
+    default_conf_usdt, ticker_usdt, fee, is_short, current_rate, amount, caplog,
     limit, profit_amount, profit_ratio, profit_or_loss, ticker_usdt_sell_up, mocker
 ) -> None:
     """
@@ -3693,6 +3696,7 @@ def test_execute_trade_exit_market_order(
         fetch_ticker=ticker_usdt,
         get_fee=fee,
         _is_dry_limit_order_filled=MagicMock(return_value=True),
+        get_funding_fees=MagicMock(side_effect=ExchangeError()),
     )
     patch_whitelist(mocker, default_conf_usdt)
     freqtrade = FreqtradeBot(default_conf_usdt)
@@ -3718,6 +3722,7 @@ def test_execute_trade_exit_market_order(
         limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
         exit_check=ExitCheckTuple(exit_type=ExitType.ROI)
     )
+    assert log_has("Could not update funding fee.", caplog)

     assert not trade.is_open
     assert pytest.approx(trade.close_profit) == profit_ratio
@@ -5429,6 +5434,16 @@ def test_update_funding_fees(
     ))


+def test_update_funding_fees_error(mocker, default_conf, caplog):
+    mocker.patch('freqtrade.exchange.Exchange.get_funding_fees', side_effect=ExchangeError())
+    default_conf['trading_mode'] = 'futures'
+    default_conf['margin_mode'] = 'isolated'
+    freqtrade = get_patched_freqtradebot(mocker, default_conf)
+    freqtrade.update_funding_fees()
+
+    assert log_has("Could not update funding fees for open trades.", caplog)
+
+
 def test_position_adjust(mocker, default_conf_usdt, fee) -> None:
     patch_RPCManager(mocker)
     patch_exchange(mocker)
@@ -485,7 +485,7 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog) -> None:
     assert len(trade.orders) == 1
     assert pytest.approx(trade.stake_amount) == 60
     assert pytest.approx(trade.amount) == 30.0
-    assert log_has_re("Remaining amount of 1.6.* would be too small.", caplog)
+    assert log_has_re("Remaining amount of 1.6.* would be smaller than the minimum of 10.", caplog)

     freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-20)

@@ -504,9 +504,21 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog) -> None:
     freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-50)
     freqtrade.process()
     assert log_has_re("Adjusting amount to trade.amount as it is higher.*", caplog)
-    assert log_has_re("Remaining amount of 0.0 would be too small.", caplog)
+    assert log_has_re("Remaining amount of 0.0 would be smaller than the minimum of 10.", caplog)
     trade = Trade.get_trades().first()
     assert len(trade.orders) == 2
     assert trade.orders[-1].ft_order_side == 'sell'
     assert pytest.approx(trade.stake_amount) == 40.198
     assert trade.is_open
+
+    # use amount that would trunc to 0.0 once selling
+    mocker.patch("freqtrade.exchange.Exchange.amount_to_contract_precision",
+                 lambda s, p, v: round(v, 1))
+    freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-0.01)
+    freqtrade.process()
+    trade = Trade.get_trades().first()
+    assert len(trade.orders) == 2
+    assert trade.orders[-1].ft_order_side == 'sell'
+    assert pytest.approx(trade.stake_amount) == 40.198
+    assert trade.is_open
+    assert log_has_re('Amount to sell is 0.0 due to exchange limits - not selling.', caplog)
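The added scenario covers a partial exit whose amount collapses to zero under the exchange's contract precision: `amount_to_contract_precision` is patched to plain `round(v, 1)`, so a requested reduction of 0.01 becomes 0.0 and the bot logs instead of placing an order. A toy illustration of that rounding effect; the function below mimics only the patched lambda, not freqtrade's real precision handling:

```python
# Toy illustration of the precision truncation exercised above.
def amount_to_contract_precision(amount: float) -> float:
    return round(amount, 1)  # the test patches the real method to exactly this

requested_reduction = 0.01
tradable = amount_to_contract_precision(requested_reduction)
assert tradable == 0.0  # -> "Amount to sell is 0.0 due to exchange limits - not selling."
```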
@@ -9,7 +9,7 @@ import arrow
 import pytest
 from sqlalchemy import create_engine, text

-from freqtrade import constants
+from freqtrade.constants import DATETIME_PRINT_FORMAT, DEFAULT_DB_PROD_URL
 from freqtrade.enums import TradingMode
 from freqtrade.exceptions import DependencyException, OperationalException
 from freqtrade.persistence import LocalTrade, Order, Trade, init_db
@@ -52,7 +52,7 @@ def test_init_invalid_db_url():

 def test_init_prod_db(default_conf, mocker):
     default_conf.update({'dry_run': False})
-    default_conf.update({'db_url': constants.DEFAULT_DB_PROD_URL})
+    default_conf.update({'db_url': DEFAULT_DB_PROD_URL})

     create_engine_mock = mocker.patch('freqtrade.persistence.models.create_engine', MagicMock())

@@ -615,21 +615,25 @@ def test_calc_open_close_trade_price(
         is_short=is_short,
         leverage=lev,
         trading_mode=trading_mode,
-        funding_fees=funding_fees
     )
     entry_order = limit_order[trade.entry_side]
     exit_order = limit_order[trade.exit_side]
     trade.open_order_id = f'something-{is_short}-{lev}-{exchange}'

     oobj = Order.parse_from_ccxt_object(entry_order, 'ADA/USDT', trade.entry_side)
-    trade.orders.append(oobj)
+    oobj.trade = trade
+    oobj.update_from_ccxt_object(entry_order)
     trade.update_trade(oobj)

+    trade.funding_fees = funding_fees
+
     oobj = Order.parse_from_ccxt_object(exit_order, 'ADA/USDT', trade.exit_side)
-    trade.orders.append(oobj)
+    oobj.trade = trade
+    oobj.update_from_ccxt_object(exit_order)
     trade.update_trade(oobj)

     assert trade.is_open is False
+    assert trade.funding_fees == funding_fees

     assert pytest.approx(trade._calc_open_trade_value(trade.amount, trade.open_rate)) == open_value
     assert pytest.approx(trade.calc_close_trade_value(trade.close_rate)) == close_value
@@ -1735,7 +1739,7 @@ def test_to_json(fee):
         'base_currency': 'ADA',
         'quote_currency': 'USDT',
         'is_open': None,
-        'open_date': trade.open_date.strftime("%Y-%m-%d %H:%M:%S"),
+        'open_date': trade.open_date.strftime(DATETIME_PRINT_FORMAT),
         'open_timestamp': int(trade.open_date.timestamp() * 1000),
         'open_order_id': 'dry_run_buy_12345',
         'close_date': None,
@@ -1813,9 +1817,9 @@ def test_to_json(fee):
         'pair': 'XRP/BTC',
         'base_currency': 'XRP',
         'quote_currency': 'BTC',
-        'open_date': trade.open_date.strftime("%Y-%m-%d %H:%M:%S"),
+        'open_date': trade.open_date.strftime(DATETIME_PRINT_FORMAT),
         'open_timestamp': int(trade.open_date.timestamp() * 1000),
-        'close_date': trade.close_date.strftime("%Y-%m-%d %H:%M:%S"),
+        'close_date': trade.close_date.strftime(DATETIME_PRINT_FORMAT),
         'close_timestamp': int(trade.close_date.timestamp() * 1000),
         'open_rate': 0.123,
         'close_rate': 0.125,
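These last two hunks replace the hard-coded `"%Y-%m-%d %H:%M:%S"` literal with the `DATETIME_PRINT_FORMAT` constant imported above, so the test expectations cannot drift from the format the bot uses elsewhere. A quick sketch of the idea; the constant's value is inferred from the literal it replaces here:

```python
from datetime import datetime, timezone

# Assumed to match freqtrade.constants.DATETIME_PRINT_FORMAT (the literal replaced above).
DATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S'

open_date = datetime(2022, 9, 18, 12, 30, tzinfo=timezone.utc)
assert open_date.strftime(DATETIME_PRINT_FORMAT) == '2022-09-18 12:30:00'
```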