diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 03a1ab0f4..6b7d7cc29 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,7 +3,6 @@ name: Freqtrade CI on: push: branches: - - master - stable - develop tags: @@ -20,7 +19,7 @@ jobs: strategy: matrix: os: [ ubuntu-18.04, ubuntu-20.04 ] - python-version: [3.7, 3.8, 3.9] + python-version: ["3.7", "3.8", "3.9", "3.10"] steps: - uses: actions/checkout@v2 @@ -39,7 +38,7 @@ jobs: - name: pip cache (linux) uses: actions/cache@v2 - if: startsWith(matrix.os, 'ubuntu') + if: runner.os == 'Linux' with: path: ~/.cache/pip key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip @@ -50,8 +49,9 @@ jobs: cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd .. - name: Installation - *nix + if: runner.os == 'Linux' run: | - python -m pip install --upgrade pip + python -m pip install --upgrade pip wheel export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH export TA_LIBRARY_PATH=${HOME}/dependencies/lib export TA_INCLUDE_PATH=${HOME}/dependencies/include @@ -69,7 +69,7 @@ jobs: if: matrix.python-version == '3.9' - name: Coveralls - if: (startsWith(matrix.os, 'ubuntu-20') && matrix.python-version == '3.8') + if: (runner.os == 'Linux' && matrix.python-version == '3.8') env: # Coveralls token. Not used as secret due to github not providing secrets to forked repositories COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu @@ -114,7 +114,7 @@ jobs: strategy: matrix: os: [ macos-latest ] - python-version: [3.7, 3.8, 3.9] + python-version: ["3.7", "3.8", "3.9", "3.10"] steps: - uses: actions/checkout@v2 @@ -133,7 +133,7 @@ jobs: - name: pip cache (macOS) uses: actions/cache@v2 - if: startsWith(matrix.os, 'macOS') + if: runner.os == 'macOS' with: path: ~/Library/Caches/pip key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip @@ -144,10 +144,11 @@ jobs: cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd .. - name: Installation - macOS + if: runner.os == 'macOS' run: | brew update brew install hdf5 c-blosc - python -m pip install --upgrade pip + python -m pip install --upgrade pip wheel export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH export TA_LIBRARY_PATH=${HOME}/dependencies/lib export TA_INCLUDE_PATH=${HOME}/dependencies/include @@ -159,7 +160,7 @@ jobs: pytest --random-order --cov=freqtrade --cov-config=.coveragerc - name: Coveralls - if: (startsWith(matrix.os, 'ubuntu-20') && matrix.python-version == '3.8') + if: (runner.os == 'Linux' && matrix.python-version == '3.8') env: # Coveralls token. 
Not used as secret due to github not providing secrets to forked repositories COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu @@ -205,7 +206,7 @@ jobs: strategy: matrix: os: [ windows-latest ] - python-version: [3.7, 3.8] + python-version: ["3.7", "3.8", "3.9", "3.10"] steps: - uses: actions/checkout@v2 @@ -217,7 +218,6 @@ jobs: - name: Pip cache (Windows) uses: actions/cache@preview - if: startsWith(runner.os, 'Windows') with: path: ~\AppData\Local\pip\Cache key: ${{ matrix.os }}-${{ matrix.python-version }}-pip diff --git a/Dockerfile b/Dockerfile index 8f5b85698..1d283e5c5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.9.9-slim-bullseye as base +FROM python:3.10.0-slim-bullseye as base # Setup env ENV LANG C.UTF-8 diff --git a/README.md b/README.md index 9882bce02..3a7d42fe9 100644 --- a/README.md +++ b/README.md @@ -197,7 +197,7 @@ To run this bot we recommend you a cloud instance with a minimum of: ### Software requirements -- [Python 3.7.x](http://docs.python-guide.org/en/latest/starting/installation/) +- [Python >= 3.7](http://docs.python-guide.org/en/latest/starting/installation/) - [pip](https://pip.pypa.io/en/stable/installing/) - [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) - [TA-Lib](https://mrjbq7.github.io/ta-lib/install.html) diff --git a/build_helpers/TA_Lib-0.4.22-cp310-cp310-win_amd64.whl b/build_helpers/TA_Lib-0.4.22-cp310-cp310-win_amd64.whl new file mode 100644 index 000000000..d3477abd1 Binary files /dev/null and b/build_helpers/TA_Lib-0.4.22-cp310-cp310-win_amd64.whl differ diff --git a/build_helpers/install_windows.ps1 b/build_helpers/install_windows.ps1 index cda87e98d..f04869780 100644 --- a/build_helpers/install_windows.ps1 +++ b/build_helpers/install_windows.ps1 @@ -1,7 +1,7 @@ # Downloads don't work automatically, since the URL is regenerated via javascript. # Downloaded from https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib -python -m pip install --upgrade pip +python -m pip install --upgrade pip wheel $pyv = python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')" @@ -14,6 +14,8 @@ if ($pyv -eq '3.8') { if ($pyv -eq '3.9') { pip install build_helpers\TA_Lib-0.4.22-cp39-cp39-win_amd64.whl } - +if ($pyv -eq '3.10') { + pip install build_helpers\TA_Lib-0.4.22-cp310-cp310-win_amd64.whl +} pip install -r requirements-dev.txt pip install -e . diff --git a/docs/advanced-setup.md b/docs/advanced-setup.md index 02b0307e5..93a2025ed 100644 --- a/docs/advanced-setup.md +++ b/docs/advanced-setup.md @@ -176,12 +176,15 @@ Log messages are send to `syslog` with the `user` facility. So you can see them On many systems `syslog` (`rsyslog`) fetches data from `journald` (and vice versa), so both `--logfile syslog` or `--logfile journald` can be used and the messages be viewed with both `journalctl` and a syslog viewer utility. You can combine this in any way which suites you better. For `rsyslog` the messages from the bot can be redirected into a separate dedicated log file. To achieve this, add + ``` if $programname startswith "freqtrade" then -/var/log/freqtrade.log ``` + to one of the rsyslog configuration files, for example at the end of the `/etc/rsyslog.d/50-default.conf`. For `syslog` (`rsyslog`), the reduction mode can be switched on. This will reduce the number of repeating messages. For instance, multiple bot Heartbeat messages will be reduced to a single message when nothing else happens with the bot. 
To achieve this, set in `/etc/rsyslog.conf`: + ``` # Filter duplicated messages $RepeatedMsgReduction on diff --git a/docs/assets/plot-profit.png b/docs/assets/plot-profit.png index 88d69a2d4..e9fe6c341 100644 Binary files a/docs/assets/plot-profit.png and b/docs/assets/plot-profit.png differ diff --git a/docs/backtesting.md b/docs/backtesting.md index a49e4700a..ad62c84b3 100644 --- a/docs/backtesting.md +++ b/docs/backtesting.md @@ -484,8 +484,8 @@ Since backtesting lacks some detailed information about what happens within a ca - ROI applies before trailing-stop, ensuring profits are "top-capped" at ROI if both ROI and trailing stop applies - Sell-reason does not explain if a trade was positive or negative, just what triggered the sell (this can look odd if negative ROI values are used) - Evaluation sequence (if multiple signals happen on the same candle) - - ROI (if not stoploss) - Sell-signal + - ROI (if not stoploss) - Stoploss Taking these assumptions, backtesting tries to mirror real trading as closely as possible. However, backtesting will **never** replace running a strategy in dry-run mode. diff --git a/docs/installation.md b/docs/installation.md index f3955ec5a..c67eff60b 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -56,10 +56,6 @@ OS Specific steps are listed first, the [Common](#common) section below is neces !!! Note Python3.7 or higher and the corresponding pip are assumed to be available. -!!! Warning "Python 3.10 support" - Due to issues with dependencies, freqtrade is currently unable to support python 3.10. - We're working on supporting python 3.10, are however dependant on support from dependencies. - === "Debian/Ubuntu" #### Install necessary dependencies @@ -424,16 +420,3 @@ open /Library/Developer/CommandLineTools/Packages/macOS_SDK_headers_for_macOS_10 ``` If this file is inexistent, then you're probably on a different version of MacOS, so you may need to consult the internet for specific resolution details. - -### MacOS installation error with python 3.9 - -When using python 3.9 on macOS, it's currently necessary to install some os-level modules to allow dependencies to compile. -The errors you'll see happen during installation and are related to the installation of `tables` or `blosc`. - -You can install the necessary libraries with the following command: - -```bash -brew install hdf5 c-blosc -``` - -After this, please run the installation (script) again. diff --git a/docs/plotting.md b/docs/plotting.md index 38635ae6e..a812f2429 100644 --- a/docs/plotting.md +++ b/docs/plotting.md @@ -286,6 +286,8 @@ The `plot-profit` subcommand shows an interactive graph with three plots: * The summarized profit made by backtesting. Note that this is not the real-world profit, but more of an estimate. * Profit for each individual pair. +* Parallelism of trades. +* Underwater (Periods of drawdown). The first graph is good to get a grip of how the overall market progresses. @@ -295,6 +297,8 @@ This graph will also highlight the start (and end) of the Max drawdown period. The third graph can be useful to spot outliers, events in pairs that cause profit spikes. +The fourth graph can help you analyze trade parallelism, showing how often max_open_trades has been maxed out.
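The fifth graph shows the underwater curve, i.e. how far cumulative profit currently sits below its running high. A minimal pandas sketch of that calculation, mirroring the `_calc_drawdown_series` helper added in `freqtrade/data/btanalysis.py` below; the column names `close_date` and `profit_abs` follow that change, while the function name here is purely illustrative:

```python
import pandas as pd


def underwater_curve(trades: pd.DataFrame) -> pd.DataFrame:
    """Cumulative profit, its running high, and the drawdown below that high."""
    profit = trades.sort_values('close_date').reset_index(drop=True)
    out = pd.DataFrame()
    out['date'] = profit['close_date']
    out['cumulative'] = profit['profit_abs'].cumsum()
    out['high_value'] = out['cumulative'].cummax()
    out['drawdown'] = out['cumulative'] - out['high_value']
    return out
```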
+ Possible options for the `freqtrade plot-profit` subcommand: ``` diff --git a/docs/windows_installation.md b/docs/windows_installation.md index 965dde485..f4be06db3 100644 --- a/docs/windows_installation.md +++ b/docs/windows_installation.md @@ -25,7 +25,7 @@ Install ta-lib according to the [ta-lib documentation](https://github.com/mrjbq7 As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), there is also a repository of unofficial pre-compiled windows Wheels [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib), which need to be downloaded and installed using `pip install TA_Lib‑0.4.22‑cp38‑cp38‑win_amd64.whl` (make sure to use the version matching your python version). -Freqtrade provides these dependencies for the latest 3 Python versions (3.7, 3.8 and 3.9) and for 64bit Windows. +Freqtrade provides these dependencies for the latest 4 Python versions (3.7, 3.8, 3.9 and 3.10) and for 64bit Windows. Other versions must be downloaded from the above link. ``` powershell diff --git a/freqtrade/configuration/PeriodicCache.py b/freqtrade/configuration/PeriodicCache.py index 25c0c47f3..64fff668e 100644 --- a/freqtrade/configuration/PeriodicCache.py +++ b/freqtrade/configuration/PeriodicCache.py @@ -1,6 +1,6 @@ from datetime import datetime, timezone -from cachetools.ttl import TTLCache +from cachetools import TTLCache class PeriodicCache(TTLCache): diff --git a/freqtrade/data/btanalysis.py b/freqtrade/data/btanalysis.py index 7d97661c4..56c16f966 100644 --- a/freqtrade/data/btanalysis.py +++ b/freqtrade/data/btanalysis.py @@ -325,6 +325,7 @@ def combine_dataframes_with_mean(data: Dict[str, pd.DataFrame], :param column: Column in the original dataframes to use :return: DataFrame with the column renamed to the dict key, and a column named mean, containing the mean of all pairs. + :raise: ValueError if no data is provided. """ df_comb = pd.concat([data[pair].set_index('date').rename( {column: pair}, axis=1)[pair] for pair in data], axis=1) @@ -360,6 +361,36 @@ def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str, return df +def _calc_drawdown_series(profit_results: pd.DataFrame, *, date_col: str, value_col: str + ) -> pd.DataFrame: + max_drawdown_df = pd.DataFrame() + max_drawdown_df['cumulative'] = profit_results[value_col].cumsum() + max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax() + max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value'] + max_drawdown_df['date'] = profit_results.loc[:, date_col] + return max_drawdown_df + + +def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date', + value_col: str = 'profit_ratio' + ): + """ + Calculate the underwater (drawdown below the running high) time series + :param trades: DataFrame containing trades (requires columns close_date and profit_ratio) + :param date_col: Column in DataFrame to use for dates (defaults to 'close_date') + :param value_col: Column in DataFrame to use for values (defaults to 'profit_ratio') + :return: DataFrame with 'date', 'cumulative', 'high_value' and 'drawdown' columns. + :raise: ValueError if trade-dataframe was found empty.
+ """ + if len(trades) == 0: + raise ValueError("Trade dataframe empty.") + profit_results = trades.sort_values(date_col).reset_index(drop=True) + max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col) + + return max_drawdown_df + + def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date', value_col: str = 'profit_ratio' ) -> Tuple[float, pd.Timestamp, pd.Timestamp, float, float]: @@ -375,10 +406,7 @@ def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date' if len(trades) == 0: raise ValueError("Trade dataframe empty.") profit_results = trades.sort_values(date_col).reset_index(drop=True) - max_drawdown_df = pd.DataFrame() - max_drawdown_df['cumulative'] = profit_results[value_col].cumsum() - max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax() - max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value'] + max_drawdown_df = _calc_drawdown_series(profit_results, date_col=date_col, value_col=value_col) idxmin = max_drawdown_df['drawdown'].idxmin() if idxmin == 0: diff --git a/freqtrade/data/history/idatahandler.py b/freqtrade/data/history/idatahandler.py index 578d0b5bf..cb02f98e3 100644 --- a/freqtrade/data/history/idatahandler.py +++ b/freqtrade/data/history/idatahandler.py @@ -201,7 +201,7 @@ class IDataHandler(ABC): enddate = pairdf.iloc[-1]['date'] if timerange_startup: - self._validate_pairdata(pair, pairdf, timerange_startup) + self._validate_pairdata(pair, pairdf, timeframe, timerange_startup) pairdf = trim_dataframe(pairdf, timerange_startup) if self._check_empty_df(pairdf, pair, timeframe, warn_no_data): return pairdf @@ -228,7 +228,7 @@ class IDataHandler(ABC): return True return False - def _validate_pairdata(self, pair, pairdata: DataFrame, timerange: TimeRange): + def _validate_pairdata(self, pair, pairdata: DataFrame, timeframe: str, timerange: TimeRange): """ Validates pairdata for missing data at start end end and logs warnings. :param pairdata: Dataframe to validate @@ -238,12 +238,12 @@ class IDataHandler(ABC): if timerange.starttype == 'date': start = datetime.fromtimestamp(timerange.startts, tz=timezone.utc) if pairdata.iloc[0]['date'] > start: - logger.warning(f"Missing data at start for pair {pair}, " + logger.warning(f"Missing data at start for pair {pair} at {timeframe}, " f"data starts at {pairdata.iloc[0]['date']:%Y-%m-%d %H:%M:%S}") if timerange.stoptype == 'date': stop = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc) if pairdata.iloc[-1]['date'] < stop: - logger.warning(f"Missing data at end for pair {pair}, " + logger.warning(f"Missing data at end for pair {pair} at {timeframe}, " f"data ends at {pairdata.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}") diff --git a/freqtrade/exchange/common.py b/freqtrade/exchange/common.py index a4c827e07..3916ee8f7 100644 --- a/freqtrade/exchange/common.py +++ b/freqtrade/exchange/common.py @@ -4,9 +4,20 @@ import time from functools import wraps from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError +from freqtrade.mixins import LoggingMixin logger = logging.getLogger(__name__) +__logging_mixin = None + + +def _get_logging_mixin(): + # Logging-mixin to cache kucoin responses + # Only to be used in retrier + global __logging_mixin + if not __logging_mixin: + __logging_mixin = LoggingMixin(logger) + return __logging_mixin # Maximum default retry count. 
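The module-level `_get_logging_mixin()` above exists so the Kucoin-specific warning in the retrier below can be emitted through `log_once` instead of flooding the log on every retry. A minimal sketch of that deduplication idea, using a plain TTL dictionary rather than freqtrade's actual `LoggingMixin` internals; the 300-second window and the helper names are assumptions for illustration only:

```python
import logging
import time

logger = logging.getLogger(__name__)


def make_log_once(ttl: float = 300.0):
    """Build a log_once(message, logmethod) callable that emits a given
    message at most once per `ttl` seconds (assumed window, for illustration)."""
    last_seen = {}  # message -> timestamp of last emission

    def log_once(message: str, logmethod=logger.info) -> None:
        now = time.time()
        # Forget messages whose suppression window has expired.
        for key in [k for k, ts in last_seen.items() if now - ts > ttl]:
            del last_seen[key]
        if message not in last_seen:
            last_seen[message] = now
            logmethod(message)

    return log_once


log_once = make_log_once()
log_once("Kucoin 429 error, backing off")   # logged
log_once("Kucoin 429 error, backing off")   # suppressed within the window
```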
@@ -72,28 +83,33 @@ def calculate_backoff(retrycount, max_retries): def retrier_async(f): async def wrapper(*args, **kwargs): count = kwargs.pop('count', API_RETRY_COUNT) + kucoin = args[0].name == "Kucoin" # Check if the exchange is KuCoin. try: return await f(*args, **kwargs) except TemporaryError as ex: - logger.warning('%s() returned exception: "%s"', f.__name__, ex) + msg = f'{f.__name__}() returned exception: "{ex}". ' if count > 0: - logger.warning('retrying %s() still for %s times', f.__name__, count) + msg += f'Retrying still for {count} times.' count -= 1 - kwargs.update({'count': count}) + kwargs['count'] = count if isinstance(ex, DDosProtection): - if "kucoin" in str(ex) and "429000" in str(ex): + if kucoin and "429000" in str(ex): # Temporary fix for 429000 error on kucoin # see https://github.com/freqtrade/freqtrade/issues/5700 for details. - logger.warning( + _get_logging_mixin().log_once( f"Kucoin 429 error, avoid triggering DDosProtection backoff delay. " - f"{count} tries left before giving up") + f"{count} tries left before giving up", logmethod=logger.warning) + # Reset msg to avoid logging too many times. + msg = '' else: backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT) logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}") await asyncio.sleep(backoff_delay) + if msg: + logger.warning(msg) return await wrapper(*args, **kwargs) else: - logger.warning('Giving up retrying: %s()', f.__name__) + logger.warning(msg + 'Giving up.') raise ex return wrapper @@ -106,9 +122,9 @@ def retrier(_func=None, retries=API_RETRY_COUNT): try: return f(*args, **kwargs) except (TemporaryError, RetryableOrderError) as ex: - logger.warning('%s() returned exception: "%s"', f.__name__, ex) + msg = f'{f.__name__}() returned exception: "{ex}". 
' if count > 0: - logger.warning('retrying %s() still for %s times', f.__name__, count) + logger.warning(msg + f'Retrying still for {count} times.') count -= 1 kwargs.update({'count': count}) if isinstance(ex, (DDosProtection, RetryableOrderError)): @@ -118,7 +134,7 @@ def retrier(_func=None, retries=API_RETRY_COUNT): time.sleep(backoff_delay) return wrapper(*args, **kwargs) else: - logger.warning('Giving up retrying: %s()', f.__name__) + logger.warning(msg + 'Giving up.') raise ex return wrapper # Support both @retrier and @retrier(retries=2) syntax diff --git a/freqtrade/exchange/exchange.py b/freqtrade/exchange/exchange.py index 813938f99..8bd9db9f6 100644 --- a/freqtrade/exchange/exchange.py +++ b/freqtrade/exchange/exchange.py @@ -83,6 +83,8 @@ class Exchange: self._api: ccxt.Exchange = None self._api_async: ccxt_async.Exchange = None self._markets: Dict = {} + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) self._config.update(config) @@ -170,8 +172,10 @@ class Exchange: def close(self): logger.debug("Exchange object destroyed, closing async loop") - if self._api_async and inspect.iscoroutinefunction(self._api_async.close): - asyncio.get_event_loop().run_until_complete(self._api_async.close()) + if (self._api_async and inspect.iscoroutinefunction(self._api_async.close) + and self._api_async.session): + logger.info("Closing async ccxt session.") + self.loop.run_until_complete(self._api_async.close()) def _init_ccxt(self, exchange_config: Dict[str, Any], ccxt_module: CcxtModuleType = ccxt, ccxt_kwargs: Dict = {}) -> ccxt.Exchange: @@ -326,7 +330,7 @@ class Exchange: def _load_async_markets(self, reload: bool = False) -> None: try: if self._api_async: - asyncio.get_event_loop().run_until_complete( + self.loop.run_until_complete( self._api_async.load_markets(reload=reload)) except (asyncio.TimeoutError, ccxt.BaseError) as e: @@ -1227,7 +1231,7 @@ class Exchange: :param since_ms: Timestamp in milliseconds to get history from :return: List with candle (OHLCV) data """ - pair, timeframe, data = asyncio.get_event_loop().run_until_complete( + pair, timeframe, data = self.loop.run_until_complete( self._async_get_historic_ohlcv(pair=pair, timeframe=timeframe, since_ms=since_ms, is_new_pair=is_new_pair)) logger.info(f"Downloaded data for {pair} with length {len(data)}.") @@ -1329,8 +1333,10 @@ class Exchange: results_df = {} # Chunk requests into batches of 100 to avoid overwelming ccxt Throttling for input_coro in chunks(input_coroutines, 100): - results = asyncio.get_event_loop().run_until_complete( - asyncio.gather(*input_coro, return_exceptions=True)) + async def gather_stuff(): + return await asyncio.gather(*input_coro, return_exceptions=True) + + results = self.loop.run_until_complete(gather_stuff()) # handle caching for res in results: @@ -1566,7 +1572,7 @@ class Exchange: if not self.exchange_has("fetchTrades"): raise OperationalException("This exchange does not support downloading Trades.") - return asyncio.get_event_loop().run_until_complete( + return self.loop.run_until_complete( self._async_get_trade_history(pair=pair, since=since, until=until, from_id=from_id)) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index f4342fe05..0ef2d8fe1 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -126,6 +126,7 @@ class FreqtradeBot(LoggingMixin): self.rpc.cleanup() cleanup_db() + self.exchange.close() def startup(self) -> None: """ diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index 
70293e8b9..4cdb3abfb 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -246,6 +246,9 @@ class Backtesting: Helper function to convert a processed dataframes into lists for performance reasons. Used by backtest() - so keep this optimized for performance. + + :param processed: a processed dictionary with format {pair, data}, which gets cleared to + optimize memory usage! """ # Every change to this headers list must evaluate further usages of the resulting tuple # and eventually change the constants for indexes at the top @@ -254,7 +257,8 @@ class Backtesting: self.progress.init_step(BacktestState.CONVERT, len(processed)) # Create dict with data - for pair, pair_data in processed.items(): + for pair in processed.keys(): + pair_data = processed[pair] self.check_abort() self.progress.increment() if not pair_data.empty: @@ -283,6 +287,9 @@ class Backtesting: # Convert from Pandas to list for performance reasons # (Looping Pandas is slow.) data[pair] = df_analyzed[headers].values.tolist() + + # Do not hold on to old data to reduce memory usage + processed[pair] = pair_data = None return data def _get_close_rate(self, sell_row: Tuple, trade: LocalTrade, sell: SellCheckTuple, @@ -571,7 +578,8 @@ class Backtesting: Of course try to not have ugly code. By some accessor are sometime slower than functions. Avoid extensive logging in this method and functions it calls. - :param processed: a processed dictionary with format {pair, data} + :param processed: a processed dictionary with format {pair, data}, which gets cleared to + optimize memory usage! :param start_date: backtesting timerange start datetime :param end_date: backtesting timerange end datetime :param max_open_trades: maximum number of concurrent trades, <= 0 means unlimited diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 2c7cc0ea7..58da7d0d5 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -422,6 +422,7 @@ class Hyperopt: self.backtesting.exchange.close() self.backtesting.exchange._api = None # type: ignore self.backtesting.exchange._api_async = None # type: ignore + self.backtesting.exchange.loop = None # type: ignore # self.backtesting.exchange = None # type: ignore self.backtesting.pairlists = None # type: ignore diff --git a/freqtrade/plot/plotting.py b/freqtrade/plot/plotting.py index 6d44d56b1..c0888808f 100644 --- a/freqtrade/plot/plotting.py +++ b/freqtrade/plot/plotting.py @@ -5,7 +5,8 @@ from typing import Any, Dict, List import pandas as pd from freqtrade.configuration import TimeRange -from freqtrade.data.btanalysis import (calculate_max_drawdown, combine_dataframes_with_mean, +from freqtrade.data.btanalysis import (analyze_trade_parallelism, calculate_max_drawdown, + calculate_underwater, combine_dataframes_with_mean, create_cum_profit, extract_trades_of_period, load_trades) from freqtrade.data.converter import trim_dataframe from freqtrade.data.dataprovider import DataProvider @@ -185,6 +186,48 @@ def add_max_drawdown(fig, row, trades: pd.DataFrame, df_comb: pd.DataFrame, return fig +def add_underwater(fig, row, trades: pd.DataFrame) -> make_subplots: + """ + Add underwater plot + """ + try: + underwater = calculate_underwater(trades, value_col="profit_abs") + + underwater = go.Scatter( + x=underwater['date'], + y=underwater['drawdown'], + name="Underwater Plot", + fill='tozeroy', + fillcolor='#cc362b', + line={'color': '#cc362b'}, + ) + fig.add_trace(underwater, row, 1) + except ValueError: + logger.warning("No trades 
found - not plotting underwater plot") + return fig + + +def add_parallelism(fig, row, trades: pd.DataFrame, timeframe: str) -> make_subplots: + """ + Add Chart showing trade parallelism + """ + try: + result = analyze_trade_parallelism(trades, timeframe) + + drawdown = go.Scatter( + x=result.index, + y=result['open_trades'], + name="Parallel trades", + fill='tozeroy', + fillcolor='#242222', + line={'color': '#242222'}, + ) + fig.add_trace(drawdown, row, 1) + except ValueError: + logger.warning("No trades found - not plotting Parallelism.") + return fig + + def plot_trades(fig, trades: pd.DataFrame) -> make_subplots: """ Add trades to "fig" @@ -460,7 +503,12 @@ def generate_candlestick_graph(pair: str, data: pd.DataFrame, trades: pd.DataFra def generate_profit_graph(pairs: str, data: Dict[str, pd.DataFrame], trades: pd.DataFrame, timeframe: str, stake_currency: str) -> go.Figure: # Combine close-values for all pairs, rename columns to "pair" - df_comb = combine_dataframes_with_mean(data, "close") + try: + df_comb = combine_dataframes_with_mean(data, "close") + except ValueError: + raise OperationalException( + "No data found. Please make sure that data is available for " + "the timerange and pairs selected.") # Trim trades to available OHLCV data trades = extract_trades_of_period(df_comb, trades, date_index=True) @@ -477,20 +525,30 @@ def generate_profit_graph(pairs: str, data: Dict[str, pd.DataFrame], name='Avg close price', ) - fig = make_subplots(rows=3, cols=1, shared_xaxes=True, - row_width=[1, 1, 1], + fig = make_subplots(rows=5, cols=1, shared_xaxes=True, + row_heights=[1, 1, 1, 0.5, 1], vertical_spacing=0.05, - subplot_titles=["AVG Close Price", "Combined Profit", "Profit per pair"]) + subplot_titles=[ + "AVG Close Price", + "Combined Profit", + "Profit per pair", + "Parallelism", + "Underwater", + ]) fig['layout'].update(title="Freqtrade Profit plot") fig['layout']['yaxis1'].update(title='Price') fig['layout']['yaxis2'].update(title=f'Profit {stake_currency}') fig['layout']['yaxis3'].update(title=f'Profit {stake_currency}') + fig['layout']['yaxis4'].update(title='Trade count') + fig['layout']['yaxis5'].update(title='Underwater Plot') fig['layout']['xaxis']['rangeslider'].update(visible=False) fig.update_layout(modebar_add=["v1hovermode", "toggleSpikeLines"]) fig.add_trace(avgclose, 1, 1) fig = add_profit(fig, 2, df_comb, 'cum_profit', 'Profit') fig = add_max_drawdown(fig, 2, trades, df_comb, timeframe) + fig = add_parallelism(fig, 4, trades, timeframe) + fig = add_underwater(fig, 5, trades) for pair in pairs: profit_col = f'cum_profit_{pair}' diff --git a/freqtrade/plugins/pairlist/VolatilityFilter.py b/freqtrade/plugins/pairlist/VolatilityFilter.py index 9383e5d06..20b899c5f 100644 --- a/freqtrade/plugins/pairlist/VolatilityFilter.py +++ b/freqtrade/plugins/pairlist/VolatilityFilter.py @@ -8,7 +8,7 @@ from typing import Any, Dict, List, Optional import arrow import numpy as np -from cachetools.ttl import TTLCache +from cachetools import TTLCache from pandas import DataFrame from freqtrade.exceptions import OperationalException diff --git a/freqtrade/plugins/pairlist/VolumePairList.py b/freqtrade/plugins/pairlist/VolumePairList.py index 0ffc8a8c8..83116ebac 100644 --- a/freqtrade/plugins/pairlist/VolumePairList.py +++ b/freqtrade/plugins/pairlist/VolumePairList.py @@ -8,7 +8,7 @@ from functools import partial from typing import Any, Dict, List import arrow -from cachetools.ttl import TTLCache +from cachetools import TTLCache from freqtrade.exceptions import OperationalException 
from freqtrade.exchange import timeframe_to_minutes diff --git a/freqtrade/plugins/pairlist/rangestabilityfilter.py b/freqtrade/plugins/pairlist/rangestabilityfilter.py index 3e5a002ff..314056fbb 100644 --- a/freqtrade/plugins/pairlist/rangestabilityfilter.py +++ b/freqtrade/plugins/pairlist/rangestabilityfilter.py @@ -6,7 +6,7 @@ from copy import deepcopy from typing import Any, Dict, List, Optional import arrow -from cachetools.ttl import TTLCache +from cachetools import TTLCache from pandas import DataFrame from freqtrade.exceptions import OperationalException diff --git a/freqtrade/rpc/api_server/uvicorn_threaded.py b/freqtrade/rpc/api_server/uvicorn_threaded.py index 79af659c7..a79c1a5fc 100644 --- a/freqtrade/rpc/api_server/uvicorn_threaded.py +++ b/freqtrade/rpc/api_server/uvicorn_threaded.py @@ -47,7 +47,7 @@ class UvicornServer(uvicorn.Server): else: asyncio.set_event_loop(uvloop.new_event_loop()) try: - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() except RuntimeError: # When running in a thread, we'll not have an eventloop yet. loop = asyncio.new_event_loop() diff --git a/freqtrade/rpc/fiat_convert.py b/freqtrade/rpc/fiat_convert.py index f4e82261e..ef9689d0a 100644 --- a/freqtrade/rpc/fiat_convert.py +++ b/freqtrade/rpc/fiat_convert.py @@ -7,7 +7,7 @@ import datetime import logging from typing import Dict, List -from cachetools.ttl import TTLCache +from cachetools import TTLCache from pycoingecko import CoinGeckoAPI from requests.exceptions import RequestException diff --git a/freqtrade/rpc/telegram.py b/freqtrade/rpc/telegram.py index f333dc5db..61a0b1f65 100644 --- a/freqtrade/rpc/telegram.py +++ b/freqtrade/rpc/telegram.py @@ -199,8 +199,8 @@ class Telegram(RPCHandler): self._updater.start_polling( bootstrap_retries=-1, - timeout=30, - read_latency=60, + timeout=20, + read_latency=60, # Assumed transmission latency drop_pending_updates=True, ) logger.info( @@ -213,6 +213,7 @@ class Telegram(RPCHandler): Stops all running telegram threads. :return: None """ + # This can take up to `timeout` from the call to `start_polling`. self._updater.stop() def _format_buy_msg(self, msg: Dict[str, Any]) -> str: diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py index 3a3b53df4..335ffb3e5 100644 --- a/freqtrade/strategy/interface.py +++ b/freqtrade/strategy/interface.py @@ -727,23 +727,21 @@ class IStrategy(ABC, HyperStrategyMixin): custom_reason = custom_reason[:CUSTOM_SELL_MAX_LENGTH] else: custom_reason = None - # TODO: return here if sell-signal should be favored over ROI + if sell_signal in (SellType.CUSTOM_SELL, SellType.SELL_SIGNAL): + logger.debug(f"{trade.pair} - Sell signal received. " + f"sell_type=SellType.{sell_signal.name}" + + (f", custom_reason={custom_reason}" if custom_reason else "")) + return SellCheckTuple(sell_type=sell_signal, sell_reason=custom_reason) # Start evaluations # Sequence: - # ROI (if not stoploss) # Sell-signal + # ROI (if not stoploss) # Stoploss if roi_reached and stoplossflag.sell_type != SellType.STOP_LOSS: logger.debug(f"{trade.pair} - Required profit reached. sell_type=SellType.ROI") return SellCheckTuple(sell_type=SellType.ROI) - if sell_signal != SellType.NONE: - logger.debug(f"{trade.pair} - Sell signal received. " - f"sell_type=SellType.{sell_signal.name}" + - (f", custom_reason={custom_reason}" if custom_reason else "")) - return SellCheckTuple(sell_type=sell_signal, sell_reason=custom_reason) - if stoplossflag.sell_flag: logger.debug(f"{trade.pair} - Stoploss hit. 
sell_type={stoplossflag.sell_type}") diff --git a/pyproject.toml b/pyproject.toml index f0637d8c6..ad32bad4e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ exclude = ''' line_length = 100 multi_line_output=0 lines_after_imports=2 +skip_glob = ["**/.env*", "**/env/*", "**/.venv/*", "**/docs/*"] [build-system] requires = ["setuptools >= 46.4.0", "wheel"] diff --git a/requirements.txt b/requirements.txt index e37cb5e76..8a2a7c258 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ -numpy==1.21.5 +numpy==1.21.5; python_version <= '3.7' +numpy==1.22.0; python_version > '3.7' pandas==1.3.5 pandas-ta==0.3.14b @@ -18,7 +19,7 @@ technical==1.3.0 tabulate==0.8.9 pycoingecko==2.2.0 jinja2==3.0.3 -tables==3.6.1 +tables==3.7.0 blosc==1.10.6 # find first, C search in arrays diff --git a/setup.cfg b/setup.cfg index b311c94da..c5c7f2f25 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,6 +17,7 @@ classifiers = Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 Operating System :: MacOS Operating System :: Unix Topic :: Office/Business :: Financial :: Investment diff --git a/setup.sh b/setup.sh index d14f8214d..c642a654d 100755 --- a/setup.sh +++ b/setup.sh @@ -25,7 +25,7 @@ function check_installed_python() { exit 2 fi - for v in 9 8 7 + for v in 9 10 8 7 do PYTHON="python3.${v}" which $PYTHON @@ -37,7 +37,6 @@ function check_installed_python() { done echo "No usable python found. Please make sure to have python3.7 or newer installed." - echo "python3.10 is currently not supported." exit 1 } @@ -220,7 +219,7 @@ function install() { install_redhat else echo "This script does not support your OS." - echo "If you have Python version 3.7 - 3.9, pip, virtualenv, ta-lib you can continue." + echo "If you have Python version 3.7 - 3.10, pip, virtualenv, ta-lib you can continue." echo "Wait 10 seconds to continue the next install steps or use ctrl+c to interrupt this shell." 
sleep 10 fi diff --git a/tests/conftest.py b/tests/conftest.py index 3ce064ee3..9cd1332d4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -4,7 +4,6 @@ import logging import re from copy import deepcopy from datetime import datetime, timedelta -from functools import reduce from pathlib import Path from unittest.mock import MagicMock, Mock, PropertyMock @@ -50,17 +49,23 @@ def pytest_configure(config): def log_has(line, logs): - # caplog mocker returns log as a tuple: ('freqtrade.something', logging.WARNING, 'foobar') - # and we want to match line against foobar in the tuple - return reduce(lambda a, b: a or b, - filter(lambda x: x[2] == line, logs.record_tuples), - False) + """Check if line is found on some caplog's message.""" + return any(line == message for message in logs.messages) def log_has_re(line, logs): - return reduce(lambda a, b: a or b, - filter(lambda x: re.match(line, x[2]), logs.record_tuples), - False) + """Check if line matches some caplog's message.""" + return any(re.match(line, message) for message in logs.messages) + + +def num_log_has(line, logs): + """Check how many times line is found in caplog's messages.""" + return sum(line == message for message in logs.messages) + + +def num_log_has_re(line, logs): + """Check how many times line matches caplog's messages.""" + return sum(bool(re.match(line, message)) for message in logs.messages) def get_args(args): diff --git a/tests/data/test_btanalysis.py b/tests/data/test_btanalysis.py index 1dcd04a80..47f1b8849 100644 --- a/tests/data/test_btanalysis.py +++ b/tests/data/test_btanalysis.py @@ -11,10 +11,10 @@ from freqtrade.constants import LAST_BT_RESULT_FN from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, BT_DATA_COLUMNS_MID, BT_DATA_COLUMNS_OLD, analyze_trade_parallelism, calculate_csum, calculate_market_change, calculate_max_drawdown, - combine_dataframes_with_mean, create_cum_profit, - extract_trades_of_period, get_latest_backtest_filename, - get_latest_hyperopt_file, load_backtest_data, load_trades, - load_trades_from_db) + calculate_underwater, combine_dataframes_with_mean, + create_cum_profit, extract_trades_of_period, + get_latest_backtest_filename, get_latest_hyperopt_file, + load_backtest_data, load_trades, load_trades_from_db) from freqtrade.data.history import load_data, load_pair_history from tests.conftest import create_mock_trades from tests.conftest_trades import MOCK_TRADE_COUNT @@ -234,6 +234,13 @@ def test_combine_dataframes_with_mean(testdatadir): assert "mean" in df.columns +def test_combine_dataframes_with_mean_no_data(testdatadir): + pairs = ["ETH/BTC", "ADA/BTC"] + data = load_data(datadir=testdatadir, pairs=pairs, timeframe='6m') + with pytest.raises(ValueError, match=r"No objects to concatenate"): + combine_dataframes_with_mean(data) + + def test_create_cum_profit(testdatadir): filename = testdatadir / "backtest-result_test.json" bt_data = load_backtest_data(filename) @@ -284,9 +291,16 @@ def test_calculate_max_drawdown(testdatadir): assert isinstance(lval, float) assert hdate == Timestamp('2018-01-24 14:25:00', tz='UTC') assert lowdate == Timestamp('2018-01-30 04:45:00', tz='UTC') + + underwater = calculate_underwater(bt_data) + assert isinstance(underwater, DataFrame) + with pytest.raises(ValueError, match='Trade dataframe empty.'): drawdown, hdate, lowdate, hval, lval = calculate_max_drawdown(DataFrame()) + with pytest.raises(ValueError, match='Trade dataframe empty.'): + calculate_underwater(DataFrame()) + def test_calculate_csum(testdatadir): filename = testdatadir / 
"backtest-result_test.json" diff --git a/tests/data/test_history.py b/tests/data/test_history.py index 575a590e7..627e29444 100644 --- a/tests/data/test_history.py +++ b/tests/data/test_history.py @@ -311,7 +311,7 @@ def test_load_partial_missing(testdatadir, caplog) -> None: assert td != len(data['UNITTEST/BTC']) start_real = data['UNITTEST/BTC'].iloc[0, 0] assert log_has(f'Missing data at start for pair ' - f'UNITTEST/BTC, data starts at {start_real.strftime("%Y-%m-%d %H:%M:%S")}', + f'UNITTEST/BTC at 5m, data starts at {start_real.strftime("%Y-%m-%d %H:%M:%S")}', caplog) # Make sure we start fresh - test missing data at end caplog.clear() @@ -326,7 +326,7 @@ def test_load_partial_missing(testdatadir, caplog) -> None: # Shift endtime with +5 - as last candle is dropped (partial candle) end_real = arrow.get(data['UNITTEST/BTC'].iloc[-1, 0]).shift(minutes=5) assert log_has(f'Missing data at end for pair ' - f'UNITTEST/BTC, data ends at {end_real.strftime("%Y-%m-%d %H:%M:%S")}', + f'UNITTEST/BTC at 5m, data ends at {end_real.strftime("%Y-%m-%d %H:%M:%S")}', caplog) diff --git a/tests/exchange/test_exchange.py b/tests/exchange/test_exchange.py index a4b151742..071f4e2b8 100644 --- a/tests/exchange/test_exchange.py +++ b/tests/exchange/test_exchange.py @@ -20,7 +20,7 @@ from freqtrade.exchange.exchange import (market_is_active, timeframe_to_minutes, timeframe_to_next_date, timeframe_to_prev_date, timeframe_to_seconds) from freqtrade.resolvers.exchange_resolver import ExchangeResolver -from tests.conftest import get_mock_coro, get_patched_exchange, log_has, log_has_re +from tests.conftest import get_mock_coro, get_patched_exchange, log_has, log_has_re, num_log_has_re # Make sure to always keep one exchange here which is NOT subclassed!! @@ -1740,6 +1740,44 @@ async def test__async_get_candle_history(default_conf, mocker, caplog, exchange_ (arrow.utcnow().int_timestamp - 2000) * 1000) +@pytest.mark.asyncio +async def test__async_kucoin_get_candle_history(default_conf, mocker, caplog): + caplog.set_level(logging.INFO) + api_mock = MagicMock() + api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection( + "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?" + "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735" + "429 Too Many Requests" '{"code":"429000","msg":"Too Many Requests"}')) + exchange = get_patched_exchange(mocker, default_conf, api_mock, id="kucoin") + + msg = "Kucoin 429 error, avoid triggering DDosProtection backoff delay" + assert not num_log_has_re(msg, caplog) + + for _ in range(3): + with pytest.raises(DDosProtection, match=r'429 Too Many Requests'): + await exchange._async_get_candle_history( + "ETH/BTC", "5m", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3) + assert num_log_has_re(msg, caplog) == 3 + + caplog.clear() + # Test regular non-kucoin message + api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection( + "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?" 
+ "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735" + "429 Too Many Requests" '{"code":"2222222","msg":"Too Many Requests"}')) + + msg = r'_async_get_candle_history\(\) returned exception: .*' + msg2 = r'Applying DDosProtection backoff delay: .*' + with patch('freqtrade.exchange.common.asyncio.sleep', get_mock_coro(None)): + for _ in range(3): + with pytest.raises(DDosProtection, match=r'429 Too Many Requests'): + await exchange._async_get_candle_history( + "ETH/BTC", "5m", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3) + # Expect the "returned exception" message 12 times (4 retries * 3 (loop)) + assert num_log_has_re(msg, caplog) == 12 + assert num_log_has_re(msg2, caplog) == 9 + + @pytest.mark.asyncio async def test__async_get_candle_history_empty(default_conf, mocker, caplog): """ Test empty exchange result """ diff --git a/tests/optimize/test_backtest_detail.py b/tests/optimize/test_backtest_detail.py index 775f15b87..f41b6101c 100644 --- a/tests/optimize/test_backtest_detail.py +++ b/tests/optimize/test_backtest_detail.py @@ -426,8 +426,6 @@ tc26 = BTContainer(data=[ # Test 27: Sell with signal sell in candle 3 (ROI at signal candle) # Stoploss at 10% (irrelevant), ROI at 5% (will trigger) - Wins over Sell-signal -# TODO: figure out if sell-signal should win over ROI -# Sell-signal wins over stoploss tc27 = BTContainer(data=[ # D O H L C V B S [0, 5000, 5025, 4975, 4987, 6172, 1, 0], @@ -436,8 +434,8 @@ tc27 = BTContainer(data=[ [3, 5010, 5012, 4986, 5010, 6172, 0, 1], # sell-signal [4, 5010, 5251, 4855, 4995, 6172, 0, 0], # Triggers ROI, sell-signal acted on [5, 4995, 4995, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.05}, profit_perc=0.05, use_sell_signal=True, - trades=[BTrade(sell_reason=SellType.ROI, open_tick=1, close_tick=4)] + stop_loss=-0.10, roi={"0": 0.05}, profit_perc=0.002, use_sell_signal=True, + trades=[BTrade(sell_reason=SellType.SELL_SIGNAL, open_tick=1, close_tick=4)] ) # Test 28: trailing_stop should raise so candle 3 causes a stoploss diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py index f5e182c1d..6290c3c55 100644 --- a/tests/optimize/test_backtesting.py +++ b/tests/optimize/test_backtesting.py @@ -1,6 +1,7 @@ # pragma pylint: disable=missing-docstring, W0212, line-too-long, C0103, unused-argument import random +from copy import deepcopy from datetime import datetime, timedelta, timezone from pathlib import Path from unittest.mock import MagicMock, PropertyMock @@ -648,7 +649,7 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None: processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) result = backtesting.backtest( - processed=processed, + processed=deepcopy(processed), start_date=min_date, end_date=max_date, max_open_trades=10, @@ -887,7 +888,7 @@ def test_backtest_multi_pair(default_conf, fee, mocker, tres, pair, testdatadir) processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) backtest_conf = { - 'processed': processed, + 'processed': deepcopy(processed), 'start_date': min_date, 'end_date': max_date, 'max_open_trades': 3, @@ -909,7 +910,7 @@ def test_backtest_multi_pair(default_conf, fee, mocker, tres, pair, testdatadir) 'NXT/BTC', '5m')[0]) == len(data['NXT/BTC']) - 1 - backtesting.strategy.startup_candle_count backtest_conf = { - 'processed': processed, + 'processed': deepcopy(processed), 'start_date': min_date, 'end_date': max_date, 'max_open_trades': 1, diff --git 
a/tests/plugins/test_pairlist.py b/tests/plugins/test_pairlist.py index dec6ca726..f7ff495ac 100644 --- a/tests/plugins/test_pairlist.py +++ b/tests/plugins/test_pairlist.py @@ -15,7 +15,7 @@ from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist from freqtrade.plugins.pairlistmanager import PairListManager from freqtrade.resolvers import PairListResolver from tests.conftest import (create_mock_trades, get_patched_exchange, get_patched_freqtradebot, - log_has, log_has_re) + log_has, log_has_re, num_log_has) @pytest.fixture(scope="function") @@ -237,19 +237,13 @@ def test_remove_logs_for_pairs_already_in_blacklist(mocker, markets, static_pl_c # Ensure that log message wasn't generated. assert not log_has('Pair BLK/BTC in your blacklist. Removing it from whitelist...', caplog) - new_whitelist = freqtrade.pairlists.verify_blacklist(whitelist + ['BLK/BTC'], logger.warning) - # Ensure that the pair is removed from the white list, and properly logged. - assert set(whitelist) == set(new_whitelist) - matches = sum(1 for message in caplog.messages - if message == 'Pair BLK/BTC in your blacklist. Removing it from whitelist...') - assert matches == 1 - - new_whitelist = freqtrade.pairlists.verify_blacklist(whitelist + ['BLK/BTC'], logger.warning) - # Ensure that the pair is not logged anymore when being removed from the pair list. - assert set(whitelist) == set(new_whitelist) - matches = sum(1 for message in caplog.messages - if message == 'Pair BLK/BTC in your blacklist. Removing it from whitelist...') - assert matches == 1 + for _ in range(3): + new_whitelist = freqtrade.pairlists.verify_blacklist( + whitelist + ['BLK/BTC'], logger.warning) + # Ensure that the pair is removed from the white list, and properly logged. + assert set(whitelist) == set(new_whitelist) + assert num_log_has('Pair BLK/BTC in your blacklist. 
Removing it from whitelist...', + caplog) == 1 def test_refresh_pairlist_dynamic(mocker, shitcoinmarkets, tickers, whitelist_conf): diff --git a/tests/rpc/test_rpc.py b/tests/rpc/test_rpc.py index 3e6917dd6..e86022a91 100644 --- a/tests/rpc/test_rpc.py +++ b/tests/rpc/test_rpc.py @@ -424,7 +424,7 @@ def test_rpc_trade_statistics(default_conf, ticker, ticker_sell_up, fee, assert stats['trade_count'] == 2 assert stats['first_trade_date'] == 'just now' assert stats['latest_trade_date'] == 'just now' - assert stats['avg_duration'] in ('0:00:00', '0:00:01') + assert stats['avg_duration'] in ('0:00:00', '0:00:01', '0:00:02') assert stats['best_pair'] == 'ETH/BTC' assert prec_satoshi(stats['best_rate'], 6.2) @@ -435,7 +435,7 @@ def test_rpc_trade_statistics(default_conf, ticker, ticker_sell_up, fee, assert stats['trade_count'] == 2 assert stats['first_trade_date'] == 'just now' assert stats['latest_trade_date'] == 'just now' - assert stats['avg_duration'] in ('0:00:00', '0:00:01') + assert stats['avg_duration'] in ('0:00:00', '0:00:01', '0:00:02') assert stats['best_pair'] == 'ETH/BTC' assert prec_satoshi(stats['best_rate'], 6.2) assert isnan(stats['profit_all_coin']) diff --git a/tests/rpc/test_rpc_telegram.py b/tests/rpc/test_rpc_telegram.py index 5266f498f..7f7629e1d 100644 --- a/tests/rpc/test_rpc_telegram.py +++ b/tests/rpc/test_rpc_telegram.py @@ -584,7 +584,7 @@ def test_monthly_handle(default_conf, update, ticker, limit_buy_order, fee, assert 'Monthly Profit over the last 2 months:' in msg_mock.call_args_list[0][0][0] assert 'Month ' in msg_mock.call_args_list[0][0][0] today = datetime.utcnow().date() - current_month = f"{today.year}-{today.month} " + current_month = f"{today.year}-{today.month:02} " assert current_month in msg_mock.call_args_list[0][0][0] assert str(' 0.00006217 BTC') in msg_mock.call_args_list[0][0][0] assert str(' 0.933 USD') in msg_mock.call_args_list[0][0][0] diff --git a/tests/test_freqtradebot.py b/tests/test_freqtradebot.py index 7bcd9b64e..d226280fe 100644 --- a/tests/test_freqtradebot.py +++ b/tests/test_freqtradebot.py @@ -1905,7 +1905,7 @@ def test_handle_trade_roi(default_conf_usdt, ticker_usdt, limit_buy_order_usdt_o # we might just want to check if we are in a sell condition without # executing # if ROI is reached we must sell - patch_get_signal(freqtrade, value=(False, True, None, None)) + patch_get_signal(freqtrade, value=(False, False, None, None)) assert freqtrade.handle_trade(trade) assert log_has("ETH/USDT - Required profit reached. 
sell_type=SellType.ROI", caplog) @@ -3242,7 +3242,7 @@ def test_ignore_roi_if_buy_signal(default_conf_usdt, limit_buy_order_usdt, assert freqtrade.handle_trade(trade) is False # Test if buy-signal is absent (should sell due to roi = true) - patch_get_signal(freqtrade, value=(False, True, None, None)) + patch_get_signal(freqtrade, value=(False, False, None, None)) assert freqtrade.handle_trade(trade) is True assert trade.sell_reason == SellType.ROI.value @@ -3428,11 +3428,11 @@ def test_disable_ignore_roi_if_buy_signal(default_conf_usdt, limit_buy_order_usd trade = Trade.query.first() trade.update(limit_buy_order_usdt) # Sell due to min_roi_reached - patch_get_signal(freqtrade, value=(True, True, None, None)) + patch_get_signal(freqtrade, value=(True, False, None, None)) assert freqtrade.handle_trade(trade) is True # Test if buy-signal is absent - patch_get_signal(freqtrade, value=(False, True, None, None)) + patch_get_signal(freqtrade, value=(False, False, None, None)) assert freqtrade.handle_trade(trade) is True assert trade.sell_reason == SellType.ROI.value diff --git a/tests/test_plotting.py b/tests/test_plotting.py index 8a40f4a20..40a76d04e 100644 --- a/tests/test_plotting.py +++ b/tests/test_plotting.py @@ -336,15 +336,20 @@ def test_generate_profit_graph(testdatadir): assert fig.layout.yaxis3.title.text == "Profit BTC" figure = fig.layout.figure - assert len(figure.data) == 5 + assert len(figure.data) == 7 avgclose = find_trace_in_fig_data(figure.data, "Avg close price") assert isinstance(avgclose, go.Scatter) profit = find_trace_in_fig_data(figure.data, "Profit") assert isinstance(profit, go.Scatter) - profit = find_trace_in_fig_data(figure.data, "Max drawdown 10.45%") - assert isinstance(profit, go.Scatter) + drawdown = find_trace_in_fig_data(figure.data, "Max drawdown 10.45%") + assert isinstance(drawdown, go.Scatter) + parallel = find_trace_in_fig_data(figure.data, "Parallel trades") + assert isinstance(parallel, go.Scatter) + + underwater = find_trace_in_fig_data(figure.data, "Underwater Plot") + assert isinstance(underwater, go.Scatter) for pair in pairs: profit_pair = find_trace_in_fig_data(figure.data, f"Profit {pair}")
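A note on the exchange changes above: `Exchange` now owns a dedicated asyncio event loop (`self.loop = asyncio.new_event_loop()`) and routes every `run_until_complete` call through it, since Python 3.10 begins deprecating implicit loop creation via `asyncio.get_event_loop()`. A stripped-down sketch of that ownership pattern, with illustrative names rather than freqtrade's actual API:

```python
import asyncio


class LoopOwner:
    """Create one event loop up front and reuse it for every sync-to-async bridge,
    instead of asking asyncio.get_event_loop() to conjure one on demand."""

    def __init__(self) -> None:
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    def run(self, coro):
        # Every synchronous entry point funnels through the same loop.
        return self.loop.run_until_complete(coro)

    def close(self) -> None:
        self.loop.close()


async def fetch_something() -> str:
    await asyncio.sleep(0)
    return "ok"


owner = LoopOwner()
print(owner.run(fetch_something()))  # -> ok
owner.close()
```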
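The strategy-interface change (mirrored in the backtesting docs above) reorders sell evaluation so an explicit sell or custom-sell signal now wins over ROI, with stoploss checked last. Reduced to its core, the new priority looks roughly like this simplified sketch; the enum values are illustrative and trailing-stop nuances handled by the real `should_sell` are ignored:

```python
from enum import Enum


class SellType(Enum):
    NONE = "none"
    SELL_SIGNAL = "sell_signal"
    CUSTOM_SELL = "custom_sell"
    ROI = "roi"
    STOP_LOSS = "stop_loss"


def resolve_sell(sell_signal: SellType, roi_reached: bool, stoploss_hit: bool) -> SellType:
    """New ordering: explicit signals first, then ROI, then (hard) stoploss."""
    if sell_signal in (SellType.CUSTOM_SELL, SellType.SELL_SIGNAL):
        return sell_signal
    if roi_reached and not stoploss_hit:
        return SellType.ROI
    if stoploss_hit:
        return SellType.STOP_LOSS
    return SellType.NONE
```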