import logging
import threading
import time
from abc import ABC, abstractmethod
from collections import deque
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Tuple

import numpy as np
import pandas as pd
import psutil
from numpy.typing import NDArray
from pandas import DataFrame

from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.data.dataprovider import DataProvider
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds
from freqtrade.freqai.data_drawer import FreqaiDataDrawer
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.utils import plot_feature_importance, record_params
from freqtrade.strategy.interface import IStrategy


pd.options.mode.chained_assignment = None
logger = logging.getLogger(__name__)


class IFreqaiModel(ABC):
    """
    Class containing all tools for training and prediction in the strategy.
    Base*PredictionModels inherit from this class.

    Record of contribution:
    FreqAI was developed by a group of individuals who all contributed specific skillsets to the
    project.

    Conception and software development:
    Robert Caulk @robcaulk

    Theoretical brainstorming:
    Elin Törnquist @th0rntwig

    Code review, software architecture brainstorming:
    @xmatthias

    Beta testing and bug reporting:
    @bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm
    Juha Nykänen @suikula, Wagner Costa @wagnercosta, Johan Vlugt @Jooopieeert
    """

    def __init__(self, config: Config) -> None:

        self.config = config
        self.assert_config(self.config)
        self.freqai_info: Dict[str, Any] = config["freqai"]
        self.data_split_parameters: Dict[str, Any] = config.get("freqai", {}).get(
            "data_split_parameters", {})
        self.model_training_parameters: Dict[str, Any] = config.get("freqai", {}).get(
            "model_training_parameters", {})
        self.identifier: str = self.freqai_info.get("identifier", "no_id_provided")
        self.retrain = False
        self.first = True
        self.set_full_path()
        self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True)
        if self.save_backtest_models:
            logger.info('Backtesting module configured to save all models.')

        self.dd = FreqaiDataDrawer(Path(self.full_path), self.config)
        # set current candle to arbitrary historical date
        self.current_candle: datetime = datetime.fromtimestamp(637887600, tz=timezone.utc)
        self.dd.current_candle = self.current_candle
        self.scanning = False
        self.ft_params = self.freqai_info["feature_parameters"]
        self.corr_pairlist: List[str] = self.ft_params.get("include_corr_pairlist", [])
        self.keras: bool = self.freqai_info.get("keras", False)
        if self.keras and self.ft_params.get("DI_threshold", 0):
            self.ft_params["DI_threshold"] = 0
            logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
        self.CONV_WIDTH = self.freqai_info.get('conv_width', 1)
        if self.ft_params.get("inlier_metric_window", 0):
            self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2
        self.class_names: List[str] = []  # used in classification subclasses
        self.pair_it = 0
        self.pair_it_train = 0
        self.total_pairs = len(self.config.get("exchange", {}).get("pair_whitelist"))
        self.train_queue = self._set_train_queue()
        self.inference_time: float = 0
        self.train_time: float = 0
        self.begin_time: float = 0
        self.begin_time_train: float = 0
        self.base_tf_seconds = timeframe_to_seconds(self.config['timeframe'])
        self.continual_learning = self.freqai_info.get('continual_learning', False)
        self.plot_features = self.ft_params.get("plot_feature_importances", 0)
        self.corr_dataframes: Dict[str, DataFrame] = {}
        # get_corr_dataframes is controlling the caching of corr_dataframes
        # for improved performance. Careful with this boolean.
        self.get_corr_dataframes: bool = True
        self._threads: List[threading.Thread] = []
        self._stop_event = threading.Event()
        self.metadata: Dict[str, Any] = self.dd.load_global_metadata_from_disk()
        self.data_provider: Optional[DataProvider] = None
        self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1)
        self.can_short = True  # overridden in start() with strategy.can_short

        record_params(config, self.full_path)
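
        # For reference, the "freqai" configuration section consumed above typically looks
        # roughly like the following (keys mirror the lookups in this constructor; the values
        # are illustrative only):
        #
        #     "freqai": {
        #         "identifier": "example_id",
        #         "save_backtest_models": true,
        #         "continual_learning": false,
        #         "feature_parameters": {...},
        #         "data_split_parameters": {...},
        #         "model_training_parameters": {...}
        #     }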

    def __getstate__(self):
        """
        Return an empty state to be pickled in hyperopt
        """
        return ({})

    def assert_config(self, config: Config) -> None:

        if not config.get("freqai", {}):
            raise OperationalException("No freqai parameters found in configuration file.")

    def start(self, dataframe: DataFrame, metadata: dict, strategy: IStrategy) -> DataFrame:
        """
        Entry point to the FreqaiModel from a specific pair. It will train a new model if
        necessary before making the prediction.

        :param dataframe: Full dataframe coming from strategy - it contains the entire
                          backtesting timerange + additional historical data necessary to train
                          the model.
        :param metadata: pair metadata coming from strategy.
        :param strategy: Strategy to train on
        """

        self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE)
        self.dd.set_pair_dict_info(metadata)
        self.data_provider = strategy.dp
        self.can_short = strategy.can_short

        if self.live:
            self.inference_timer('start')
            self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
            dk = self.start_live(dataframe, metadata, strategy, self.dk)
            dataframe = dk.remove_features_from_df(dk.return_dataframe)

        # For backtesting, each pair enters and then gets trained for each window along the
        # sliding window defined by "train_period_days" (training window) and "live_retrain_hours"
        # (backtest window, i.e. window immediately following the training window).
        # FreqAI slides the window and sequentially builds the backtesting results before returning
        # the concatenated results for the full backtesting period back to the strategy.
        else:
            self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
            if not self.config.get("freqai_backtest_live_models", False):
                logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
                dk = self.start_backtesting(dataframe, metadata, self.dk, strategy)
                dataframe = dk.remove_features_from_df(dk.return_dataframe)
            else:
                logger.info(
                    "Backtesting using historic predictions (live models)")
                dk = self.start_backtesting_from_historic_predictions(
                    dataframe, metadata, self.dk)
                dataframe = dk.return_dataframe

        self.clean_up()
        if self.live:
            self.inference_timer('stop', metadata["pair"])

        return dataframe
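
    # Illustrative sketch (not part of this interface): a FreqAI-enabled strategy is expected to
    # hand its dataframe to the entry point above from `populate_indicators`, roughly like:
    #
    #     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
    #         dataframe = self.freqai.start(dataframe, metadata, self)
    #         return dataframe
    #
    # where `self.freqai` is the instantiated prediction model (an IFreqaiModel subclass).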

    def clean_up(self):
        """
        Objects that should already be handled by the GC between coins, but which are
        reset explicitly here to demonstrate their non-persistence.
        """
        self.model = None
        self.dk = None

    def _on_stop(self):
        """
        Callback for subclasses to override to include logic for shutting down resources
        when SIGINT is sent.
        """
        return

    def shutdown(self):
        """
        Cleans up threads on shutdown: sets the stop event and joins threads to wait
        for the current training iteration.
        """
        logger.info("Stopping FreqAI")
        self._stop_event.set()

        self.data_provider = None
        self._on_stop()

        logger.info("Waiting on Training iteration")
        for _thread in self._threads:
            _thread.join()

    def start_scanning(self, *args, **kwargs) -> None:
        """
        Start `self._start_scanning` in a separate thread
        """
        _thread = threading.Thread(target=self._start_scanning, args=args, kwargs=kwargs)
        self._threads.append(_thread)
        _thread.start()

    def _start_scanning(self, strategy: IStrategy) -> None:
        """
        Function designed to constantly scan pairs for retraining on a separate thread (intracandle)
        to improve model youth. This function is agnostic to data preparation/collection/storage,
        it simply trains on whatever data is available in self.dd.
        :param strategy: IStrategy = The user defined strategy class
        """
        while not self._stop_event.is_set():
            time.sleep(1)
            pair = self.train_queue[0]

            # ensure pair is available in dp
            if pair not in strategy.dp.current_whitelist():
                self.train_queue.popleft()
                logger.warning(f'{pair} not in current whitelist, removing from train queue.')
                continue

            (_, trained_timestamp) = self.dd.get_pair_dict_info(pair)

            dk = FreqaiDataKitchen(self.config, self.live, pair)
            (
                retrain,
                new_trained_timerange,
                data_load_timerange,
            ) = dk.check_if_new_training_required(trained_timestamp)

            if retrain:
                self.train_timer('start')
                dk.set_paths(pair, new_trained_timerange.stopts)
                try:
                    self.extract_data_and_train_model(
                        new_trained_timerange, pair, strategy, dk, data_load_timerange
                    )
                except Exception as msg:
                    logger.warning(f"Training {pair} raised exception {msg.__class__.__name__}. "
                                   f"Message: {msg}, skipping.")

                self.train_timer('stop', pair)

                # only rotate the queue after the first has been trained.
                self.train_queue.rotate(-1)

                self.dd.save_historic_predictions_to_disk()
                if self.freqai_info.get('write_metrics_to_disk', False):
                    self.dd.save_metric_tracker_to_disk()
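
    # Note: the loop above wakes up once per second on the scanning thread and only inspects the
    # pair at the head of the queue; the queue is rotated only after that pair has actually been
    # retrained, so training proceeds sequentially through the whitelist.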

    def start_backtesting(
        self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen, strategy: IStrategy
    ) -> FreqaiDataKitchen:
        """
        The main broad execution for backtesting. For backtesting, each pair enters and then gets
        trained for each window along the sliding window defined by "train_period_days"
        (training window) and "backtest_period_days" (backtest window, i.e. window immediately
        following the training window). FreqAI slides the window and sequentially builds
        the backtesting results before returning the concatenated results for the full
        backtesting period back to the strategy.
        :param dataframe: DataFrame = strategy passed dataframe
        :param metadata: Dict = pair metadata
        :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        :param strategy: Strategy to train on
        :return:
            FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        """

        self.pair_it += 1
        train_it = 0
        pair = metadata["pair"]
        populate_indicators = True
        check_features = True
        # Loop enforcing the sliding window training/backtesting paradigm
        # tr_train is the training time range e.g. 1 historical month
        # tr_backtest is the backtesting time range e.g. the week directly
        # following tr_train. Both of these windows slide through the
        # entire backtest
        for tr_train, tr_backtest in zip(dk.training_timeranges, dk.backtesting_timeranges):
            (_, _) = self.dd.get_pair_dict_info(pair)
            train_it += 1
            total_trains = len(dk.backtesting_timeranges)
            self.training_timerange = tr_train
            len_backtest_df = len(dataframe.loc[(dataframe["date"] >= tr_backtest.startdt) & (
                dataframe["date"] < tr_backtest.stopdt), :])

            if not self.ensure_data_exists(len_backtest_df, tr_backtest, pair):
                continue

            self.log_backtesting_progress(tr_train, pair, train_it, total_trains)

            timestamp_model_id = int(tr_train.stopts)
            if dk.backtest_live_models:
                timestamp_model_id = int(tr_backtest.startts)

            dk.set_paths(pair, timestamp_model_id)

            dk.set_new_model_names(pair, timestamp_model_id)

            if dk.check_if_backtest_prediction_is_valid(len_backtest_df):
                if check_features:
                    self.dd.load_metadata(dk)
                    dataframe_dummy_features = self.dk.use_strategy_to_populate_indicators(
                        strategy, prediction_dataframe=dataframe.tail(1), pair=metadata["pair"]
                    )
                    dk.find_features(dataframe_dummy_features)
                    self.check_if_feature_list_matches_strategy(dk)
                    check_features = False
                append_df = dk.get_backtesting_prediction()
                dk.append_predictions(append_df)
            else:
                if populate_indicators:
                    dataframe = self.dk.use_strategy_to_populate_indicators(
                        strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
                    )
                    populate_indicators = False

                dataframe_base_train = dataframe.loc[dataframe["date"] < tr_train.stopdt, :]
                dataframe_base_train = strategy.set_freqai_targets(
                    dataframe_base_train, metadata=metadata)
                dataframe_base_backtest = dataframe.loc[dataframe["date"] < tr_backtest.stopdt, :]
                dataframe_base_backtest = strategy.set_freqai_targets(
                    dataframe_base_backtest, metadata=metadata)

                tr_train = dk.buffer_timerange(tr_train)

                dataframe_train = dk.slice_dataframe(tr_train, dataframe_base_train)
                dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe_base_backtest)

                if not self.model_exists(dk):
                    dk.find_features(dataframe_train)
                    dk.find_labels(dataframe_train)

                    try:
                        self.model = self.train(dataframe_train, pair, dk)
                    except Exception as msg:
                        logger.warning(
                            f"Training {pair} raised exception {msg.__class__.__name__}. "
                            f"Message: {msg}, skipping.")

                    self.dd.pair_dict[pair]["trained_timestamp"] = int(
                        tr_train.stopts)
                    if self.plot_features:
                        plot_feature_importance(self.model, pair, dk, self.plot_features)
                    if self.save_backtest_models:
                        logger.info('Saving backtest model to disk.')
                        self.dd.save_data(self.model, pair, dk)
                    else:
                        logger.info('Saving metadata to disk.')
                        self.dd.save_metadata(dk)
                else:
                    self.model = self.dd.load_data(pair, dk)

                pred_df, do_preds = self.predict(dataframe_backtest, dk)
                append_df = dk.get_predictions_to_append(pred_df, do_preds, dataframe_backtest)
                dk.append_predictions(append_df)
                dk.save_backtesting_prediction(append_df)

        self.backtesting_fit_live_predictions(dk)
        dk.fill_predictions(dataframe)

        return dk

    def start_live(
        self, dataframe: DataFrame, metadata: dict, strategy: IStrategy, dk: FreqaiDataKitchen
    ) -> FreqaiDataKitchen:
        """
        The main broad execution for dry/live. This function will check if a retraining should be
        performed, and if so, retrain and reset the model.
        :param dataframe: DataFrame = strategy passed dataframe
        :param metadata: Dict = pair metadata
        :param strategy: IStrategy = currently employed strategy
        :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        :returns:
        dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        """

        # get the model metadata associated with the current pair
        (_, trained_timestamp) = self.dd.get_pair_dict_info(metadata["pair"])

        # append the historic data once per round
        if self.dd.historic_data:
            self.dd.update_historic_data(strategy, dk)
            logger.debug(f'Updating historic data on pair {metadata["pair"]}')
            self.track_current_candle()

        (_, new_trained_timerange, data_load_timerange) = dk.check_if_new_training_required(
            trained_timestamp
        )
        dk.set_paths(metadata["pair"], new_trained_timerange.stopts)

        # load candle history into memory if it is not yet.
        if not self.dd.historic_data:
            self.dd.load_all_pair_histories(data_load_timerange, dk)

        if not self.scanning:
            self.scanning = True
            self.start_scanning(strategy)

        # load the model and associated data into the data kitchen
        self.model = self.dd.load_data(metadata["pair"], dk)

        dataframe = dk.use_strategy_to_populate_indicators(
            strategy, prediction_dataframe=dataframe, pair=metadata["pair"],
            do_corr_pairs=self.get_corr_dataframes
        )

        if not self.model:
            logger.warning(
                f"No model ready for {metadata['pair']}, returning null values to strategy."
            )
            self.dd.return_null_values_to_strategy(dataframe, dk)
            return dk

        if self.corr_pairlist:
            dataframe = self.cache_corr_pairlist_dfs(dataframe, dk)

        dk.find_labels(dataframe)

        self.build_strategy_return_arrays(dataframe, dk, metadata["pair"], trained_timestamp)

        return dk

    def build_strategy_return_arrays(
        self, dataframe: DataFrame, dk: FreqaiDataKitchen, pair: str, trained_timestamp: int
    ) -> None:

        # hold the historical predictions in memory so we are sending back
        # correct array to strategy

        if pair not in self.dd.model_return_values:
            # first predictions are made on entire historical candle set coming from strategy. This
            # allows FreqUI to show full return values.
            pred_df, do_preds = self.predict(dataframe, dk)
            if pair not in self.dd.historic_predictions:
                self.set_initial_historic_predictions(pred_df, dk, pair, dataframe)
            self.dd.set_initial_return_values(pair, pred_df)

            dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)
            return
        elif self.dk.check_if_model_expired(trained_timestamp):
            pred_df = DataFrame(np.zeros((2, len(dk.label_list))), columns=dk.label_list)
            do_preds = np.ones(2, dtype=np.int_) * 2
            dk.DI_values = np.zeros(2)
            logger.warning(
                f"Model expired for {pair}, returning null values to strategy. Strategy "
                "construction should take care to consider this event with "
                "prediction == 0 and do_predict == 2"
            )
        else:
            # remaining predictions are made only on the most recent candles for performance and
            # historical accuracy reasons.
            pred_df, do_preds = self.predict(dataframe.iloc[-self.CONV_WIDTH:], dk, first=False)

        if self.freqai_info.get('fit_live_predictions_candles', 0) and self.live:
            self.fit_live_predictions(dk, pair)
        self.dd.append_model_predictions(pair, pred_df, do_preds, dk, dataframe)
        dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)

        return

    def check_if_feature_list_matches_strategy(
        self, dk: FreqaiDataKitchen
    ) -> None:
        """
        Ensure user is passing the proper feature set if they are reusing an `identifier` pointing
        to a folder holding existing models.
        :param dk: FreqaiDataKitchen = non-persistent data container/analyzer for
                   current coin/bot loop
        """

        if "training_features_list_raw" in dk.data:
            feature_list = dk.data["training_features_list_raw"]
        else:
            feature_list = dk.data['training_features_list']

        if dk.training_features_list != feature_list:
            raise OperationalException(
                "Trying to access pretrained model with `identifier` "
                "but found different features furnished by current strategy. "
                "Change `identifier` to train from scratch, or ensure the "
                "strategy is furnishing the same features as the pretrained "
                "model. In case of --strategy-list, please be aware that FreqAI "
                "requires all strategies to maintain identical "
                "feature_engineering_* functions"
            )

    def data_cleaning_train(self, dk: FreqaiDataKitchen) -> None:
        """
        Base data cleaning method for train.
        Functions here improve/modify the input data by identifying outliers,
        computing additional metrics, adding noise, reducing dimensionality etc.
        """

        ft_params = self.freqai_info["feature_parameters"]

        if ft_params.get('inlier_metric_window', 0):
            dk.compute_inlier_metric(set_='train')
            if self.freqai_info["data_split_parameters"]["test_size"] > 0:
                dk.compute_inlier_metric(set_='test')

        if ft_params.get(
            "principal_component_analysis", False
        ):
            dk.principal_component_analysis()

        if ft_params.get("use_SVM_to_remove_outliers", False):
            dk.use_SVM_to_remove_outliers(predict=False)

        if ft_params.get("DI_threshold", 0):
            dk.data["avg_mean_dist"] = dk.compute_distances()

        if ft_params.get("use_DBSCAN_to_remove_outliers", False):
            if dk.pair in self.dd.old_DBSCAN_eps:
                eps = self.dd.old_DBSCAN_eps[dk.pair]
            else:
                eps = None
            dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps)
            self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps']

        if self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0):
            dk.add_noise_to_training_features()
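
        # The switches consulted above live in the "freqai.feature_parameters" section of the
        # configuration, e.g. (values illustrative only):
        #
        #     "feature_parameters": {
        #         "inlier_metric_window": 0,
        #         "principal_component_analysis": false,
        #         "use_SVM_to_remove_outliers": true,
        #         "DI_threshold": 0.9,
        #         "use_DBSCAN_to_remove_outliers": false,
        #         "noise_standard_deviation": 0.05
        #     }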

    def data_cleaning_predict(self, dk: FreqaiDataKitchen) -> None:
        """
        Base data cleaning method for predict.
        Functions here are complementary to the functions of data_cleaning_train.
        """
        ft_params = self.freqai_info["feature_parameters"]

        # ensure user is feeding the correct indicators to the model
        self.check_if_feature_list_matches_strategy(dk)

        if ft_params.get('inlier_metric_window', 0):
            dk.compute_inlier_metric(set_='predict')

        if ft_params.get(
            "principal_component_analysis", False
        ):
            dk.pca_transform(dk.data_dictionary['prediction_features'])

        if ft_params.get("use_SVM_to_remove_outliers", False):
            dk.use_SVM_to_remove_outliers(predict=True)

        if ft_params.get("DI_threshold", 0):
            dk.check_if_pred_in_training_spaces()

        if ft_params.get("use_DBSCAN_to_remove_outliers", False):
            dk.use_DBSCAN_to_remove_outliers(predict=True)

    def model_exists(self, dk: FreqaiDataKitchen) -> bool:
        """
        Check whether a model already exists for the current pair and training window.
        :param dk: FreqaiDataKitchen = data container holding the current model path/filename
        :return:
        :boolean: whether the model file exists or not.
        """
        if self.dd.model_type == 'joblib':
            file_type = ".joblib"
        elif self.dd.model_type == 'keras':
            file_type = ".h5"
        elif self.dd.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
            file_type = ".zip"

        path_to_modelfile = Path(dk.data_path / f"{dk.model_filename}_model{file_type}")
        file_exists = path_to_modelfile.is_file()
        if file_exists:
            logger.info("Found model at %s", dk.data_path / dk.model_filename)
        else:
            logger.info("Could not find model at %s", dk.data_path / dk.model_filename)
        return file_exists

    def set_full_path(self) -> None:
        """
        Creates and sets the full path for the identifier
        """
        self.full_path = Path(
            self.config["user_data_dir"] / "models" / f"{self.identifier}"
        )
        self.full_path.mkdir(parents=True, exist_ok=True)

    def extract_data_and_train_model(
        self,
        new_trained_timerange: TimeRange,
        pair: str,
        strategy: IStrategy,
        dk: FreqaiDataKitchen,
        data_load_timerange: TimeRange,
    ):
        """
        Retrieve data and train model.
        :param new_trained_timerange: TimeRange = the timerange to train the model on
        :param pair: str = pair to be trained
        :param strategy: IStrategy = user defined strategy object
        :param dk: FreqaiDataKitchen = non-persistent data container for current coin/loop
        :param data_load_timerange: TimeRange = the amount of data to be loaded
                                    for populating indicators
                                    (larger than new_trained_timerange so that
                                    new_trained_timerange does not contain any NaNs)
        """

        corr_dataframes, base_dataframes = self.dd.get_base_and_corr_dataframes(
            data_load_timerange, pair, dk
        )

        unfiltered_dataframe = dk.use_strategy_to_populate_indicators(
            strategy, corr_dataframes, base_dataframes, pair
        )

        new_trained_timerange = dk.buffer_timerange(new_trained_timerange)

        unfiltered_dataframe = dk.slice_dataframe(new_trained_timerange, unfiltered_dataframe)

        # find the features indicated by strategy and store in datakitchen
        dk.find_features(unfiltered_dataframe)
        dk.find_labels(unfiltered_dataframe)

        model = self.train(unfiltered_dataframe, pair, dk)

        self.dd.pair_dict[pair]["trained_timestamp"] = new_trained_timerange.stopts
        dk.set_new_model_names(pair, new_trained_timerange.stopts)
        self.dd.save_data(model, pair, dk)

        if self.plot_features:
            plot_feature_importance(model, pair, dk, self.plot_features)

        self.dd.purge_old_models()

    def set_initial_historic_predictions(
        self, pred_df: DataFrame, dk: FreqaiDataKitchen, pair: str, strat_df: DataFrame
    ) -> None:
        """
        This function is called only if the datadrawer failed to load an
        existing set of historic predictions. In this case, it builds
        the structure and sets fake predictions off the first training
        data. After that, FreqAI will append new real predictions to the
        set of historic predictions.

        These values are used to generate live statistics which can be used
        in the strategy for adaptive values. E.g. &*_mean/std are quantities
        that can be computed based on live predictions from the set of historical
        predictions. Those values can be used in the user strategy to better
        assess prediction rarity, and thus wait for probabilistically favorable
        entries relative to the live historical predictions.

        If the user reuses an identifier on a subsequent instance,
        this function will not be called. In that case, "real" predictions
        will be appended to the loaded set of historic predictions.
        :param pred_df: DataFrame = the predictions made on the first training data
        :param dk: FreqaiDataKitchen = object containing methods for data analysis
        :param pair: str = current pair
        :param strat_df: DataFrame = strategy provided dataframe
        """

        self.dd.historic_predictions[pair] = pred_df
        hist_preds_df = self.dd.historic_predictions[pair]

        self.set_start_dry_live_date(strat_df)

        for label in hist_preds_df.columns:
            if hist_preds_df[label].dtype == object:
                continue
            hist_preds_df[f'{label}_mean'] = 0
            hist_preds_df[f'{label}_std'] = 0

        hist_preds_df['do_predict'] = 0

        if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0:
            hist_preds_df['DI_values'] = 0

        for return_str in dk.data['extra_returns_per_train']:
            hist_preds_df[return_str] = dk.data['extra_returns_per_train'][return_str]

        hist_preds_df['close_price'] = strat_df['close']
        hist_preds_df['date_pred'] = strat_df['date']

        # for keras type models, the conv_window needs to be prepended so
        # viewing is correct in frequi
        if self.freqai_info.get('keras', False) or self.ft_params.get('inlier_metric_window', 0):
            n_lost_points = self.freqai_info.get('conv_width', 2)
            zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))),
                                 columns=hist_preds_df.columns)
            self.dd.historic_predictions[pair] = pd.concat(
                [zeros_df, hist_preds_df], axis=0, ignore_index=True)

    def fit_live_predictions(self, dk: FreqaiDataKitchen, pair: str) -> None:
        """
        Fit the labels with a Gaussian distribution
        """
        import scipy as spy

        # add classes from classifier label types if used
        full_labels = dk.label_list + dk.unique_class_list

        num_candles = self.freqai_info.get("fit_live_predictions_candles", 100)
        dk.data["labels_mean"], dk.data["labels_std"] = {}, {}
        for label in full_labels:
            if self.dd.historic_predictions[dk.pair][label].dtype == object:
                continue
            f = spy.stats.norm.fit(
                self.dd.historic_predictions[dk.pair][label].tail(num_candles))
            dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1]

        return
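
    # Note: scipy's norm.fit returns the fitted (mean, std) pair, which is what fills
    # dk.data["labels_mean"] / dk.data["labels_std"] above and, downstream, the per-label
    # *_mean / *_std values exposed to the strategy.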

    def inference_timer(self, do: Literal['start', 'stop'] = 'start', pair: str = ''):
        """
        Timer designed to track the cumulative time spent in FreqAI for one pass through
        the whitelist. This will check if the time spent is more than 1/4 the time
        of a single candle, and if so, it will warn the user of degraded performance
        """
        if do == 'start':
            self.pair_it += 1
            self.begin_time = time.time()
        elif do == 'stop':
            end = time.time()
            time_spent = (end - self.begin_time)
            if self.freqai_info.get('write_metrics_to_disk', False):
                self.dd.update_metric_tracker('inference_time', time_spent, pair)
            self.inference_time += time_spent
            if self.pair_it == self.total_pairs:
                logger.info(
                    f'Total time spent inferencing pairlist {self.inference_time:.2f} seconds')
                if self.inference_time > 0.25 * self.base_tf_seconds:
                    logger.warning("Inference took over 25% of the candle time. Reduce pairlist to"
                                   " avoid blinding open trades and degrading performance.")
                self.pair_it = 0
                self.inference_time = 0
        return

    def train_timer(self, do: Literal['start', 'stop'] = 'start', pair: str = ''):
        """
        Timer designed to track the cumulative time spent training the full pairlist in
        FreqAI.
        """
        if do == 'start':
            self.pair_it_train += 1
            self.begin_time_train = time.time()
        elif do == 'stop':
            end = time.time()
            time_spent = (end - self.begin_time_train)
            if self.freqai_info.get('write_metrics_to_disk', False):
                self.dd.collect_metrics(time_spent, pair)

            self.train_time += time_spent
            if self.pair_it_train == self.total_pairs:
                logger.info(
                    f'Total time spent training pairlist {self.train_time:.2f} seconds')
                self.pair_it_train = 0
                self.train_time = 0
        return

    def get_init_model(self, pair: str) -> Any:
        if pair not in self.dd.model_dictionary or not self.continual_learning:
            init_model = None
        else:
            init_model = self.dd.model_dictionary[pair]

        return init_model
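
    # Note: when continual_learning is enabled, subclasses typically feed the returned model back
    # into their fit() call as a warm start; a return value of None means training starts from
    # scratch.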

    def _set_train_queue(self):
        """
        Sets train queue from existing train timestamps if they exist;
        otherwise it sets the train queue based on the provided whitelist.
        """
        current_pairlist = self.config.get("exchange", {}).get("pair_whitelist")
        if not self.dd.pair_dict:
            logger.info('Set fresh train queue from whitelist. '
                        f'Queue: {current_pairlist}')
            return deque(current_pairlist)

        best_queue = deque()

        pair_dict_sorted = sorted(self.dd.pair_dict.items(),
                                  key=lambda k: k[1]['trained_timestamp'])
        for pair in pair_dict_sorted:
            if pair[0] in current_pairlist:
                best_queue.append(pair[0])
        for pair in current_pairlist:
            if pair not in best_queue:
                best_queue.appendleft(pair)

        logger.info('Set existing queue from trained timestamps. '
                    f'Best approximation queue: {best_queue}')
        return best_queue
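
    # Example (hypothetical values): with pair_whitelist = ["BTC/USDT", "ETH/USDT", "XRP/USDT"]
    # and saved trained timestamps {"ETH/USDT": 100, "BTC/USDT": 200}, the resulting queue is
    # deque(["XRP/USDT", "ETH/USDT", "BTC/USDT"]): untrained pairs first, then oldest models first.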

    def cache_corr_pairlist_dfs(self, dataframe: DataFrame, dk: FreqaiDataKitchen) -> DataFrame:
        """
        Cache the corr_pairlist dfs to speed up performance for subsequent pairs during the
        current candle.
        :param dataframe: strategy fed dataframe
        :param dk: datakitchen object for current asset
        :return: dataframe to attach/extract cached corr_pair dfs to/from.
        """

        if self.get_corr_dataframes:
            self.corr_dataframes = dk.extract_corr_pair_columns_from_populated_indicators(dataframe)
            if not self.corr_dataframes:
                logger.warning("Couldn't cache corr_pair dataframes for improved performance. "
                               "Consider ensuring that the full coin/stake, e.g. XYZ/USD, "
                               "is included in the column names when you are creating features "
                               "in `feature_engineering_*` functions.")
            self.get_corr_dataframes = not bool(self.corr_dataframes)
        elif self.corr_dataframes:
            dataframe = dk.attach_corr_pair_columns(
                dataframe, self.corr_dataframes, dk.pair)

        return dataframe

    def track_current_candle(self):
        """
        Checks if the latest candle appended by the datadrawer is
        equivalent to the latest candle seen by FreqAI. If not, it
        asks to refresh the cached corr_dfs, and resets the pair
        counter.
        """
        if self.dd.current_candle > self.current_candle:
            self.get_corr_dataframes = True
            self.pair_it = 1
            self.current_candle = self.dd.current_candle

    def ensure_data_exists(self, len_dataframe_backtest: int,
                           tr_backtest: TimeRange, pair: str) -> bool:
        """
        Check if the backtesting dataframe is empty; if it is, report useful information to user.
        :param len_dataframe_backtest: the len of backtesting dataframe
        :param tr_backtest: current backtesting timerange.
        :param pair: current pair
        :return: if the data exists or not
        """
        if self.config.get("freqai_backtest_live_models", False) and len_dataframe_backtest == 0:
            logger.info(f"No data found for pair {pair} from "
                        f"{tr_backtest.start_fmt} to {tr_backtest.stop_fmt}. "
                        "Probably more than one training within the same candle period.")
            return False
        return True

    def log_backtesting_progress(self, tr_train: TimeRange, pair: str,
                                 train_it: int, total_trains: int):
        """
        Log the backtesting progress so user knows how many pairs have been trained and
        how many more pairs/trains remain.
        :param tr_train: the training timerange
        :param train_it: the train iteration for the current pair (the sliding window progress)
        :param pair: the current pair
        :param total_trains: total trains (total number of slides for the sliding window)
        """
        if not self.config.get("freqai_backtest_live_models", False):
            logger.info(
                f"Training {pair}, {self.pair_it}/{self.total_pairs} pairs"
                f" from {tr_train.start_fmt} "
                f"to {tr_train.stop_fmt}, {train_it}/{total_trains} "
                "trains"
            )

    def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen):
        """
        Apply the fit_live_predictions function in backtesting with a dummy historic_predictions.
        The loop is required to simulate dry/live operation, as it is not possible to predict
        the type of logic implemented by the user.
        :param dk: datakitchen object
        """
        fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0)
        if fit_live_predictions_candles:
            logger.info("Applying fit_live_predictions in backtesting")
            label_columns = [col for col in dk.full_df.columns if (
                col.startswith("&") and
                not (col.startswith("&") and col.endswith("_mean")) and
                not (col.startswith("&") and col.endswith("_std")) and
                col not in self.dk.data["extra_returns_per_train"])
            ]

            for index in range(len(dk.full_df)):
                if index >= fit_live_predictions_candles:
                    self.dd.historic_predictions[self.dk.pair] = (
                        dk.full_df.iloc[index - fit_live_predictions_candles:index])
                    self.fit_live_predictions(self.dk, self.dk.pair)
                    for label in label_columns:
                        if dk.full_df[label].dtype == object:
                            continue
                        if "labels_mean" in self.dk.data:
                            dk.full_df.at[index, f"{label}_mean"] = (
                                self.dk.data["labels_mean"][label])
                        if "labels_std" in self.dk.data:
                            dk.full_df.at[index, f"{label}_std"] = self.dk.data["labels_std"][label]

                    for extra_col in self.dk.data["extra_returns_per_train"]:
                        dk.full_df.at[index, f"{extra_col}"] = (
                            self.dk.data["extra_returns_per_train"][extra_col])

        return

    def update_metadata(self, metadata: Dict[str, Any]):
        """
        Update global metadata and save the updated json file
        :param metadata: new global metadata dict
        """
        self.dd.save_global_metadata_to_disk(metadata)
        self.metadata = metadata

    def set_start_dry_live_date(self, live_dataframe: DataFrame):
        key_name = "start_dry_live_date"
        if key_name not in self.metadata:
            metadata = self.metadata
            metadata[key_name] = int(
                pd.to_datetime(live_dataframe.tail(1)["date"].values[0]).timestamp())
            self.update_metadata(metadata)

    def start_backtesting_from_historic_predictions(
        self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen
    ) -> FreqaiDataKitchen:
        """
        :param dataframe: DataFrame = strategy passed dataframe
        :param metadata: Dict = pair metadata
        :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        :return:
            FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        """
        pair = metadata["pair"]
        dk.return_dataframe = dataframe
        saved_dataframe = self.dd.historic_predictions[pair]
        columns_to_drop = list(set(saved_dataframe.columns).intersection(
            dk.return_dataframe.columns))
        dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop))
        dk.return_dataframe = pd.merge(
            dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred")
        return dk

    # The following methods are overridden by user-made prediction models.
    # See freqai/prediction_models/CatboostPredictionModel.py for an example.

    @abstractmethod
    def train(self, unfiltered_df: DataFrame, pair: str,
              dk: FreqaiDataKitchen, **kwargs) -> Any:
        """
        Filter the training data and train a model on it. Train makes heavy use of the datahandler
        for storing, saving, loading, and analyzing the data.
        :param unfiltered_df: Full dataframe for the current training period
        :param pair: pair to train on
        :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        :return: Trained model which can be used to inference (self.predict)
        """

    @abstractmethod
    def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs) -> Any:
        """
        Most regressors use the same function names and arguments e.g. user
        can drop in LGBMRegressor in place of CatBoostRegressor and all data
        management will be properly handled by Freqai.
        :param data_dictionary: Dict = the dictionary constructed by DataHandler to hold
                                all the training and test data/labels.
        """

        return
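
    # A minimal sketch of a subclass fit(), assuming a scikit-learn style regressor such as
    # LGBMRegressor (illustrative only - the dictionary keys below are the ones this class's
    # data pipeline is assumed to provide; see the shipped prediction models for real
    # implementations):
    #
    #     def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
    #         X = data_dictionary["train_features"]
    #         y = data_dictionary["train_labels"]
    #         model = LGBMRegressor(**self.model_training_parameters)
    #         model.fit(X=X, y=y)
    #         return model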

    @abstractmethod
    def predict(
        self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
    ) -> Tuple[DataFrame, NDArray[np.int_]]:
        """
        Filter the prediction features data and predict with it.
        :param unfiltered_df: Full dataframe for the current backtest period.
        :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        :param first: boolean = whether this is the first prediction or not.
        :return:
        :predictions: np.array of predictions
        :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
        data (NaNs) or felt uncertain about data (i.e. SVM and/or DI index)
        """