import copy
import datetime
import json
import logging
import pickle as pk
import shutil
from pathlib import Path
from typing import Any, Dict, List, Tuple

import numpy as np
import numpy.typing as npt
import pandas as pd
from joblib import dump, load  # , Parallel, delayed  # used for auto distribution assignment
from pandas import DataFrame
from sklearn import linear_model
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import train_test_split

from freqtrade.configuration import TimeRange
from freqtrade.data.history import load_pair_history
from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_drawer import FreqaiDataDrawer
from freqtrade.resolvers import ExchangeResolver
from freqtrade.strategy.interface import IStrategy


SECONDS_IN_DAY = 86400
SECONDS_IN_HOUR = 3600

logger = logging.getLogger(__name__)


class FreqaiDataKitchen:
    """
    Class designed to analyze data for a single pair. Employed by the IFreqaiModel class.
    Functionalities include holding, saving, loading, and analyzing the data.
    author: Robert Caulk, rob.caulk@gmail.com
    """

    def __init__(
        self,
        config: Dict[str, Any],
        data_drawer: FreqaiDataDrawer,
        live: bool = False,
        pair: str = "",
    ):
        self.data: Dict[Any, Any] = {}
        self.data_dictionary: Dict[Any, Any] = {}
        self.config = config
        self.freqai_config = config["freqai"]
        self.full_df: DataFrame = DataFrame()
        self.append_df: DataFrame = DataFrame()
        self.data_path = Path()
        self.label_list: List = []
        self.model_filename: str = ""
        self.live = live
        self.pair = pair
        self.svm_model: linear_model.SGDOneClassSVM = None
        self.keras = self.freqai_config.get("keras", False)
        self.set_all_pairs()
        if not self.live:
            self.full_timerange = self.create_fulltimerange(
                self.config["timerange"], self.freqai_config.get("train_period_days")
            )

            (self.training_timeranges, self.backtesting_timeranges) = self.split_timerange(
                self.full_timerange,
                config["freqai"]["train_period_days"],
                config["freqai"]["backtest_period_days"],
            )
        # self.strat_dataframe: DataFrame = strat_dataframe
        self.dd = data_drawer
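
    # A minimal usage sketch (hypothetical values; in practice IFreqaiModel builds the
    # kitchen from the live freqtrade config and its shared FreqaiDataDrawer):
    #   dk = FreqaiDataKitchen(config, data_drawer, live=True, pair="BTC/USDT")
    #   dk.set_paths(dk.pair, trained_timestamp=1650000000)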

    def set_paths(
        self,
        pair: str,
        trained_timestamp: int = None,
    ) -> None:
        """
        Set the paths to the data for the present coin/botloop
        :params:
        pair: str = pair for which the paths are being set
        trained_timestamp: int = timestamp of most recent training
        """
        self.full_path = Path(
            self.config["user_data_dir"] / "models" / str(self.freqai_config.get("identifier"))
        )

        self.data_path = Path(
            self.full_path / str("sub-train" + "-" + pair.split("/")[0] + str(trained_timestamp))
        )

        return

    def save_data(self, model: Any, coin: str = "", label=None) -> None:
        """
        Saves all data associated with a model for a single sub-train time range
        :params:
        :model: User trained model which can be reused for inferencing to generate
        predictions
        """

        if not self.data_path.is_dir():
            self.data_path.mkdir(parents=True, exist_ok=True)

        save_path = Path(self.data_path)

        # Save the trained model
        if not self.keras:
            dump(model, save_path / f"{self.model_filename}_model.joblib")
        else:
            model.save(save_path / f"{self.model_filename}_model.h5")

        if self.svm_model is not None:
            dump(self.svm_model, save_path / str(self.model_filename + "_svm_model.joblib"))

        self.data["data_path"] = str(self.data_path)
        self.data["model_filename"] = str(self.model_filename)
        self.data["training_features_list"] = list(self.data_dictionary["train_features"].columns)
        self.data["label_list"] = self.label_list
        # store the metadata
        with open(save_path / str(self.model_filename + "_metadata.json"), "w") as fp:
            json.dump(self.data, fp, default=self.np_encoder)

        # save the train data to file so we can check preds for area of applicability later
        self.data_dictionary["train_features"].to_pickle(
            save_path / str(self.model_filename + "_trained_df.pkl")
        )

        if self.freqai_config.get("feature_parameters", {}).get("principal_component_analysis"):
            pk.dump(
                self.pca, open(self.data_path / str(self.model_filename + "_pca_object.pkl"), "wb")
            )

        # if self.live:
        self.dd.model_dictionary[self.model_filename] = model
        self.dd.pair_dict[coin]["model_filename"] = self.model_filename
        self.dd.pair_dict[coin]["data_path"] = str(self.data_path)
        self.dd.save_drawer_to_disk()

        return
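
    # For reference, save_data() leaves the following artifacts in self.data_path
    # (<fn> is self.model_filename):
    #   <fn>_model.joblib (or <fn>_model.h5 for keras models)
    #   <fn>_svm_model.joblib (only when an SVM outlier model was fit)
    #   <fn>_metadata.json
    #   <fn>_trained_df.pkl
    #   <fn>_pca_object.pkl (only when principal_component_analysis is enabled)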

    def load_data(self, coin: str = "") -> Any:
        """
        loads all data required to make a prediction on a sub-train time range
        :returns:
        :model: User trained model which can be inferenced for new predictions
        """

        if not self.dd.pair_dict[coin]["model_filename"]:
            return None

        if self.live:
            self.model_filename = self.dd.pair_dict[coin]["model_filename"]
            self.data_path = Path(self.dd.pair_dict[coin]["data_path"])
            if self.freqai_config.get("follow_mode", False):
                # follower can be on a different system which is rsynced to the leader:
                self.data_path = Path(
                    self.config["user_data_dir"]
                    / "models"
                    / self.data_path.parts[-2]
                    / self.data_path.parts[-1]
                )

        with open(self.data_path / str(self.model_filename + "_metadata.json"), "r") as fp:
            self.data = json.load(fp)
            self.training_features_list = self.data["training_features_list"]
            self.label_list = self.data["label_list"]

        self.data_dictionary["train_features"] = pd.read_pickle(
            self.data_path / str(self.model_filename + "_trained_df.pkl")
        )

        # try to access model in memory instead of loading object from disk to save time
        if self.live and self.model_filename in self.dd.model_dictionary:
            model = self.dd.model_dictionary[self.model_filename]
        elif not self.keras:
            model = load(self.data_path / str(self.model_filename + "_model.joblib"))
        else:
            from tensorflow import keras
            model = keras.models.load_model(self.data_path / str(self.model_filename + "_model.h5"))

        if Path(self.data_path / str(self.model_filename + "_svm_model.joblib")).resolve().exists():
            self.svm_model = load(self.data_path / str(self.model_filename + "_svm_model.joblib"))

        if not model:
            raise OperationalException(
                f"Unable to load model, ensure model exists at " f"{self.data_path} "
            )

        if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
            self.pca = pk.load(
                open(self.data_path / str(self.model_filename + "_pca_object.pkl"), "rb")
            )

        return model

    def make_train_test_datasets(
        self, filtered_dataframe: DataFrame, labels: DataFrame
    ) -> Dict[Any, Any]:
        """
        Given the dataframe for the full history for training, split the data into
        training and test data according to user specified parameters in configuration
        file.
        :filtered_dataframe: cleaned dataframe ready to be split.
        :labels: cleaned labels ready to be split.
        """
        feat_dict = self.freqai_config.get("feature_parameters", {})

        weights: npt.ArrayLike
        if feat_dict.get("weight_factor", 0) > 0:
            weights = self.set_weights_higher_recent(len(filtered_dataframe))
        else:
            weights = np.ones(len(filtered_dataframe))

        if feat_dict.get("stratify_training_data", 0) > 0:
            stratification = np.zeros(len(filtered_dataframe))
            for i in range(1, len(stratification)):
                if i % feat_dict.get("stratify_training_data", 0) == 0:
                    stratification[i] = 1
        else:
            stratification = None

        (
            train_features,
            test_features,
            train_labels,
            test_labels,
            train_weights,
            test_weights,
        ) = train_test_split(
            filtered_dataframe[: filtered_dataframe.shape[0]],
            labels,
            weights,
            stratify=stratification,
            **self.config["freqai"]["data_split_parameters"],
        )

        return self.build_data_dictionary(
            train_features, test_features, train_labels, test_labels, train_weights, test_weights
        )
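
    # The "data_split_parameters" dict above is forwarded verbatim to sklearn's
    # train_test_split, so any of its keyword arguments are accepted. An illustrative
    # (not prescriptive) config entry:
    #   "freqai": {
    #       "data_split_parameters": {"test_size": 0.33, "shuffle": False}
    #   }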

    def filter_features(
        self,
        unfiltered_dataframe: DataFrame,
        training_feature_list: List,
        label_list: List = list(),
        training_filter: bool = True,
    ) -> Tuple[DataFrame, DataFrame]:
        """
        Filter the unfiltered dataframe to extract the user requested features/labels and properly
        remove all NaNs. Any row with a NaN is removed from training dataset or replaced with
        0s in the prediction dataset. However, prediction dataset do_predict will reflect any
        row that had a NaN and will shield user from that prediction.
        :params:
        :unfiltered_dataframe: the full dataframe for the present training period
        :training_feature_list: list, the training feature list constructed by
        self.find_features() according to user specified parameters in the configuration file.
        :labels: the labels for the dataset
        :training_filter: boolean which lets the function know if it is training data or
        prediction data to be filtered.
        :returns:
        :filtered_dataframe: dataframe cleaned of NaNs and only containing the user
        requested feature set.
        :labels: labels cleaned of NaNs.
        """
        filtered_dataframe = unfiltered_dataframe.filter(training_feature_list, axis=1)
        filtered_dataframe = filtered_dataframe.replace([np.inf, -np.inf], np.nan)

        drop_index = pd.isnull(filtered_dataframe).any(axis=1)  # get the rows that have NaNs,
        drop_index = drop_index.replace(True, 1).replace(False, 0)  # pep8 requirement.
        if (
            training_filter
        ):  # we don't care about total row number (total no. datapoints) in training, we only care
            # about removing any row with NaNs
            # if labels has multiple columns (user wants to train multiple models), we detect here
            labels = unfiltered_dataframe.filter(label_list, axis=1)
            drop_index_labels = pd.isnull(labels).any(axis=1)
            drop_index_labels = drop_index_labels.replace(True, 1).replace(False, 0)
            filtered_dataframe = filtered_dataframe[
                (drop_index == 0) & (drop_index_labels == 0)
            ]  # dropping values
            labels = labels[
                (drop_index == 0) & (drop_index_labels == 0)
            ]  # assuming the labels depend entirely on the dataframe here.
            logger.info(
                f"dropped {len(unfiltered_dataframe) - len(filtered_dataframe)} training points"
                f" due to NaNs in populated dataset {len(unfiltered_dataframe)}."
            )
            if (1 - len(filtered_dataframe) / len(unfiltered_dataframe)) > 0.1 and self.live:
                worst_indicator = str(unfiltered_dataframe.count().idxmin())
                logger.warning(
                    f" {(1 - len(filtered_dataframe)/len(unfiltered_dataframe)) * 100:.0f} percent "
                    " of training data dropped due to NaNs, model may perform inconsistently "
                    f"with expectations. Verify {worst_indicator}"
                )
            self.data["filter_drop_index_training"] = drop_index

        else:
            # we are backtesting so we need to preserve row number to send back to strategy,
            # so now we use do_predict to avoid any prediction based on a NaN
            drop_index = pd.isnull(filtered_dataframe).any(axis=1)
            self.data["filter_drop_index_prediction"] = drop_index
            filtered_dataframe.fillna(0, inplace=True)
            # replacing all NaNs with zeros to avoid issues in 'prediction', but any prediction
            # that was based on a single NaN is ultimately protected from buys with do_predict
            drop_index = ~drop_index
            self.do_predict = np.array(drop_index.replace(True, 1).replace(False, 0))
            if (len(self.do_predict) - self.do_predict.sum()) > 0:
                logger.info(
                    "dropped %s of %s prediction data points due to NaNs.",
                    len(self.do_predict) - self.do_predict.sum(),
                    len(filtered_dataframe),
                )
            labels = []

        return filtered_dataframe, labels

    def build_data_dictionary(
        self,
        train_df: DataFrame,
        test_df: DataFrame,
        train_labels: DataFrame,
        test_labels: DataFrame,
        train_weights: Any,
        test_weights: Any,
    ) -> Dict:

        self.data_dictionary = {
            "train_features": train_df,
            "test_features": test_df,
            "train_labels": train_labels,
            "test_labels": test_labels,
            "train_weights": train_weights,
            "test_weights": test_weights,
        }

        return self.data_dictionary

    def normalize_data(self, data_dictionary: Dict) -> Dict[Any, Any]:
        """
        Normalize all data in the data_dictionary according to the training dataset
        :params:
        :data_dictionary: dictionary containing the cleaned and split training/test data/labels
        :returns:
        :data_dictionary: updated dictionary with standardized values.
        """
        # standardize the data by training stats
        train_max = data_dictionary["train_features"].max()
        train_min = data_dictionary["train_features"].min()
        data_dictionary["train_features"] = (
            2 * (data_dictionary["train_features"] - train_min) / (train_max - train_min) - 1
        )
        data_dictionary["test_features"] = (
            2 * (data_dictionary["test_features"] - train_min) / (train_max - train_min) - 1
        )

        train_labels_max = data_dictionary["train_labels"].max()
        train_labels_min = data_dictionary["train_labels"].min()
        data_dictionary["train_labels"] = (
            2
            * (data_dictionary["train_labels"] - train_labels_min)
            / (train_labels_max - train_labels_min)
            - 1
        )
        data_dictionary["test_labels"] = (
            2
            * (data_dictionary["test_labels"] - train_labels_min)
            / (train_labels_max - train_labels_min)
            - 1
        )

        for item in train_max.keys():
            self.data[item + "_max"] = train_max[item]
            self.data[item + "_min"] = train_min[item]

        self.data["labels_max"] = train_labels_max.to_dict()
        self.data["labels_min"] = train_labels_min.to_dict()

        return data_dictionary
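
    # Worked example of the scaling above: a feature column with train min 10 and
    # train max 20 maps the value 15 to 2 * (15 - 10) / (20 - 10) - 1 = 0.0, i.e.
    # every column is rescaled into [-1, 1] using statistics from the training split only.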

    def normalize_data_from_metadata(self, df: DataFrame) -> DataFrame:
        """
        Normalize a set of data using the min and max from
        the associated training data.
        :params:
        :df: Dataframe to be normalized
        """

        for item in df.keys():
            df[item] = (
                2
                * (df[item] - self.data[item + "_min"])
                / (self.data[item + "_max"] - self.data[item + "_min"])
                - 1
            )

        return df

    def split_timerange(
        self, tr: str, train_split: int = 28, bt_split: int = 7
    ) -> Tuple[list, list]:
        """
        Function which takes a single time range (tr) and splits it
        into sub timeranges to train and backtest on based on user input
        tr: str, full timerange to train on
        train_split: the period length for each training (days). Specified in user
        configuration file
        bt_split: the backtesting length (days). Specified in user configuration file
        """

        if not isinstance(train_split, int) or train_split < 1:
            raise OperationalException(
                "train_period_days must be an integer greater than 0. "
                f"Got {train_split}."
            )
        train_period_days = train_split * SECONDS_IN_DAY
        bt_period = bt_split * SECONDS_IN_DAY

        full_timerange = TimeRange.parse_timerange(tr)
        config_timerange = TimeRange.parse_timerange(self.config["timerange"])
        if config_timerange.stopts == 0:
            config_timerange.stopts = int(
                datetime.datetime.now(tz=datetime.timezone.utc).timestamp()
            )
        timerange_train = copy.deepcopy(full_timerange)
        timerange_backtest = copy.deepcopy(full_timerange)

        tr_training_list = []
        tr_backtesting_list = []
        tr_training_list_timerange = []
        tr_backtesting_list_timerange = []
        first = True
        # within_config_timerange = True
        while True:
            if not first:
                timerange_train.startts = timerange_train.startts + bt_period
            timerange_train.stopts = timerange_train.startts + train_period_days

            first = False
            start = datetime.datetime.utcfromtimestamp(timerange_train.startts)
            stop = datetime.datetime.utcfromtimestamp(timerange_train.stopts)
            tr_training_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d"))
            tr_training_list_timerange.append(copy.deepcopy(timerange_train))

            # associated backtest period

            timerange_backtest.startts = timerange_train.stopts

            timerange_backtest.stopts = timerange_backtest.startts + bt_period

            if timerange_backtest.stopts > config_timerange.stopts:
                timerange_backtest.stopts = config_timerange.stopts

            start = datetime.datetime.utcfromtimestamp(timerange_backtest.startts)
            stop = datetime.datetime.utcfromtimestamp(timerange_backtest.stopts)
            tr_backtesting_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d"))
            tr_backtesting_list_timerange.append(copy.deepcopy(timerange_backtest))

            # ensure we are predicting on exactly same amount of data as requested by user defined
            # --timerange
            if timerange_backtest.stopts == config_timerange.stopts:
                break

        # print(tr_training_list, tr_backtesting_list)
        return tr_training_list_timerange, tr_backtesting_list_timerange
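
    # Worked example (illustrative numbers): with tr = "20210601-20210901",
    # train_split = 30 and bt_split = 7, the first window trains on 20210601-20210701
    # and backtests on 20210701-20210708; each following window slides forward by the
    # 7 day backtest period until the configured --timerange stop date is reached.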

    def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame:
        """
        Given a full dataframe, extract the user desired window
        :params:
        :timerange: TimeRange window that we wish to extract from df
        :df: Dataframe containing all candles to run the entire backtest. Here
        it is sliced down to just the present training period.
        """
        # timerange = TimeRange.parse_timerange(tr)
        start = datetime.datetime.fromtimestamp(timerange.startts, tz=datetime.timezone.utc)
        stop = datetime.datetime.fromtimestamp(timerange.stopts, tz=datetime.timezone.utc)
        df = df.loc[df["date"] >= start, :]
        df = df.loc[df["date"] <= stop, :]

        return df

    def principal_component_analysis(self) -> None:
        """
        Performs Principal Component Analysis on the data for dimensionality reduction
        and outlier detection (see self.remove_outliers())
        No parameters or returns, it acts on the data_dictionary held by the DataKitchen.
        """

        from sklearn.decomposition import PCA  # avoid importing if we don't need it

        n_components = self.data_dictionary["train_features"].shape[1]
        pca = PCA(n_components=n_components)
        pca = pca.fit(self.data_dictionary["train_features"])
        n_keep_components = np.argmin(pca.explained_variance_ratio_.cumsum() < 0.999)
        pca2 = PCA(n_components=n_keep_components)
        self.data["n_kept_components"] = n_keep_components
        pca2 = pca2.fit(self.data_dictionary["train_features"])
        logger.info("reduced feature dimension by %s", n_components - n_keep_components)
        logger.info("explained variance %f", np.sum(pca2.explained_variance_ratio_))
        train_components = pca2.transform(self.data_dictionary["train_features"])
        test_components = pca2.transform(self.data_dictionary["test_features"])

        self.data_dictionary["train_features"] = pd.DataFrame(
            data=train_components,
            columns=["PC" + str(i) for i in range(0, n_keep_components)],
            index=self.data_dictionary["train_features"].index,
        )

        # keeping a copy of the non-transformed features so we can check for errors during
        # model load from disk
        self.data["training_features_list_raw"] = copy.deepcopy(self.training_features_list)
        self.training_features_list = self.data_dictionary["train_features"].columns

        self.data_dictionary["test_features"] = pd.DataFrame(
            data=test_components,
            columns=["PC" + str(i) for i in range(0, n_keep_components)],
            index=self.data_dictionary["test_features"].index,
        )

        self.data["n_kept_components"] = n_keep_components
        self.pca = pca2

        logger.info(f"PCA reduced total features from {n_components} to {n_keep_components}")

        if not self.data_path.is_dir():
            self.data_path.mkdir(parents=True, exist_ok=True)

        return None
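
    # How the component count is chosen above: explained_variance_ratio_.cumsum() is an
    # increasing series, so np.argmin(cumsum < 0.999) yields the index of the first
    # component at which at least 99.9% of the training variance is captured; only that
    # many "PC0..PCn" columns are kept for training and prediction.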

    def pca_transform(self, filtered_dataframe: DataFrame) -> None:
        """
        Use an existing pca transform to transform data into components
        :params:
        filtered_dataframe: DataFrame = the cleaned dataframe
        """
        pca_components = self.pca.transform(filtered_dataframe)
        self.data_dictionary["prediction_features"] = pd.DataFrame(
            data=pca_components,
            columns=["PC" + str(i) for i in range(0, self.data["n_kept_components"])],
            index=filtered_dataframe.index,
        )

    def compute_distances(self) -> float:
        """
        Compute distances between each training point and every other training
        point. This metric defines the neighborhood of trained data and is used
        for prediction confidence in the Dissimilarity Index
        """
        logger.info("computing average mean distance for all training points")
        tc = self.freqai_config.get("model_training_parameters", {}).get("thread_count", -1)
        pairwise = pairwise_distances(self.data_dictionary["train_features"], n_jobs=tc)
        avg_mean_dist = pairwise.mean(axis=1).mean()
        logger.info(f"avg_mean_dist {avg_mean_dist:.2f}")

        return avg_mean_dist

    def use_SVM_to_remove_outliers(self, predict: bool) -> None:
        """
        Build/inference a Support Vector Machine to detect outliers
        in training data and prediction
        :params:
        predict: bool = If true, inference an existing SVM model, else construct one
        """

        if self.keras:
            logger.warning("SVM outlier removal not currently supported for Keras based models. "
                           "Skipping user requested function.")
            if predict:
                self.do_predict = np.ones(len(self.data_dictionary["prediction_features"]))
            return

        if predict:
            assert self.svm_model, "No svm model available for outlier removal"
            y_pred = self.svm_model.predict(self.data_dictionary["prediction_features"])
            do_predict = np.where(y_pred == -1, 0, y_pred)

            if (len(do_predict) - do_predict.sum()) > 0:
                logger.info(
                    f"svm_remove_outliers() tossed {len(do_predict) - do_predict.sum()} predictions"
                )
            self.do_predict += do_predict
            self.do_predict -= 1

        else:
            # use SGDOneClassSVM to increase speed?
            nu = self.freqai_config.get("feature_parameters", {}).get("svm_nu", 0.2)
            self.svm_model = linear_model.SGDOneClassSVM(nu=nu).fit(
                self.data_dictionary["train_features"]
            )
            y_pred = self.svm_model.predict(self.data_dictionary["train_features"])
            dropped_points = np.where(y_pred == -1, 0, y_pred)
            # keep_index = np.where(y_pred == 1)
            self.data_dictionary["train_features"] = self.data_dictionary["train_features"][
                (y_pred == 1)
            ]
            self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][
                (y_pred == 1)
            ]
            self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][
                (y_pred == 1)
            ]

            logger.info(
                f"svm_remove_outliers() tossed {len(y_pred) - dropped_points.sum()}"
                f" train points from {len(y_pred)}"
            )

            # same for test data
            y_pred = self.svm_model.predict(self.data_dictionary["test_features"])
            dropped_points = np.where(y_pred == -1, 0, y_pred)
            self.data_dictionary["test_features"] = self.data_dictionary["test_features"][
                (y_pred == 1)
            ]
            self.data_dictionary["test_labels"] = self.data_dictionary["test_labels"][(y_pred == 1)]
            self.data_dictionary["test_weights"] = self.data_dictionary["test_weights"][
                (y_pred == 1)
            ]

            logger.info(
                f"svm_remove_outliers() tossed {len(y_pred) - dropped_points.sum()}"
                f" test points from {len(y_pred)}"
            )

        return
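
    # Note on "svm_nu": it is passed straight to sklearn's SGDOneClassSVM, where nu is
    # an upper bound on the fraction of training errors (points labeled -1) and a lower
    # bound on the fraction of support vectors, so the default of 0.2 allows roughly a
    # fifth of the training points to be flagged as outliers.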

    def find_features(self, dataframe: DataFrame) -> None:
        """
        Find features in the strategy provided dataframe
        :params:
        dataframe: DataFrame = strategy provided dataframe
        :returns:
        None: the detected features and labels are stored in
        self.training_features_list and self.label_list
        """
        column_names = dataframe.columns
        features = [c for c in column_names if "%" in c]
        labels = [c for c in column_names if "&" in c]
        if not features:
            raise OperationalException("Could not find any features!")

        self.training_features_list = features
        self.label_list = labels
        # return features, labels
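
    # Feature and label detection relies purely on column naming: any column whose name
    # contains "%" is treated as a training feature and any column containing "&" is
    # treated as a label (hypothetical examples: "%-rsi-period_10" as a feature,
    # "&-s_close" as a label).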

    def check_if_pred_in_training_spaces(self) -> None:
        """
        Compares the distance from each prediction point to each training data
        point. It uses this information to estimate a Dissimilarity Index (DI)
        and avoid making predictions on any points that are too far away
        from the training data set.
        """

        distance = pairwise_distances(
            self.data_dictionary["train_features"],
            self.data_dictionary["prediction_features"],
            n_jobs=-1,
        )

        self.DI_values = distance.min(axis=0) / self.data["avg_mean_dist"]

        do_predict = np.where(
            self.DI_values < self.freqai_config.get("feature_parameters", {}).get("DI_threshold"),
            1,
            0,
        )

        if (len(do_predict) - do_predict.sum()) > 0:
            logger.info(
                f"DI tossed {len(do_predict) - do_predict.sum():.2f} predictions for "
                "being too far from training data"
            )

        self.do_predict += do_predict
        self.do_predict -= 1
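
    # In other words, for each prediction point p the Dissimilarity Index is
    #   DI(p) = min_i d(p, x_i) / avg_mean_dist
    # where x_i are the training points and avg_mean_dist comes from compute_distances();
    # points with DI above the user defined "DI_threshold" get their do_predict flag
    # decremented so the strategy can ignore those predictions.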

    def set_weights_higher_recent(self, num_weights: int) -> npt.ArrayLike:
        """
        Set weights so that recent data is more heavily weighted during
        training than older data.
        """
        wfactor = self.config["freqai"]["feature_parameters"]["weight_factor"]
        weights = np.exp(
            - np.arange(num_weights) / (wfactor * num_weights))[::-1]
        return weights
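
    # The resulting weights follow w_k = exp(-(n - 1 - k) / (wfactor * n)) for row index
    # k (0 = oldest, n - 1 = newest), so the most recent candle always receives a weight
    # of 1.0. Illustrative numbers: with n = 100 and weight_factor = 0.5 the oldest row
    # is weighted exp(-99 / 50) ~= 0.14 while the newest row is weighted exp(0) = 1.0.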

    def append_predictions(self, predictions, do_predict, len_dataframe):
        """
        Append backtest prediction from current backtest period to all previous periods
        """

        self.append_df = DataFrame()
        for label in self.label_list:
            self.append_df[label] = predictions[label]
            self.append_df[f"{label}_mean"] = self.data["labels_mean"][label]
            self.append_df[f"{label}_std"] = self.data["labels_std"][label]

        self.append_df["do_predict"] = do_predict
        if self.freqai_config.get("feature_parameters", {}).get("DI_threshold", 0) > 0:
            self.append_df["DI_values"] = self.DI_values

        if self.full_df.empty:
            self.full_df = self.append_df
        else:
            self.full_df = pd.concat([self.full_df, self.append_df], axis=0)

        return

    def fill_predictions(self, dataframe):
        """
        Back fill values to before the backtesting range so that the dataframe matches size
        when it goes back to the strategy. These rows are not included in the backtest.
        """

        len_filler = len(dataframe) - len(self.full_df.index)  # startup_candle_count
        filler_df = pd.DataFrame(
            np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns
        )

        self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True)

        to_keep = [col for col in dataframe.columns if not col.startswith("&")]
        self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1)

        self.append_df = DataFrame()
        self.full_df = DataFrame()

        return

    def create_fulltimerange(self, backtest_tr: str, backtest_period_days: int) -> str:

        if not isinstance(backtest_period_days, int):
            raise OperationalException('backtest_period_days must be an integer')

        if backtest_period_days < 0:
            raise OperationalException('backtest_period_days must be positive')

        backtest_timerange = TimeRange.parse_timerange(backtest_tr)

        if backtest_timerange.stopts == 0:
            backtest_timerange.stopts = int(
                datetime.datetime.now(tz=datetime.timezone.utc).timestamp()
            )

        backtest_timerange.startts = (backtest_timerange.startts
                                      - backtest_period_days * SECONDS_IN_DAY)
        start = datetime.datetime.utcfromtimestamp(backtest_timerange.startts)
        stop = datetime.datetime.utcfromtimestamp(backtest_timerange.stopts)
        full_timerange = start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d")

        self.full_path = Path(
            self.config["user_data_dir"] / "models" / str(self.freqai_config.get("identifier"))
        )

        config_path = Path(self.config["config_files"][0])

        if not self.full_path.is_dir():
            self.full_path.mkdir(parents=True, exist_ok=True)
            shutil.copy(
                config_path.resolve(),
                Path(self.full_path / config_path.parts[-1]),
            )

        return full_timerange

    def check_if_model_expired(self, trained_timestamp: int) -> bool:
        """
        A model age checker to determine if the model is trustworthy based on user defined
        `expiration_hours` in the configuration file.
        :params:
        trained_timestamp: int = The time of training for the most recent model.
        :returns:
        bool = If the model is expired or not.
        """
        time = datetime.datetime.now(tz=datetime.timezone.utc).timestamp()
        elapsed_time = (time - trained_timestamp) / 3600  # hours
        max_time = self.freqai_config.get("expiration_hours", 0)
        if max_time > 0:
            return elapsed_time > max_time
        else:
            return False
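
    # Example: with "expiration_hours": 1 in the freqai config, a model trained more than
    # one hour ago is reported as expired; leaving the setting at its default of 0
    # disables the check entirely, so models never expire.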

    def check_if_new_training_required(
        self, trained_timestamp: int
    ) -> Tuple[bool, TimeRange, TimeRange]:

        time = datetime.datetime.now(tz=datetime.timezone.utc).timestamp()
        trained_timerange = TimeRange()
        data_load_timerange = TimeRange()

        # find the max indicator length required
        max_timeframe_chars = self.freqai_config.get(
            "feature_parameters", {}).get("include_timeframes")[-1]
        max_period = self.freqai_config.get("feature_parameters", {}).get(
            "indicator_max_period_candles", 50
        )
        additional_seconds = 0
        if max_timeframe_chars[-1] == "d":
            additional_seconds = max_period * SECONDS_IN_DAY * int(max_timeframe_chars[-2])
        elif max_timeframe_chars[-1] == "h":
            additional_seconds = max_period * 3600 * int(max_timeframe_chars[-2])
        elif max_timeframe_chars[-1] == "m":
            if len(max_timeframe_chars) == 2:
                additional_seconds = max_period * 60 * int(max_timeframe_chars[-2])
            elif len(max_timeframe_chars) == 3:
                additional_seconds = max_period * 60 * int(float(max_timeframe_chars[0:2]))
        else:
            logger.warning(
                "FreqAI could not detect max timeframe and therefore may not "
                "download the proper amount of data for training"
            )

        # logger.info(f'Extending data download by {additional_seconds/SECONDS_IN_DAY:.2f} days')

        if trained_timestamp != 0:
            elapsed_time = (time - trained_timestamp) / SECONDS_IN_HOUR
            retrain = elapsed_time > self.freqai_config.get("live_retrain_hours", 0)
            if retrain:
                trained_timerange.startts = int(
                    time - self.freqai_config.get("train_period_days", 0) * SECONDS_IN_DAY
                )
                trained_timerange.stopts = int(time)
                # we want to load/populate indicators on more data than we plan to train on
                # because most of the indicators have a rolling timeperiod, and are thus NaNs
                # unless they have data further back in time before the start of the train period
                data_load_timerange.startts = int(
                    time
                    - self.freqai_config.get("train_period_days", 0) * SECONDS_IN_DAY
                    - additional_seconds
                )
                data_load_timerange.stopts = int(time)
        else:  # user passed no live_trained_timerange in config
            trained_timerange.startts = int(
                time - self.freqai_config.get("train_period_days") * SECONDS_IN_DAY
            )
            trained_timerange.stopts = int(time)

            data_load_timerange.startts = int(
                time
                - self.freqai_config.get("train_period_days", 0) * SECONDS_IN_DAY
                - additional_seconds
            )
            data_load_timerange.stopts = int(time)
            retrain = True

        # logger.info(
        #     f"downloading data for "
        #     f"{(data_load_timerange.stopts-data_load_timerange.startts)/SECONDS_IN_DAY:.2f} "
        #     " days. "
        #     f"Extension of {additional_seconds/SECONDS_IN_DAY:.2f} days"
        # )

        return retrain, trained_timerange, data_load_timerange

    def set_new_model_names(self, pair: str, trained_timerange: TimeRange):

        coin, _ = pair.split("/")
        self.data_path = Path(
            self.full_path
            / str("sub-train" + "-" + pair.split("/")[0] + str(int(trained_timerange.stopts)))
        )

        self.model_filename = "cb_" + coin.lower() + "_" + str(int(trained_timerange.stopts))

    def download_all_data_for_training(self, timerange: TimeRange) -> None:
        """
        Called only once upon start of bot to download the necessary data for
        populating indicators and training the model.
        :params:
        timerange: TimeRange = The full data timerange for populating the indicators
        and training the model.
        """
        exchange = ExchangeResolver.load_exchange(
            self.config["exchange"]["name"], self.config, validate=False, freqai=True
        )

        new_pairs_days = int((timerange.stopts - timerange.startts) / SECONDS_IN_DAY)

        refresh_backtest_ohlcv_data(
            exchange,
            pairs=self.all_pairs,
            timeframes=self.freqai_config.get("feature_parameters", {}).get("include_timeframes"),
            datadir=self.config["datadir"],
            timerange=timerange,
            new_pairs_days=new_pairs_days,
            erase=False,
            data_format=self.config.get("dataformat_ohlcv", "json"),
            trading_mode=self.config.get("trading_mode", "spot"),
            prepend=self.config.get("prepend_data", False),
        )

    def update_historic_data(self, strategy: IStrategy) -> None:
        """
        Append new candles to our stored historic data (in memory) so that
        we do not need to load candle history from disk and we do not need to
        ping the exchange multiple times for the same candle.
        :params:
        strategy: IStrategy = user defined strategy object
        """
        feat_params = self.freqai_config.get("feature_parameters", {})
        with self.dd.history_lock:
            history_data = self.dd.historic_data

            for pair in self.all_pairs:
                for tf in feat_params.get("include_timeframes"):

                    # check if newest candle is already appended
                    df_dp = strategy.dp.get_pair_dataframe(pair, tf)
                    if len(df_dp.index) == 0:
                        continue
                    if str(history_data[pair][tf].iloc[-1]["date"]) == str(
                        df_dp.iloc[-1:]["date"].iloc[-1]
                    ):
                        continue

                    try:
                        index = (
                            df_dp.loc[
                                df_dp["date"] == history_data[pair][tf].iloc[-1]["date"]
                            ].index[0]
                            + 1
                        )
                    except IndexError:
                        logger.warning(
                            f"Unable to update pair history for {pair}. "
                            "If this does not resolve itself after 1 additional candle, "
                            "please report the error to #freqai discord channel"
                        )
                        return

                    history_data[pair][tf] = pd.concat(
                        [
                            history_data[pair][tf],
                            strategy.dp.get_pair_dataframe(pair, tf).iloc[index:],
                        ],
                        ignore_index=True,
                        axis=0,
                    )

        # logger.info(f'Length of history data {len(history_data[pair][tf])}')

    def set_all_pairs(self) -> None:

        self.all_pairs = copy.deepcopy(self.freqai_config.get(
            'feature_parameters', {}).get('include_corr_pairlist', []))
        for pair in self.config.get("exchange", "").get("pair_whitelist"):
            if pair not in self.all_pairs:
                self.all_pairs.append(pair)

    def load_all_pair_histories(self, timerange: TimeRange) -> None:
        """
        Load pair histories for all whitelist and corr_pairlist pairs.
        Only called once upon startup of bot.
        :params:
        timerange: TimeRange = full timerange required to populate all indicators
        for training according to user defined train_period_days
        """
        history_data = self.dd.historic_data

        for pair in self.all_pairs:
            if pair not in history_data:
                history_data[pair] = {}
            for tf in self.freqai_config.get("feature_parameters", {}).get("include_timeframes"):
                history_data[pair][tf] = load_pair_history(
                    datadir=self.config["datadir"],
                    timeframe=tf,
                    pair=pair,
                    timerange=timerange,
                    data_format=self.config.get("dataformat_ohlcv", "json"),
                    candle_type=self.config.get("trading_mode", "spot"),
                )

    def get_base_and_corr_dataframes(
        self, timerange: TimeRange, pair: str
    ) -> Tuple[Dict[Any, Any], Dict[Any, Any]]:
        """
        Searches through our historic_data in memory and returns the dataframes relevant
        to the present pair.
        :params:
        timerange: TimeRange = full timerange required to populate all indicators
        for training according to user defined train_period_days
        pair: str = pair for which the base and correlated dataframes are required
        """

        with self.dd.history_lock:
            corr_dataframes: Dict[Any, Any] = {}
            base_dataframes: Dict[Any, Any] = {}
            historic_data = self.dd.historic_data
            pairs = self.freqai_config.get('feature_parameters', {}).get(
                'include_corr_pairlist', [])

            for tf in self.freqai_config.get("feature_parameters", {}).get("include_timeframes"):
                base_dataframes[tf] = self.slice_dataframe(timerange, historic_data[pair][tf])
                if pairs:
                    for p in pairs:
                        if pair in p:
                            continue  # don't repeat anything from whitelist
                        if p not in corr_dataframes:
                            corr_dataframes[p] = {}
                        corr_dataframes[p][tf] = self.slice_dataframe(
                            timerange, historic_data[p][tf]
                        )

        return corr_dataframes, base_dataframes

    # SUPERSEDED
    # def load_pairs_histories(self, timerange: TimeRange, metadata: dict) -> Tuple[Dict[Any, Any],
    #                                                                               DataFrame]:
    #     corr_dataframes: Dict[Any, Any] = {}
    #     base_dataframes: Dict[Any, Any] = {}
    #     pairs = self.freqai_config.get('include_corr_pairlist', [])  # + [metadata['pair']]
    #     # timerange = TimeRange.parse_timerange(new_timerange)
    #
    #     for tf in self.freqai_config.get('timeframes'):
    #         base_dataframes[tf] = load_pair_history(datadir=self.config['datadir'],
    #                                                 timeframe=tf,
    #                                                 pair=metadata['pair'], timerange=timerange,
    #                                                 data_format=self.config.get(
    #                                                     'dataformat_ohlcv', 'json'),
    #                                                 candle_type=self.config.get(
    #                                                     'trading_mode', 'spot'))
    #     if pairs:
    #         for p in pairs:
    #             if metadata['pair'] in p:
    #                 continue  # don't repeat anything from whitelist
    #             if p not in corr_dataframes:
    #                 corr_dataframes[p] = {}
    #             corr_dataframes[p][tf] = load_pair_history(datadir=self.config['datadir'],
    #                                                        timeframe=tf,
    #                                                        pair=p, timerange=timerange,
    #                                                        data_format=self.config.get(
    #                                                            'dataformat_ohlcv', 'json'),
    #                                                        candle_type=self.config.get(
    #                                                            'trading_mode', 'spot'))
    #
    #     return corr_dataframes, base_dataframes
|
|
|
|
|
2022-07-03 08:59:38 +00:00
|
|
|
def use_strategy_to_populate_indicators(
|
|
|
|
self, strategy: IStrategy, corr_dataframes: dict, base_dataframes: dict, pair: str
|
|
|
|
) -> DataFrame:
|
2022-06-03 13:19:46 +00:00
|
|
|
"""
|
|
|
|
Use the user defined strategy for populating indicators during
|
|
|
|
retrain
|
|
|
|
:params:
|
|
|
|
strategy: IStrategy = user defined strategy object
|
|
|
|
corr_dataframes: dict = dict containing the informative pair dataframes
|
|
|
|
(for user defined timeframes)
|
|
|
|
base_dataframes: dict = dict containing the current pair dataframes
|
|
|
|
(for user defined timeframes)
|
|
|
|
metadata: dict = strategy furnished pair metadata
|
|
|
|
:returns:
|
|
|
|
dataframe: DataFrame = dataframe containing populated indicators
|
|
|
|
"""
|
2022-07-03 08:59:38 +00:00
|
|
|
dataframe = base_dataframes[self.config["timeframe"]].copy()
        pairs = self.freqai_config.get("feature_parameters", {}).get("include_corr_pairlist", [])
        sgi = True
        for tf in self.freqai_config.get("feature_parameters", {}).get("include_timeframes"):
            dataframe = strategy.populate_any_indicators(
                pair,
                pair,
                dataframe.copy(),
                tf,
                base_dataframes[tf],
                coin=pair.split("/")[0] + "-",
                set_generalized_indicators=sgi,
            )
            sgi = False
            if pairs:
                for i in pairs:
                    if pair in i:
                        continue  # don't repeat anything from whitelist
                    dataframe = strategy.populate_any_indicators(
                        pair,
                        i,
                        dataframe.copy(),
                        tf,
                        corr_dataframes[i][tf],
                        coin=i.split("/")[0] + "-",
                    )

        return dataframe
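
    # Minimal usage sketch (hypothetical, not part of the live pipeline): given a
    # FreqaiDataKitchen instance `dk`, a user strategy `strategy`, the dicts built above,
    # and an example pair "BTC/USDT":
    #
    #   df = dk.use_strategy_to_populate_indicators(
    #       strategy, corr_dataframes, base_dataframes, "BTC/USDT")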

    def fit_live_predictions(self) -> None:
        """
        Fit each label in the most recent live predictions with a Gaussian distribution
        """
        import scipy as spy

        num_candles = self.freqai_config.get("fit_live_predictions_candles", 100)
        self.data["labels_mean"], self.data["labels_std"] = {}, {}
        for label in self.label_list:
            f = spy.stats.norm.fit(
                self.dd.historic_predictions[self.pair][label].tail(num_candles)
            )
            self.data["labels_mean"][label], self.data["labels_std"][label] = f[0], f[1]

        return
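
    # Worked example (illustrative numbers): scipy.stats.norm.fit() returns the maximum
    # likelihood estimates (mean, std) for the sample, i.e. the sample mean and the
    # population (ddof=0) standard deviation:
    #
    #   >>> import scipy.stats
    #   >>> scipy.stats.norm.fit([0.01, -0.02, 0.03, 0.00])
    #   (0.005, 0.0180...)
    #
    # so labels_mean/labels_std above simply hold those per-label statistics.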

    def fit_labels(self) -> None:
        """
        Fit each training label with a Gaussian distribution
        """
        import scipy as spy

        self.data["labels_mean"], self.data["labels_std"] = {}, {}
        for label in self.label_list:
            f = spy.stats.norm.fit(self.data_dictionary["train_labels"][label])
            self.data["labels_mean"][label], self.data["labels_std"][label] = f[0], f[1]

        # KEEPME in case we want to let the user start to grab quantiles.
        # upper_q = spy.stats.norm.ppf(self.freqai_config['feature_parameters'][
        #                              'target_quantile'], *f)
        # lower_q = spy.stats.norm.ppf(1 - self.freqai_config['feature_parameters'][
        #                              'target_quantile'], *f)
        # self.data["upper_quantile"] = upper_q
        # self.data["lower_quantile"] = lower_q
        return
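
    # Hedged sketch (not part of the pipeline) of how the fitted statistics could be used to
    # express a new prediction as a z-score relative to the training label distribution,
    # where `label` is any entry of self.label_list and `prediction` is a model output:
    #
    #   z = (prediction - self.data["labels_mean"][label]) / self.data["labels_std"][label]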

    def np_encoder(self, object):
        # Helper for the json module: convert numpy scalar types to native Python values.
        if isinstance(object, np.generic):
            return object.item()
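
    # Illustrative use of np_encoder as the json `default` hook, so numpy scalars inside
    # self.data serialize cleanly (sketch only; `fp` is an open file handle):
    #
    #   json.dump(self.data, fp, default=self.np_encoder)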

    # Functions containing useful data manipulation examples, but not actively in use.

    # def build_feature_list(self, config: dict, metadata: dict) -> list:
    #     """
    #     SUPERSEDED BY self.find_features()
    #     Build the list of features that will be used to filter
    #     the full dataframe. Feature list is constructed from the
    #     user configuration file.
    #     :params:
    #     :config: Canonical freqtrade config file containing all
    #     user defined input in config['freqai'] dictionary.
    #     """
    #     features = []
    #     for tf in config["freqai"]["timeframes"]:
    #         for ft in config["freqai"]["base_features"]:
    #             for n in range(config["freqai"]["feature_parameters"]["shift"] + 1):
    #                 shift = ""
    #                 if n > 0:
    #                     shift = "_shift-" + str(n)
    #                 features.append(metadata['pair'].split("/")[0] + "-" + ft + shift + "_" + tf)
    #                 for p in config["freqai"]["corr_pairlist"]:
    #                     if metadata['pair'] in p:
    #                         continue  # avoid duplicate features
    #                     features.append(p.split("/")[0] + "-" + ft + shift + "_" + tf)

    #     # logger.info("number of features %s", len(features))
    #     return features
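
    # Example feature name produced by the commented loop above (assuming a hypothetical
    # pair "BTC/USDT", base feature "rsi", shift n=1 and timeframe "5m"): "BTC-rsi_shift-1_5m"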

    # Possibly phasing these outlier removal methods below out in favor of
    # use_SVM_to_remove_outliers (computationally more efficient and apparently higher performance).
    # But these have good data manipulation examples, so keep them commented here for now.

    # def determine_statistical_distributions(self) -> None:
    #     from fitter import Fitter

    #     logger.info('Determining best model for all features, may take some time')

    #     def compute_quantiles(ft):
    #         f = Fitter(self.data_dictionary["train_features"][ft],
    #                    distributions=['gamma', 'cauchy', 'laplace',
    #                                   'beta', 'uniform', 'lognorm'])
    #         f.fit()
    #         # f.summary()
    #         dist = list(f.get_best().items())[0][0]
    #         params = f.get_best()[dist]
    #         upper_q = getattr(spy.stats, list(f.get_best().items())[0][0]).ppf(0.999, **params)
    #         lower_q = getattr(spy.stats, list(f.get_best().items())[0][0]).ppf(0.001, **params)

    #         return ft, upper_q, lower_q, dist

    #     quantiles_tuple = Parallel(n_jobs=-1)(
    #         delayed(compute_quantiles)(ft) for ft in self.data_dictionary[
    #             'train_features'].columns)

    #     df = pd.DataFrame(quantiles_tuple, columns=['features', 'upper_quantiles',
    #                                                 'lower_quantiles', 'dist'])
    #     self.data_dictionary['upper_quantiles'] = df['upper_quantiles']
    #     self.data_dictionary['lower_quantiles'] = df['lower_quantiles']

    #     return

    # def remove_outliers(self, predict: bool) -> None:
    #     """
    #     Remove data that looks like an outlier based on the distribution of each
    #     variable.
    #     :params:
    #     :predict: boolean which tells the function if this is prediction data or
    #     training data coming in.
    #     """

    #     lower_quantile = self.data_dictionary["lower_quantiles"].to_numpy()
    #     upper_quantile = self.data_dictionary["upper_quantiles"].to_numpy()

    #     if predict:
    #         df = self.data_dictionary["prediction_features"][
    #             (self.data_dictionary["prediction_features"] < upper_quantile)
    #             & (self.data_dictionary["prediction_features"] > lower_quantile)
    #         ]
    #         drop_index = pd.isnull(df).any(1)
    #         self.data_dictionary["prediction_features"].fillna(0, inplace=True)
    #         drop_index = ~drop_index
    #         do_predict = np.array(drop_index.replace(True, 1).replace(False, 0))

    #         logger.info(
    #             "remove_outliers() tossed %s predictions",
    #             len(do_predict) - do_predict.sum(),
    #         )
    #         self.do_predict += do_predict
    #         self.do_predict -= 1

    #     else:
    #         filter_train_df = self.data_dictionary["train_features"][
    #             (self.data_dictionary["train_features"] < upper_quantile)
    #             & (self.data_dictionary["train_features"] > lower_quantile)
    #         ]
    #         drop_index = pd.isnull(filter_train_df).any(1)
    #         drop_index = drop_index.replace(True, 1).replace(False, 0)
    #         self.data_dictionary["train_features"] = self.data_dictionary["train_features"][
    #             (drop_index == 0)
    #         ]
    #         self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][
    #             (drop_index == 0)
    #         ]
    #         self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][
    #             (drop_index == 0)
    #         ]

    #         logger.info(
    #             f'remove_outliers() tossed {drop_index.sum()}'
    #             f' training points from {len(filter_train_df)}'
    #         )

    #         # do the same for the test data
    #         filter_test_df = self.data_dictionary["test_features"][
    #             (self.data_dictionary["test_features"] < upper_quantile)
    #             & (self.data_dictionary["test_features"] > lower_quantile)
    #         ]
    #         drop_index = pd.isnull(filter_test_df).any(1)
    #         drop_index = drop_index.replace(True, 1).replace(False, 0)
    #         self.data_dictionary["test_labels"] = self.data_dictionary["test_labels"][
    #             (drop_index == 0)
    #         ]
    #         self.data_dictionary["test_features"] = self.data_dictionary["test_features"][
    #             (drop_index == 0)
    #         ]
    #         self.data_dictionary["test_weights"] = self.data_dictionary["test_weights"][
    #             (drop_index == 0)
    #         ]

    #         logger.info(
    #             f'remove_outliers() tossed {drop_index.sum()}'
    #             f' test points from {len(filter_test_df)}'
    #         )

    #     return

    # def standardize_data(self, data_dictionary: Dict) -> Dict[Any, Any]:
    #     """
    #     standardize all data in the data_dictionary according to the training dataset
    #     :params:
    #     :data_dictionary: dictionary containing the cleaned and split training/test data/labels
    #     :returns:
    #     :data_dictionary: updated dictionary with standardized values.
    #     """
    #     # standardize the data by training stats
    #     train_mean = data_dictionary["train_features"].mean()
    #     train_std = data_dictionary["train_features"].std()
    #     data_dictionary["train_features"] = (
    #         data_dictionary["train_features"] - train_mean
    #     ) / train_std
    #     data_dictionary["test_features"] = (
    #         data_dictionary["test_features"] - train_mean
    #     ) / train_std

    #     train_labels_std = data_dictionary["train_labels"].std()
    #     train_labels_mean = data_dictionary["train_labels"].mean()
    #     data_dictionary["train_labels"] = (
    #         data_dictionary["train_labels"] - train_labels_mean
    #     ) / train_labels_std
    #     data_dictionary["test_labels"] = (
    #         data_dictionary["test_labels"] - train_labels_mean
    #     ) / train_labels_std

    #     for item in train_std.keys():
    #         self.data[item + "_std"] = train_std[item]
    #         self.data[item + "_mean"] = train_mean[item]

    #     self.data["labels_std"] = train_labels_std
    #     self.data["labels_mean"] = train_labels_mean

    #     return data_dictionary

    # def standardize_data_from_metadata(self, df: DataFrame) -> DataFrame:
    #     """
    #     Normalizes a set of data using the mean and standard deviation from
    #     the associated training data.
    #     :params:
    #     :df: Dataframe to be standardized
    #     """

    #     for item in df.keys():
    #         df[item] = (df[item] - self.data[item + "_mean"]) / self.data[item + "_std"]

    #     return df
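
    # Minimal sketch (hypothetical feature name "rsi") of how the per-feature statistics stored
    # by the commented standardize_data() above would be applied and inverted:
    #
    #   z = (raw_value - self.data["rsi_mean"]) / self.data["rsi_std"]   # standardize
    #   raw_value = z * self.data["rsi_std"] + self.data["rsi_mean"]     # invert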