flake8 passing, use pathlib in lieu of os.path to accommodate Windows/macOS

robcaulk 2022-05-04 17:42:34 +02:00
parent 2600ba4e74
commit 99f7e44c30
7 changed files with 593 additions and 439 deletions
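
The change is mechanical but worth spelling out. A minimal sketch of the before/after pattern applied throughout this commit (the path fragments here are illustrative, not taken from the diff):

from pathlib import Path

# before: string concatenation, fragile across OS path separators
# save_path = model_path + model_filename + "_model.joblib"
# if not os.path.exists(model_path): os.mkdir(model_path)

# after: Path objects join with "/" and normalize separators on
# Windows, macOS, and Linux; mkdir can create parent dirs in one call
model_path = Path("user_data") / "models" / "example-identifier"
model_path.mkdir(parents=True, exist_ok=True)
save_path = model_path / "example_model.joblib"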

View File

@@ -6,17 +6,19 @@
    "fiat_display_currency": "USD",
    "dry_run": true,
    "timeframe": "5m",
    "dry_run_wallet": 1000,
    "cancel_open_orders_on_exit": true,
    "unfilledtimeout": {
        "entry": 10,
        "exit": 30
    },
    "exchange": {
        "name": "ftx",
        "key": "",
        "secret": "",
        "ccxt_config": {
            "enableRateLimit": true
        },
        "ccxt_async_config": {
            "enableRateLimit": true,
            "rateLimit": 200
@@ -24,8 +26,7 @@
        "pair_whitelist": [
            "BTC/USDT"
        ],
        "pair_blacklist": []
    },
    "entry_pricing": {
        "price_side": "same",
@@ -43,54 +44,57 @@
        "order_book_top": 1
    },
    "pairlists": [
        {
            "method": "StaticPairList"
        }
    ],
    "freqai": {
        "btc_pair": "BTC/USDT",
        "timeframes": [
            "5m",
            "15m"
        ],
        "full_timerange": "20210601-20210901",
        "train_period": 30,
        "backtest_period": 7,
        "identifier": "example",
        "base_features": [
            "rsi",
            "close_over_20sma",
            "relative_volume",
            "bb_width",
            "mfi",
            "roc",
            "pct-change",
            "adx",
            "macd"
        ],
        "corr_pairlist": [
            "ETH/USDT",
            "LINK/USDT",
            "DOT/USDT"
        ],
        "training_timerange": "20211220-20220117",
        "feature_parameters": {
            "period": 12,
            "shift": 1,
            "drop_features": false,
            "DI_threshold": 1,
            "weight_factor": 0,
            "principal_component_analysis": false,
            "remove_outliers": false
        },
        "data_split_parameters": {
            "test_size": 0.25,
            "random_state": 1
        },
        "model_training_parameters": {
            "n_estimators": 2000,
            "random_state": 1,
            "learning_rate": 0.02,
            "task_type": "CPU"
        }
    },
    "bot_name": "",
    "initial_state": "running",
    "forcebuy_enable": false,

View File

@@ -1,64 +1,77 @@
import copy
import datetime
import json
import pickle as pk
from pathlib import Path
from typing import Any, Dict, List, Tuple

import numpy as np
import pandas as pd
from joblib import dump, load
from pandas import DataFrame
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import train_test_split

from freqtrade.configuration import TimeRange

SECONDS_IN_DAY = 86400


class DataHandler:
    """
    Class designed to handle all the data for the IFreqaiModel class model.
    Functionalities include holding, saving, loading, and analyzing the data.
    author: Robert Caulk, rob.caulk@gmail.com
    """

    def __init__(self, config: Dict[str, Any], dataframe: DataFrame):
        self.full_dataframe = dataframe
        (self.training_timeranges, self.backtesting_timeranges) = self.split_timerange(
            config["freqai"]["full_timerange"],
            config["freqai"]["train_period"],
            config["freqai"]["backtest_period"],
        )
        self.data: Dict[Any, Any] = {}
        self.data_dictionary: Dict[Any, Any] = {}
        self.config = config
        self.freq_config = config["freqai"]
        self.predictions = np.array([])
        self.do_predict = np.array([])
        self.target_mean = np.array([])
        self.target_std = np.array([])
        self.model_path = Path()
        self.model_filename = ""

    def save_data(self, model: Any) -> None:
        """
        Saves all data associated with a model for a single sub-train time range
        :params:
        :model: User trained model which can be reused for inferencing to generate
        predictions
        """
        if not self.model_path.is_dir():
            self.model_path.mkdir(parents=True, exist_ok=True)
        save_path = Path(self.model_path)

        # Save the trained model
        dump(model, save_path / str(self.model_filename + "_model.joblib"))
        self.data["model_path"] = self.model_path
        self.data["model_filename"] = self.model_filename
        self.data["training_features_list"] = list(self.data_dictionary["train_features"].columns)
        # store the metadata
        with open(save_path / str(self.model_filename + "_metadata.json"), "w") as fp:
            json.dump(self.data, fp, default=self.np_encoder)

        # save the train data to file so we can check preds for area of applicability later
        self.data_dictionary["train_features"].to_pickle(
            save_path / str(self.model_filename + "_trained_df.pkl")
        )

        return
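
save_data() and load_data() (next hunk) now share a Path-based naming scheme for the artifacts written per sub-train window. A minimal sketch of the round trip, with an illustrative directory and filename:

from pathlib import Path
from joblib import dump, load

model_path = Path("user_data/models/example/sub-train-20210601-20210701")
model_filename = "cb_btc_20210601-20210701"

model_path.mkdir(parents=True, exist_ok=True)
dump({"stub": "model"}, model_path / str(model_filename + "_model.joblib"))
model = load(model_path / str(model_filename + "_model.joblib"))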
@@ -68,156 +81,210 @@ class DataHandler:
        :returns:
        :model: User trained model which can be inferenced for new predictions
        """
        model = load(self.model_path / str(self.model_filename + "_model.joblib"))

        with open(self.model_path / str(self.model_filename + "_metadata.json"), "r") as fp:
            self.data = json.load(fp)
            self.training_features_list = self.data["training_features_list"]

        self.data_dictionary["train_features"] = pd.read_pickle(
            self.model_path / str(self.model_filename + "_trained_df.pkl")
        )

        self.model_path = self.data["model_path"]
        self.model_filename = self.data["model_filename"]
        if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
            self.pca = pk.load(
                open(self.model_path / str(self.model_filename + "_pca_object.pkl"), "rb")
            )

        return model
    def make_train_test_datasets(
        self, filtered_dataframe: DataFrame, labels: DataFrame
    ) -> Dict[Any, Any]:
        """
        Given the dataframe for the full history for training, split the data into
        training and test data according to user specified parameters in configuration
        file.
        :filtered_dataframe: cleaned dataframe ready to be split.
        :labels: cleaned labels ready to be split.
        """
        if self.config["freqai"]["feature_parameters"]["weight_factor"] > 0:
            weights = self.set_weights_higher_recent(len(filtered_dataframe))
        else:
            weights = np.ones(len(filtered_dataframe))

        (
            train_features,
            test_features,
            train_labels,
            test_labels,
            train_weights,
            test_weights,
        ) = train_test_split(
            filtered_dataframe[: filtered_dataframe.shape[0]],
            labels,
            weights,
            **self.config["freqai"]["data_split_parameters"]
        )

        return self.build_data_dictionary(
            train_features, test_features, train_labels, test_labels, train_weights, test_weights
        )
    def filter_features(
        self,
        unfiltered_dataframe: DataFrame,
        training_feature_list: List,
        labels: DataFrame = pd.DataFrame(),
        training_filter: bool = True,
    ) -> Tuple[DataFrame, DataFrame]:
        """
        Filter the unfiltered dataframe to extract the user requested features and properly
        remove all NaNs. Any row with a NaN is removed from the training dataset or replaced with
        0s in the prediction dataset. However, the prediction dataset's do_predict will reflect any
        row that had a NaN and will shield the user from that prediction.
        :params:
        :unfiltered_dataframe: the full dataframe for the present training period
        :training_feature_list: list, the training feature list constructed by
        self.build_feature_list() according to user specified parameters in the configuration file.
        :labels: the labels for the dataset
        :training_filter: boolean which lets the function know if it is training data or
        prediction data to be filtered.
        :returns:
        :filtered_dataframe: dataframe cleaned of NaNs and only containing the user
        requested feature set.
        :labels: labels cleaned of NaNs.
        """
        filtered_dataframe = unfiltered_dataframe.filter(training_feature_list, axis=1)
        drop_index = pd.isnull(filtered_dataframe).any(1)  # get the rows that have NaNs
        drop_index = drop_index.replace(True, 1).replace(False, 0)  # pep8 requirement.
        if (
            training_filter
        ):  # we don't care about total row number (total no. datapoints) in training, we only care
            # about removing any row with NaNs
            drop_index_labels = pd.isnull(labels)
            drop_index_labels = drop_index_labels.replace(True, 1).replace(False, 0)
            filtered_dataframe = filtered_dataframe[
                (drop_index == 0) & (drop_index_labels == 0)
            ]  # dropping values
            labels = labels[
                (drop_index == 0) & (drop_index_labels == 0)
            ]  # assuming the labels depend entirely on the dataframe here.
            print(
                "dropped",
                len(unfiltered_dataframe) - len(filtered_dataframe),
                "training data points due to NaNs, ensure you have downloaded",
                "all historical training data",
            )
            self.data["filter_drop_index_training"] = drop_index

        else:
            # we are backtesting so we need to preserve row number to send back to strategy,
            # so now we use do_predict to avoid any prediction based on a NaN
            drop_index = pd.isnull(filtered_dataframe).any(1)
            self.data["filter_drop_index_prediction"] = drop_index
            filtered_dataframe.fillna(0, inplace=True)
            # replacing all NaNs with zeros to avoid issues in 'prediction', but any prediction
            # that was based on a single NaN is ultimately protected from buys with do_predict
            drop_index = ~drop_index
            self.do_predict = np.array(drop_index.replace(True, 1).replace(False, 0))
            print(
                "dropped",
                len(self.do_predict) - self.do_predict.sum(),
                "of",
                len(filtered_dataframe),
                "prediction data points due to NaNs. These are protected from prediction",
                "with the do_predict vector returned to strategy.",
            )

        return filtered_dataframe, labels
    def build_data_dictionary(
        self,
        train_df: DataFrame,
        test_df: DataFrame,
        train_labels: DataFrame,
        test_labels: DataFrame,
        train_weights: Any,
        test_weights: Any,
    ) -> Dict:

        self.data_dictionary = {
            "train_features": train_df,
            "test_features": test_df,
            "train_labels": train_labels,
            "test_labels": test_labels,
            "train_weights": train_weights,
            "test_weights": test_weights,
        }

        return self.data_dictionary
    def standardize_data(self, data_dictionary: Dict) -> Dict[Any, Any]:
        """
        Standardize all data in the data_dictionary according to the training dataset
        :params:
        :data_dictionary: dictionary containing the cleaned and split training/test data/labels
        :returns:
        :data_dictionary: updated dictionary with standardized values.
        """
        # standardize the data by training stats
        train_mean = data_dictionary["train_features"].mean()
        train_std = data_dictionary["train_features"].std()
        data_dictionary["train_features"] = (
            data_dictionary["train_features"] - train_mean
        ) / train_std
        data_dictionary["test_features"] = (
            data_dictionary["test_features"] - train_mean
        ) / train_std

        train_labels_std = data_dictionary["train_labels"].std()
        train_labels_mean = data_dictionary["train_labels"].mean()
        data_dictionary["train_labels"] = (
            data_dictionary["train_labels"] - train_labels_mean
        ) / train_labels_std
        data_dictionary["test_labels"] = (
            data_dictionary["test_labels"] - train_labels_mean
        ) / train_labels_std

        for item in train_std.keys():
            self.data[item + "_std"] = train_std[item]
            self.data[item + "_mean"] = train_mean[item]

        self.data["labels_std"] = train_labels_std
        self.data["labels_mean"] = train_labels_mean

        return data_dictionary
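
The stored labels_mean/labels_std are what ExamplePredictionModel.predict() (below) uses to map predictions back to real units. A small round-trip sketch:

import pandas as pd

train_labels = pd.Series([0.01, 0.03, -0.02, 0.00])
labels_mean, labels_std = train_labels.mean(), train_labels.std()

standardized = (train_labels - labels_mean) / labels_std
# ... train and predict in standardized space ...
restored = standardized * labels_std + labels_mean  # recovers train_labels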
    def standardize_data_from_metadata(self, df: DataFrame) -> DataFrame:
        """
        Standardizes a set of data using the mean and standard deviation from
        the associated training data.
        :params:
        :df: Dataframe to be standardized
        """
        for item in df.keys():
            df[item] = (df[item] - self.data[item + "_mean"]) / self.data[item + "_std"]

        return df
    def split_timerange(
        self, tr: str, train_split: int = 28, bt_split: int = 7
    ) -> Tuple[list, list]:
        """
        Function which takes a single time range (tr) and splits it
        into sub timeranges to train and backtest on based on user input
        tr: str, full timerange to train on
        train_split: the period length for each training (days). Specified in user
        configuration file
        bt_split: the backtesting length (days). Specified in user configuration file
        """
        train_period = train_split * SECONDS_IN_DAY
        bt_period = bt_split * SECONDS_IN_DAY
@@ -230,22 +297,24 @@ class DataHandler:
        tr_backtesting_list = []
        first = True
        while True:
            if not first:
                timerange_train.startts = timerange_train.startts + bt_period
            timerange_train.stopts = timerange_train.startts + train_period

            # if a full training period doesn't fit, we stop
            if timerange_train.stopts > full_timerange.stopts:
                break
            first = False
            start = datetime.datetime.utcfromtimestamp(timerange_train.startts)
            stop = datetime.datetime.utcfromtimestamp(timerange_train.stopts)
            tr_training_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d"))

            # associated backtest period
            timerange_backtest.startts = timerange_train.stopts
            timerange_backtest.stopts = timerange_backtest.startts + bt_period
            start = datetime.datetime.utcfromtimestamp(timerange_backtest.startts)
            stop = datetime.datetime.utcfromtimestamp(timerange_backtest.stopts)
            tr_backtesting_list.append(start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d"))

        return tr_training_list, tr_backtesting_list
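
With the example config above (full_timerange 20210601-20210901, train_period 30, backtest_period 7), the loop produces sliding window pairs roughly like this (hand-computed, so treat the endpoints as approximate):

training window       backtesting window
20210601-20210701     20210701-20210708
20210608-20210708     20210708-20210715
20210615-20210715     20210715-20210722
...                   ...
20210727-20210826     20210826-20210902

Each training window is 30 days, each backtest window the following 7 days, and both slide forward by the backtest length until a full training window no longer fits.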
@@ -260,8 +329,8 @@ class DataHandler:
        timerange = TimeRange.parse_timerange(tr)
        start = datetime.datetime.fromtimestamp(timerange.startts, tz=datetime.timezone.utc)
        stop = datetime.datetime.fromtimestamp(timerange.stopts, tz=datetime.timezone.utc)
        df = df.loc[df["date"] >= start, :]
        df = df.loc[df["date"] <= stop, :]

        return df
@@ -272,128 +341,171 @@ class DataHandler:
        No parameters or returns, it acts on the data_dictionary held by the DataHandler.
        """
        from sklearn.decomposition import PCA  # avoid importing if we don't need it

        n_components = self.data_dictionary["train_features"].shape[1]
        pca = PCA(n_components=n_components)
        pca = pca.fit(self.data_dictionary["train_features"])
        n_keep_components = np.argmin(pca.explained_variance_ratio_.cumsum() < 0.999)
        pca2 = PCA(n_components=n_keep_components)
        self.data["n_kept_components"] = n_keep_components
        pca2 = pca2.fit(self.data_dictionary["train_features"])
        print("reduced feature dimension by", n_components - n_keep_components)
        print("explained variance", np.sum(pca2.explained_variance_ratio_))
        train_components = pca2.transform(self.data_dictionary["train_features"])
        test_components = pca2.transform(self.data_dictionary["test_features"])

        self.data_dictionary["train_features"] = pd.DataFrame(
            data=train_components,
            columns=["PC" + str(i) for i in range(0, n_keep_components)],
            index=self.data_dictionary["train_features"].index,
        )

        self.data_dictionary["test_features"] = pd.DataFrame(
            data=test_components,
            columns=["PC" + str(i) for i in range(0, n_keep_components)],
            index=self.data_dictionary["test_features"].index,
        )

        self.data["n_kept_components"] = n_keep_components
        self.pca = pca2

        if not self.model_path.is_dir():
            self.model_path.mkdir(parents=True, exist_ok=True)
        pk.dump(pca2, open(self.model_path / str(self.model_filename + "_pca_object.pkl"), "wb"))

        return None
    def compute_distances(self) -> float:
        print("computing average mean distance for all training points")
        pairwise = pairwise_distances(self.data_dictionary["train_features"], n_jobs=-1)
        avg_mean_dist = pairwise.mean(axis=1).mean()
        print("avg_mean_dist", avg_mean_dist)

        return avg_mean_dist
    def remove_outliers(self, predict: bool) -> None:
        """
        Remove data that looks like an outlier based on the distribution of each
        variable.
        :params:
        :predict: boolean which tells the function if this is prediction data or
        training data coming in.
        """
        lower_quantile = self.data_dictionary["train_features"].quantile(0.001)
        upper_quantile = self.data_dictionary["train_features"].quantile(0.999)

        if predict:

            df = self.data_dictionary["prediction_features"][
                (self.data_dictionary["prediction_features"] < upper_quantile)
                & (self.data_dictionary["prediction_features"] > lower_quantile)
            ]
            drop_index = pd.isnull(df).any(1)
            self.data_dictionary["prediction_features"].fillna(0, inplace=True)
            drop_index = ~drop_index
            do_predict = np.array(drop_index.replace(True, 1).replace(False, 0))

            print(
                "remove_outliers() tossed",
                len(do_predict) - do_predict.sum(),
                "predictions because they were beyond 3 std deviations from training data.",
            )
            self.do_predict += do_predict
            self.do_predict -= 1

        else:

            filter_train_df = self.data_dictionary["train_features"][
                (self.data_dictionary["train_features"] < upper_quantile)
                & (self.data_dictionary["train_features"] > lower_quantile)
            ]
            drop_index = pd.isnull(filter_train_df).any(1)
            drop_index = drop_index.replace(True, 1).replace(False, 0)
            self.data_dictionary["train_features"] = self.data_dictionary["train_features"][
                (drop_index == 0)
            ]
            self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][
                (drop_index == 0)
            ]
            self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][
                (drop_index == 0)
            ]

            # do the same for the test data
            filter_test_df = self.data_dictionary["test_features"][
                (self.data_dictionary["test_features"] < upper_quantile)
                & (self.data_dictionary["test_features"] > lower_quantile)
            ]
            drop_index = pd.isnull(filter_test_df).any(1)
            drop_index = drop_index.replace(True, 1).replace(False, 0)
            self.data_dictionary["test_labels"] = self.data_dictionary["test_labels"][
                (drop_index == 0)
            ]
            self.data_dictionary["test_features"] = self.data_dictionary["test_features"][
                (drop_index == 0)
            ]
            self.data_dictionary["test_weights"] = self.data_dictionary["test_weights"][
                (drop_index == 0)
            ]

        return
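
Note the printed message mentions "3 std deviations", but the implementation actually clips by the 0.1%/99.9% quantiles of the training features. A toy sketch of the mechanism:

import numpy as np
import pandas as pd

train = pd.DataFrame({"f0": np.random.randn(1000)})
lower, upper = train.quantile(0.001), train.quantile(0.999)

# values outside the band become NaN; rows containing a NaN are dropped
# from training, or masked via do_predict at prediction time
clipped = train[(train < upper) & (train > lower)]
keep = ~pd.isnull(clipped).any(axis=1)
print(len(train) - keep.sum(), "rows flagged as outliers")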
    def build_feature_list(self, config: dict) -> list:
        """
        Build the list of features that will be used to filter
        the full dataframe. The feature list is constructed from the
        user configuration file.
        :params:
        :config: Canonical freqtrade config file containing all
        user defined input in the config['freqai'] dictionary.
        """
        features = []
        for tf in config["freqai"]["timeframes"]:
            for ft in config["freqai"]["base_features"]:
                for n in range(config["freqai"]["feature_parameters"]["shift"] + 1):
                    shift = ""
                    if n > 0:
                        shift = "_shift-" + str(n)
                    features.append(ft + shift + "_" + tf)
                    for p in config["freqai"]["corr_pairlist"]:
                        features.append(p.split("/")[0] + "-" + ft + shift + "_" + tf)

        print("number of features", len(features))
        return features
    def check_if_pred_in_training_spaces(self) -> None:
        """
        Compares the distance from each prediction point to each training data
        point. It uses this information to estimate a Dissimilarity Index (DI)
        and avoid making predictions on any points that are too far away
        from the training data set.
        """
        print("checking if prediction features are in AOA")
        distance = pairwise_distances(
            self.data_dictionary["train_features"],
            self.data_dictionary["prediction_features"],
            n_jobs=-1,
        )

        do_predict = np.where(
            distance.min(axis=0) / self.data["avg_mean_dist"]
            < self.config["freqai"]["feature_parameters"]["DI_threshold"],
            1,
            0,
        )

        print(
            "Distance checker tossed",
            len(do_predict) - do_predict.sum(),
            "predictions for being too far from training data",
        )

        self.do_predict += do_predict
        self.do_predict -= 1
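
A numeric sketch of the Dissimilarity Index gate, with illustrative values (avg_mean_dist comes from compute_distances(), DI_threshold from the config):

import numpy as np

avg_mean_dist = 2.0                    # mean pairwise training distance
DI_threshold = 1                       # from feature_parameters
min_dists = np.array([0.5, 1.9, 4.2])  # nearest training point per prediction

do_predict = np.where(min_dists / avg_mean_dist < DI_threshold, 1, 0)
print(do_predict)  # [1 1 0] -> third point is too far from the training space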
    def set_weights_higher_recent(self, num_weights: int) -> int:
        """
        Set weights so that recent data is more heavily weighted during
@@ -401,8 +513,9 @@ class DataHandler:
        """
        weights = np.zeros(num_weights)
        for i in range(1, len(weights)):
            weights[len(weights) - i] = np.exp(
                -i / (self.config["freqai"]["feature_parameters"]["weight_factor"] * num_weights)
            )
        return weights
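
A quick look at the weighting curve for small, illustrative values (the shipped config sets weight_factor to 0, which disables this path entirely):

import numpy as np

num_weights, weight_factor = 5, 0.5
weights = np.zeros(num_weights)
for i in range(1, num_weights):
    weights[num_weights - i] = np.exp(-i / (weight_factor * num_weights))
print(weights.round(2))  # [0.   0.2  0.3  0.45 0.67] -> newest candles weigh most

Note the oldest point keeps weight 0, since the loop starts at i = 1.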
    def append_predictions(self, predictions, do_predict, len_dataframe):

@@ -411,12 +524,12 @@ class DataHandler:
        """
        ones = np.ones(len_dataframe)
        s_mean, s_std = ones * self.data["s_mean"], ones * self.data["s_std"]

        self.predictions = np.append(self.predictions, predictions)
        self.do_predict = np.append(self.do_predict, do_predict)
        self.target_mean = np.append(self.target_mean, s_mean)
        self.target_std = np.append(self.target_std, s_std)

        return
@@ -426,14 +539,14 @@ class DataHandler:
        when it goes back to the strategy. These rows are not included in the backtest.
        """
        filler = np.zeros(len_dataframe - len(self.predictions))  # startup_candle_count
        self.predictions = np.append(filler, self.predictions)
        self.do_predict = np.append(filler, self.do_predict)
        self.target_mean = np.append(filler, self.target_mean)
        self.target_std = np.append(filler, self.target_std)

        return

    def np_encoder(self, object):
        if isinstance(object, np.generic):
            return object.item()

View File

@@ -1,20 +1,23 @@
import gc
import shutil
from abc import ABC
from pathlib import Path
from typing import Any, Dict, Tuple

import numpy as np
import pandas as pd
from pandas import DataFrame

from freqtrade.freqai.data_handler import DataHandler

pd.options.mode.chained_assignment = None


class IFreqaiModel(ABC):
    """
    Class containing all tools for training and prediction in the strategy.
    User models should inherit from this class as shown in
    templates/ExamplePredictionModel.py where the user overrides
    train(), predict(), fit(), and make_labels().
    Author: Robert Caulk, rob.caulk@gmail.com
    """
@@ -23,61 +26,71 @@ class IFreqaiModel(ABC):

    def __init__(self, config: Dict[str, Any]) -> None:

        self.config = config
        self.freqai_info = config["freqai"]
        self.data_split_parameters = config["freqai"]["data_split_parameters"]
        self.model_training_parameters = config["freqai"]["model_training_parameters"]
        self.feature_parameters = config["freqai"]["feature_parameters"]
        self.full_path = Path(
            config["user_data_dir"]
            / "models"
            / str(self.freqai_info["full_timerange"] + self.freqai_info["identifier"])
        )
        self.time_last_trained = None
        self.current_time = None
        self.model = None
        self.predictions = None

        if not self.full_path.is_dir():
            self.full_path.mkdir(parents=True, exist_ok=True)
            shutil.copy(
                self.config["config_files"][0],
                Path(self.full_path / self.config["config_files"][0]),
            )

    def start(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """
        Entry point to the FreqaiModel: it will train a new model if
        necessary before making the prediction.
        The backtesting and training paradigm is a sliding training window
        with a following backtest window. Both windows slide according to the
        length of the backtest window. This function is not intended to be
        overridden by children of IFreqaiModel, but technically, it can be
        if the user wishes to make deeper changes to the sliding window
        logic.
        :params:
        :dataframe: Full dataframe coming from strategy - it contains the entire
        backtesting timerange + additional historical data necessary to train
        the model.
        :metadata: pair metadata coming from strategy.
        """
        self.pair = metadata["pair"]
        self.dh = DataHandler(self.config, dataframe)

        print(
            "going to train",
            len(self.dh.training_timeranges),
            "timeranges:",
            self.dh.training_timeranges,
        )

        # Loop enforcing the sliding window training/backtesting paradigm
        # tr_train is the training time range e.g. 1 historical month
        # tr_backtest is the backtesting time range e.g. the week directly
        # following tr_train. Both of these windows slide through the
        # entire backtest
        for tr_train, tr_backtest in zip(
            self.dh.training_timeranges, self.dh.backtesting_timeranges
        ):
            gc.collect()
            self.dh.data = {}  # clean the pair specific data between models
            self.freqai_info["training_timerange"] = tr_train
            dataframe_train = self.dh.slice_dataframe(tr_train, dataframe)
            dataframe_backtest = self.dh.slice_dataframe(tr_backtest, dataframe)
            print("training", self.pair, "for", tr_train)
            self.dh.model_path = Path(self.full_path / str("sub-train" + "-" + str(tr_train)))
            if not self.model_exists(self.pair, training_timerange=tr_train):
                self.model = self.train(dataframe_train, metadata)
                self.dh.save_data(self.model)
@@ -86,8 +99,8 @@ class IFreqaiModel(ABC):
            preds, do_preds = self.predict(dataframe_backtest)

            self.dh.append_predictions(preds, do_preds, len(dataframe_backtest))

        self.dh.fill_predictions(len(dataframe))

        return self.dh.predictions, self.dh.do_predict, self.dh.target_mean, self.dh.target_std
@@ -107,7 +120,7 @@ class IFreqaiModel(ABC):
        for storing, saving, loading, and analyzing the data.
        :params:
        :unfiltered_dataframe: Full dataframe for the current training period
        :metadata: pair metadata from strategy.
        :returns:
        :model: Trained model which can be used to inference (self.predict)
        """
@@ -116,40 +129,40 @@ class IFreqaiModel(ABC):
    def fit(self) -> Any:
        """
        Most regressors use the same function names and arguments e.g. user
        can drop in LGBMRegressor in place of CatBoostRegressor and all data
        management will be properly handled by Freqai.
        :params:
        :data_dictionary: the dictionary constructed by DataHandler to hold
        all the training and test data/labels.
        """
        return Any

    def predict(self, dataframe: DataFrame) -> Tuple[np.array, np.array]:
        """
        Filter the prediction features data and predict with it.
        :param: unfiltered_dataframe: Full dataframe for the current backtest period.
        :return:
        :predictions: np.array of predictions
        :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
        data (NaNs) or felt uncertain about data (PCA and DI index)
        """
        return np.array([]), np.array([])

    def model_exists(self, pair: str, training_timerange: str) -> bool:
        """
        Given a pair and path, check if a model already exists
        :param pair: pair e.g. BTC/USD
        :param path: path to model
        """
        coin, _ = pair.split("/")
        self.dh.model_filename = "cb_" + coin.lower() + "_" + training_timerange
        path_to_modelfile = Path(self.dh.model_path / str(self.dh.model_filename + "_model.joblib"))
        file_exists = path_to_modelfile.is_file()
        if file_exists:
            print("Found model at", self.dh.model_path / self.dh.model_filename)
        else:
            print("Could not find model at", self.dh.model_path / self.dh.model_filename)
        return file_exists

View File

@@ -3,10 +3,10 @@ from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver

class CustomModel:
    """
    A bridge between the user defined IFreqaiModel class
    and the strategy.
    """

    def __init__(self, config):

        self.bridge = FreqaiModelResolver.load_freqaimodel(config)
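
A hedged sketch of how a strategy might consume the bridge; the surrounding strategy code is assumed here, but the four return values match IFreqaiModel.start() above:

from freqtrade.freqai.strategy_bridge import CustomModel

model = CustomModel(config)  # resolves the user's IFreqaiModel subclass
(predictions, do_predict,
 target_mean, target_std) = model.bridge.start(dataframe, metadata)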

View File

@@ -12,6 +12,7 @@ from freqtrade.exceptions import OperationalException
from freqtrade.freqai.freqai_interface import IFreqaiModel
from freqtrade.resolvers import IResolver

logger = logging.getLogger(__name__)
@@ -19,10 +20,11 @@ class FreqaiModelResolver(IResolver):
    """
    This class contains all the logic to load a custom FreqaiModel class
    """

    object_type = IFreqaiModel
    object_type_str = "FreqaiModel"
    user_subdir = USERPATH_FREQAIMODELS
    initial_search_path = Path(__file__).parent.parent.joinpath("optimize").resolve()

    @staticmethod
    def load_freqaimodel(config: Dict) -> IFreqaiModel:
@@ -31,15 +33,17 @@ class FreqaiModelResolver(IResolver):
        :param config: configuration dictionary
        """
        freqaimodel_name = config.get("freqaimodel")
        if not freqaimodel_name:
            raise OperationalException(
                "No freqaimodel set. Please use `--freqaimodel` to "
                "specify the FreqaiModel class to use.\n"
            )
        freqaimodel = FreqaiModelResolver.load_object(
            freqaimodel_name,
            config,
            kwargs={"config": config},
            extra_dir=config.get("freqaimodel_path"),
        )

        return freqaimodel

View File

@@ -1,15 +1,17 @@
from typing import Any, Dict, Tuple

import pandas as pd
from catboost import CatBoostRegressor, Pool
from pandas import DataFrame

from freqtrade.freqai.freqai_interface import IFreqaiModel


class ExamplePredictionModel(IFreqaiModel):
    """
    User created prediction model. The class needs to override three necessary
    functions, predict(), train(), fit(). The class inherits ModelHandler which
    has its own DataHandler where data is held, saved, loaded, and managed.
    """

    def make_labels(self, dataframe: DataFrame) -> DataFrame:
@@ -19,15 +21,20 @@ class ExamplePredictionModel(IFreqaiModel):
        :dataframe: the full dataframe for the present training period
        """
        dataframe["s"] = (
            dataframe["close"]
            .shift(-self.feature_parameters["period"])
            .rolling(self.feature_parameters["period"])
            .max()
            / dataframe["close"]
            - 1
        )
        self.dh.data["s_mean"] = dataframe["s"].mean()
        self.dh.data["s_std"] = dataframe["s"].std()

        print("label mean", self.dh.data["s_mean"], "label std", self.dh.data["s_std"])

        return dataframe["s"]
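
In words: the label s is the maximum close over the next `period` candles, relative to the current close. A tiny worked example with an assumed period of 2:

import pandas as pd

close = pd.Series([100.0, 101.0, 106.0, 103.0, 99.0])
period = 2
s = close.shift(-period).rolling(period).max() / close - 1
# s[1] = max(close[2], close[3]) / close[1] - 1 = 106 / 101 - 1 ≈ 0.0495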
    def train(self, unfiltered_dataframe: DataFrame, metadata: dict) -> Tuple[DataFrame, DataFrame]:
        """

@@ -35,7 +42,7 @@ class ExamplePredictionModel(IFreqaiModel):
        for storing, saving, loading, and managing the data.
        :params:
        :unfiltered_dataframe: Full dataframe for the current training period
        :metadata: pair metadata from strategy.
        :returns:
        :model: Trained model which can be used to inference (self.predict)
        """
@@ -46,8 +53,12 @@ class ExamplePredictionModel(IFreqaiModel):
        unfiltered_labels = self.make_labels(unfiltered_dataframe)

        # filter the features requested by user in the configuration file and elegantly handle NaNs
        features_filtered, labels_filtered = self.dh.filter_features(
            unfiltered_dataframe,
            self.dh.training_features_list,
            unfiltered_labels,
            training_filter=True,
        )

        # split data into train/test data.
        data_dictionary = self.dh.make_train_test_datasets(features_filtered, labels_filtered)
@@ -55,46 +66,47 @@ class ExamplePredictionModel(IFreqaiModel):
        data_dictionary = self.dh.standardize_data(data_dictionary)

        # optional additional data cleaning
        if self.feature_parameters["principal_component_analysis"]:
            self.dh.principal_component_analysis()
        if self.feature_parameters["remove_outliers"]:
            self.dh.remove_outliers(predict=False)
        if self.feature_parameters["DI_threshold"]:
            self.dh.data["avg_mean_dist"] = self.dh.compute_distances()

        print("length of train data", len(data_dictionary["train_features"]))

        model = self.fit(data_dictionary)

        print("Finished training")
        print(f'--------------------done training {metadata["pair"]}--------------------')

        return model

    def fit(self, data_dictionary: Dict) -> Any:
        """
        Most regressors use the same function names and arguments e.g. user
        can drop in LGBMRegressor in place of CatBoostRegressor and all data
        management will be properly handled by Freqai.
        :params:
        :data_dictionary: the dictionary constructed by DataHandler to hold
        all the training and test data/labels.
        """
        train_data = Pool(
            data=data_dictionary["train_features"],
            label=data_dictionary["train_labels"],
            weight=data_dictionary["train_weights"],
        )

        test_data = Pool(
            data=data_dictionary["test_features"],
            label=data_dictionary["test_labels"],
            weight=data_dictionary["test_weights"],
        )

        model = CatBoostRegressor(
            verbose=100, early_stopping_rounds=400, **self.model_training_parameters
        )
        model.fit(X=train_data, eval_set=test_data)

        return model
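
The docstring's drop-in claim can be illustrated with LightGBM; this is a sketch under the assumption that CatBoost-only keys such as task_type are removed from model_training_parameters first:

from lightgbm import LGBMRegressor

def fit(self, data_dictionary):
    model = LGBMRegressor(**self.model_training_parameters)
    model.fit(
        X=data_dictionary["train_features"],
        y=data_dictionary["train_labels"],
        sample_weight=data_dictionary["train_weights"],
        eval_set=[(data_dictionary["test_features"], data_dictionary["test_labels"])],
    )
    return model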
@@ -103,7 +115,7 @@ class ExamplePredictionModel(IFreqaiModel):
        """
        Filter the prediction features data and predict with it.
        :param: unfiltered_dataframe: Full dataframe for the current backtest period.
        :return:
        :predictions: np.array of predictions
        :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
        data (NaNs) or felt uncertain about data (PCA and DI index)
@@ -112,27 +124,31 @@ class ExamplePredictionModel(IFreqaiModel):
print("--------------------Starting prediction--------------------") print("--------------------Starting prediction--------------------")
original_feature_list = self.dh.build_feature_list(self.config) original_feature_list = self.dh.build_feature_list(self.config)
filtered_dataframe, _ = self.dh.filter_features(unfiltered_dataframe, original_feature_list, training_filter=False) filtered_dataframe, _ = self.dh.filter_features(
unfiltered_dataframe, original_feature_list, training_filter=False
)
filtered_dataframe = self.dh.standardize_data_from_metadata(filtered_dataframe) filtered_dataframe = self.dh.standardize_data_from_metadata(filtered_dataframe)
self.dh.data_dictionary['prediction_features'] = filtered_dataframe self.dh.data_dictionary["prediction_features"] = filtered_dataframe
# optional additional data cleaning # optional additional data cleaning
if self.feature_parameters['principal_component_analysis']: if self.feature_parameters["principal_component_analysis"]:
pca_components = self.dh.pca.transform(filtered_dataframe) pca_components = self.dh.pca.transform(filtered_dataframe)
self.dh.data_dictionary['prediction_features'] = pd.DataFrame(data=pca_components, self.dh.data_dictionary["prediction_features"] = pd.DataFrame(
columns = ['PC'+str(i) for i in range(0,self.dh.data['n_kept_components'])], data=pca_components,
index = filtered_dataframe.index) columns=["PC" + str(i) for i in range(0, self.dh.data["n_kept_components"])],
index=filtered_dataframe.index,
)
if self.feature_parameters["remove_outliers"]: if self.feature_parameters["remove_outliers"]:
self.dh.remove_outliers(predict=True) # creates dropped index self.dh.remove_outliers(predict=True) # creates dropped index
if self.feature_parameters['DI_threshold']: if self.feature_parameters["DI_threshold"]:
self.dh.check_if_pred_in_training_spaces() # sets do_predict self.dh.check_if_pred_in_training_spaces() # sets do_predict
predictions = self.model.predict(self.dh.data_dictionary['prediction_features']) predictions = self.model.predict(self.dh.data_dictionary["prediction_features"])
# compute the non-standardized predictions # compute the non-standardized predictions
predictions = predictions * self.dh.data['labels_std'] + self.dh.data['labels_mean'] predictions = predictions * self.dh.data["labels_std"] + self.dh.data["labels_mean"]
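
        # Note: the line above inverts the label standardization applied during
        # training. If labels were scaled as z = (y - mean) / std, a raw model
        # output z_hat maps back to y_hat = z_hat * std + mean, restoring
        # predictions to label units.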
print("--------------------Finished prediction--------------------") print("--------------------Finished prediction--------------------")

View File

@@ -1,61 +1,59 @@
import logging
from functools import reduce

import numpy as np
import pandas as pd
import talib.abstract as ta
from pandas import DataFrame
from technical import qtpylib

from freqtrade.freqai.strategy_bridge import CustomModel
from freqtrade.strategy import merge_informative_pair
from freqtrade.strategy.interface import IStrategy


logger = logging.getLogger(__name__)


class FreqaiExampleStrategy(IStrategy):
    """
    Example strategy showing how the user connects their own
    IFreqaiModel to the strategy. Namely, the user uses:
    self.model = CustomModel(self.config)
    self.model.bridge.start(dataframe, metadata)
    to make predictions on their data. populate_any_indicators() automatically
    generates the variety of features indicated by the user in the
    canonical freqtrade configuration file under config['freqai'].
    """

    minimal_roi = {"0": 0.01, "240": -1}

    plot_config = {
        "main_plot": {},
        "subplots": {
            "prediction": {"prediction": {"color": "blue"}},
            "target_roi": {
                "target_roi": {"color": "brown"},
            },
            "do_predict": {
                "do_predict": {"color": "brown"},
            },
        },
    }

    stoploss = -0.05
    use_sell_signal = True
    startup_candle_count: int = 1000

    def informative_pairs(self):
        # read from self.config directly; self.freqai_info is only assigned once
        # populate_indicators() has run, and informative_pairs() can be called
        # before that
        pairs = self.config["freqai"]["corr_pairlist"]
        informative_pairs = []
        for tf in self.config["freqai"]["timeframes"]:
            # extend (not append) so a flat list of (pair, timeframe) tuples is
            # returned, as freqtrade expects from informative_pairs()
            informative_pairs.extend([(pair, tf) for pair in pairs])
        return informative_pairs
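
    # For illustration: with the example config above (corr_pairlist of
    # ETH/USDT, LINK/USDT, DOT/USDT and timeframes 5m/15m), informative_pairs()
    # yields [("ETH/USDT", "5m"), ("LINK/USDT", "5m"), ("DOT/USDT", "5m"),
    # ("ETH/USDT", "15m"), ("LINK/USDT", "15m"), ("DOT/USDT", "15m")].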

    def populate_any_indicators(self, pair, df, tf, informative=None, coin=""):
        """
        Function designed to automatically generate, name and merge features
        from user indicated timeframes in the configuration file. User can add
@@ -70,110 +68,116 @@ class FreqaiExampleStrategy(IStrategy):
        if informative is None:
            informative = self.dp.get_pair_dataframe(pair, tf)

        informative[coin + "rsi"] = ta.RSI(informative, timeperiod=14)
        informative[coin + "mfi"] = ta.MFI(informative, timeperiod=25)

        informative[coin + "20sma"] = ta.SMA(informative, timeperiod=20)
        informative[coin + "21ema"] = ta.EMA(informative, timeperiod=21)
        informative[coin + "bmsb"] = np.where(
            informative[coin + "20sma"].lt(informative[coin + "21ema"]), 1, 0
        )
        informative[coin + "close_over_20sma"] = informative["close"] / informative[coin + "20sma"]

        informative[coin + "ema21"] = ta.EMA(informative, timeperiod=21)
        informative[coin + "sma20"] = ta.SMA(informative, timeperiod=20)
        stoch = ta.STOCHRSI(informative, 15, 20, 2, 2)
        informative[coin + "srsi-fk"] = stoch["fastk"]
        informative[coin + "srsi-fd"] = stoch["fastd"]

        bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(informative), window=14, stds=2.2)
        informative[coin + "bb_lowerband"] = bollinger["lower"]
        informative[coin + "bb_middleband"] = bollinger["mid"]
        informative[coin + "bb_upperband"] = bollinger["upper"]
        informative[coin + "bb_width"] = (
            informative[coin + "bb_upperband"] - informative[coin + "bb_lowerband"]
        ) / informative[coin + "bb_middleband"]
        informative[coin + "close-bb_lower"] = (
            informative["close"] / informative[coin + "bb_lowerband"]
        )

        informative[coin + "roc"] = ta.ROC(informative, timeperiod=3)
        # ta-lib's ADX takes a `timeperiod` argument, not `window`
        informative[coin + "adx"] = ta.ADX(informative, timeperiod=14)

        macd = ta.MACD(informative)
        informative[coin + "macd"] = macd["macd"]
        informative[coin + "pct-change"] = informative["close"].pct_change()
        informative[coin + "relative_volume"] = (
            informative["volume"] / informative["volume"].rolling(10).mean()
        )

        indicators = [col for col in informative if col.startswith(coin)]

        for n in range(self.freqai_info["feature_parameters"]["shift"] + 1):
            if n == 0:
                continue
            informative_shift = informative[indicators].shift(n)
            informative_shift = informative_shift.add_suffix("_shift-" + str(n))
            informative = pd.concat((informative, informative_shift), axis=1)

        df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
        skip_columns = [(s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]]
        df = df.drop(columns=skip_columns)

        return df
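
    # A worked example of the shift block above (names assumed for
    # illustration): with "shift": 1 in feature_parameters, a base feature
    # column such as "ETH-rsi" gains a lagged twin "ETH-rsi_shift-1" holding
    # the previous candle's value (suffixed again by merge_informative_pair,
    # e.g. "ETH-rsi_shift-1_15m"), so each row carries a short feature history.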

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:

        # the configuration file parameters are stored here
        self.freqai_info = self.config["freqai"]

        # the model is instantiated here
        self.model = CustomModel(self.config)

        print("Populating indicators...")

        # the following loops are necessary for building the features
        # indicated by the user in the configuration file.
        for tf in self.freqai_info["timeframes"]:
            dataframe = self.populate_any_indicators(metadata["pair"], dataframe.copy(), tf)
            for i in self.freqai_info["corr_pairlist"]:
                dataframe = self.populate_any_indicators(
                    i, dataframe.copy(), tf, coin=i.split("/")[0] + "-"
                )

        # the model returns 4 values: its prediction, an indication of whether or
        # not the prediction should be accepted, and the target mean/std values
        # from the labels used during each training period.
        (
            dataframe["prediction"],
            dataframe["do_predict"],
            dataframe["target_mean"],
            dataframe["target_std"],
        ) = self.model.bridge.start(dataframe, metadata)

        dataframe["target_roi"] = dataframe["target_mean"] + dataframe["target_std"] * 0.5
        dataframe["sell_roi"] = dataframe["target_mean"] - dataframe["target_std"] * 1.5
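
        # For example (values assumed for illustration): if target_mean is 0.005
        # and target_std is 0.01 on a candle, then target_roi = 0.01 and
        # sell_roi = -0.01, so a buy only fires when the prediction exceeds half
        # a standard deviation above the mean label seen in training.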

        return dataframe

    def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        buy_conditions = [
            (dataframe["prediction"] > dataframe["target_roi"]) & (dataframe["do_predict"] == 1)
        ]

        if buy_conditions:
            # reduce OR-combines the condition series; with a single condition
            # this is equivalent to using it directly
            dataframe.loc[reduce(lambda x, y: x | y, buy_conditions), "buy"] = 1

        return dataframe

    def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # sell_goal = eval('self.'+metadata['pair'].split("/")[0]+'_sell_goal.value')
        sell_conditions = [
            (dataframe["prediction"] < dataframe["sell_roi"]) & (dataframe["do_predict"] == 1)
        ]
        if sell_conditions:
            dataframe.loc[reduce(lambda x, y: x | y, sell_conditions), "sell"] = 1
        return dataframe

    def get_ticker_indicator(self):
        # strip the unit suffix from the timeframe, e.g. "5m" -> 5
        return int(self.config["timeframe"][:-1])