Merge branch 'develop' into precise_calcs

Author: Matthias
Date:   2022-08-16 09:29:39 +02:00
64 changed files with 5823 additions and 39 deletions

View File

@@ -12,7 +12,8 @@ from freqtrade.constants import DEFAULT_CONFIG
ARGS_COMMON = ["verbosity", "logfile", "version", "config", "datadir", "user_data_dir"]
ARGS_STRATEGY = ["strategy", "strategy_path", "recursive_strategy_search"]
ARGS_STRATEGY = ["strategy", "strategy_path", "recursive_strategy_search", "freqaimodel",
"freqaimodel_path"]
ARGS_TRADE = ["db_url", "sd_notify", "dry_run", "dry_run_wallet", "fee"]

View File

@@ -647,4 +647,14 @@ AVAILABLE_CLI_OPTIONS = {
nargs='+',
default=[],
),
"freqaimodel": Arg(
'--freqaimodel',
help='Specify a custom freqaimodel.',
metavar='NAME',
),
"freqaimodel_path": Arg(
'--freqaimodel-path',
help='Specify additional lookup path for freqaimodels.',
metavar='PATH',
),
}
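
For orientation, the two entries above appear to be thin wrappers around argparse's add_argument; a minimal plain-argparse sketch of what they wire up (the CatboostRegressor value is only an example):

import argparse

parser = argparse.ArgumentParser(prog="freqtrade")
# equivalent of the "freqaimodel" Arg: class name of the prediction model
parser.add_argument("--freqaimodel", metavar="NAME",
                    help="Specify a custom freqaimodel.")
# equivalent of the "freqaimodel_path" Arg: extra lookup directory for models
parser.add_argument("--freqaimodel-path", metavar="PATH",
                    help="Specify additional lookup path for freqaimodels.")
args = parser.parse_args(["--freqaimodel", "CatboostRegressor"])
assert args.freqaimodel == "CatboostRegressor"
assert args.freqaimodel_path is None  # falls back to the default lookup path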

View File

@@ -12,7 +12,7 @@ from freqtrade.enums import CandleType, RunMode, TradingMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_minutes
from freqtrade.exchange.exchange import market_is_active
-from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
+from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist, expand_pairlist
from freqtrade.resolvers import ExchangeResolver
@@ -50,7 +50,8 @@ def start_download_data(args: Dict[str, Any]) -> None:
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
markets = [p for p, m in exchange.markets.items() if market_is_active(m)
or config.get('include_inactive')]
-expanded_pairs = expand_pairlist(config['pairs'], markets)
+expanded_pairs = dynamic_expand_pairlist(config, markets)
# Manual validations of relevant settings
if not config['exchange'].get('skip_pair_validation', False):
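
The body of dynamic_expand_pairlist is not shown in this diff; a plausible sketch, assuming it simply unions the static pairs with the freqai correlated pairlist before wildcard expansion:

from typing import Any, Dict, List

from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist

def dynamic_expand_pairlist_sketch(config: Dict[str, Any], markets: List[str]) -> List[str]:
    # Assumed behaviour: static pairs plus freqai include_corr_pairlist,
    # de-duplicated, then expanded against the active markets.
    pairs = list(config['pairs'])
    if config.get('freqai', {}).get('enabled', False):
        corr = config['freqai']['feature_parameters'].get('include_corr_pairlist', [])
        pairs.extend(p for p in corr if p not in pairs)
    return expand_pairlist(pairs, markets)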

View File

@@ -97,6 +97,8 @@ class Configuration:
self._process_analyze_options(config)
self._process_freqai_options(config)
# Check if the exchange set by the user is supported
check_exchange(config, config.get('experimental', {}).get('block_bad_exchanges', True))
@@ -461,6 +463,16 @@ class Configuration:
config.update({'runmode': self.runmode})
def _process_freqai_options(self, config: Dict[str, Any]) -> None:
self._args_to_config(config, argname='freqaimodel',
logstring='Using freqaimodel class name: {}')
self._args_to_config(config, argname='freqaimodel_path',
logstring='Using freqaimodel path: {}')
return
def _args_to_config(self, config: Dict[str, Any], argname: str,
logstring: str, logfun: Optional[Callable] = None,
deprecated_msg: Optional[str] = None) -> None:

View File

@@ -55,6 +55,7 @@ FTHYPT_FILEVERSION = 'fthypt_fileversion'
USERPATH_HYPEROPTS = 'hyperopts'
USERPATH_STRATEGIES = 'strategies'
USERPATH_NOTEBOOKS = 'notebooks'
USERPATH_FREQAIMODELS = 'freqaimodels'
TELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']
WEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']
@@ -240,6 +241,7 @@ CONF_SCHEMA = {
},
'exchange': {'$ref': '#/definitions/exchange'},
'edge': {'$ref': '#/definitions/edge'},
'freqai': {'$ref': '#/definitions/freqai'},
'experimental': {
'type': 'object',
'properties': {
@@ -480,7 +482,60 @@ CONF_SCHEMA = {
'remove_pumps': {'type': 'boolean'}
},
'required': ['process_throttle_secs', 'allowed_risk']
}
},
"freqai": {
"type": "object",
"properties": {
"enabled": {"type": "boolean", "default": False},
"keras": {"type": "boolean", "default": False},
"conv_width": {"type": "integer", "default": 2},
"train_period_days": {"type": "integer", "default": 0},
"backtest_period_days": {"type": "number", "default": 7},
"identifier": {"type": "string", "default": "example"},
"feature_parameters": {
"type": "object",
"properties": {
"include_corr_pairlist": {"type": "array"},
"include_timeframes": {"type": "array"},
"label_period_candles": {"type": "integer"},
"include_shifted_candles": {"type": "integer", "default": 0},
"DI_threshold": {"type": "number", "default": 0},
"weight_factor": {"type": "number", "default": 0},
"principal_component_analysis": {"type": "boolean", "default": False},
"use_SVM_to_remove_outliers": {"type": "boolean", "default": False},
"svm_params": {"type": "object",
"properties": {
"shuffle": {"type": "boolean", "default": False},
"nu": {"type": "number", "default": 0.1}
},
}
},
"required": ["include_timeframes", "include_corr_pairlist", ]
},
"data_split_parameters": {
"type": "object",
"properties": {
"test_size": {"type": "number"},
"random_state": {"type": "integer"},
},
},
"model_training_parameters": {
"type": "object",
"properties": {
"n_estimators": {"type": "integer", "default": 1000}
},
},
},
"required": [
"enabled",
"train_period_days",
"backtest_period_days",
"identifier",
"feature_parameters",
"data_split_parameters",
"model_training_parameters"
]
},
},
}
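
A minimal configuration block that satisfies the required keys of the schema above (values are illustrative, not recommendations):

minimal_freqai_config = {
    "enabled": True,
    "train_period_days": 30,
    "backtest_period_days": 7,
    "identifier": "example",
    "feature_parameters": {
        "include_timeframes": ["5m", "1h"],
        "include_corr_pairlist": ["BTC/USDT", "ETH/USDT"],
    },
    "data_split_parameters": {"test_size": 0.25, "random_state": 42},
    "model_training_parameters": {"n_estimators": 1000},
}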

View File

@@ -1009,7 +1009,8 @@ class Exchange:
time_in_force: str = 'gtc',
) -> Dict:
if self._config['dry_run']:
-dry_order = self.create_dry_run_order(pair, ordertype, side, amount, rate, leverage)
+dry_order = self.create_dry_run_order(
+    pair, ordertype, side, amount, self.price_to_precision(pair, rate), leverage)
return dry_order
params = self._get_params(side, ordertype, leverage, reduceOnly, time_in_force)
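
The change above rounds the dry-run rate to exchange precision before the simulated order is built, so dry runs match what a real exchange would accept. A standalone sketch of that kind of rounding (the tick size and half-up rounding are assumptions; freqtrade's actual price_to_precision follows each exchange's own precision rules):

from decimal import ROUND_HALF_UP, Decimal

def price_to_precision_sketch(rate: float, tick_size: float = 0.01) -> float:
    # Quantize a raw rate to an assumed tick size of 0.01.
    return float(Decimal(str(rate)).quantize(Decimal(str(tick_size)),
                                             rounding=ROUND_HALF_UP))

assert price_to_precision_sketch(1.23456) == 1.23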

View File

@@ -0,0 +1,609 @@
import collections
import json
import logging
import re
import shutil
import threading
from pathlib import Path
from typing import Any, Dict, Tuple, TypedDict
import numpy as np
import pandas as pd
import rapidjson
from joblib import dump, load
from joblib.externals import cloudpickle
from numpy.typing import NDArray
from pandas import DataFrame
from freqtrade.configuration import TimeRange
from freqtrade.data.history import load_pair_history
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.strategy.interface import IStrategy
logger = logging.getLogger(__name__)
class pair_info(TypedDict):
model_filename: str
first: bool
trained_timestamp: int
priority: int
data_path: str
extras: dict
class FreqaiDataDrawer:
"""
Class aimed at holding all pair models/info in memory for better inferencing/retraining/
saving/loading to/from disk.
This object remains persistent throughout live/dry.
Record of contribution:
FreqAI was developed by a group of individuals who all contributed specific skillsets to the
project.
Conception and software development:
Robert Caulk @robcaulk
Theoretical brainstorming:
Elin Törnquist @th0rntwig
Code review, software architecture brainstorming:
@xmatthias
Beta testing and bug reporting:
@bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm
Juha Nykänen @suikula, Wagner Costa @wagnercosta, Johan Vlugt @Jooopieeert
"""
def __init__(self, full_path: Path, config: dict, follow_mode: bool = False):
self.config = config
self.freqai_info = config.get("freqai", {})
# dictionary holding all pair metadata necessary to load in from disk
self.pair_dict: Dict[str, pair_info] = {}
# dictionary holding all actively inferenced models in memory given a model filename
self.model_dictionary: Dict[str, Any] = {}
self.model_return_values: Dict[str, DataFrame] = {}
self.historic_data: Dict[str, Dict[str, DataFrame]] = {}
self.historic_predictions: Dict[str, DataFrame] = {}
self.follower_dict: Dict[str, pair_info] = {}
self.full_path = full_path
self.follower_name: str = self.config.get("bot_name", "follower1")
self.follower_dict_path = Path(
self.full_path / f"follower_dictionary-{self.follower_name}.json"
)
self.historic_predictions_path = Path(self.full_path / "historic_predictions.pkl")
self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json")
self.follow_mode = follow_mode
if follow_mode:
self.create_follower_dict()
self.load_drawer_from_disk()
self.load_historic_predictions_from_disk()
self.training_queue: Dict[str, int] = {}
self.history_lock = threading.Lock()
self.save_lock = threading.Lock()
self.pair_dict_lock = threading.Lock()
self.old_DBSCAN_eps: Dict[str, float] = {}
self.empty_pair_dict: pair_info = {
"model_filename": "", "trained_timestamp": 0,
"priority": 1, "first": True, "data_path": "", "extras": {}}
def load_drawer_from_disk(self):
"""
Locate and load a previously saved data drawer full of all pair model metadata in
present model folder.
:return: bool - whether or not the drawer was located
"""
exists = self.pair_dictionary_path.is_file()
if exists:
with open(self.pair_dictionary_path, "r") as fp:
self.pair_dict = json.load(fp)
elif not self.follow_mode:
logger.info("Could not find existing datadrawer, starting from scratch")
else:
logger.warning(
f"Follower could not find pair_dictionary at {self.full_path} "
"sending null values back to strategy"
)
return exists
def load_historic_predictions_from_disk(self):
"""
Locate and load previously saved historic predictions.
:return: bool - whether or not the drawer was located
"""
exists = self.historic_predictions_path.is_file()
if exists:
with open(self.historic_predictions_path, "rb") as fp:
self.historic_predictions = cloudpickle.load(fp)
logger.info(
f"Found existing historic predictions at {self.full_path}, but beware "
"that statistics may be inaccurate if the bot has been offline for "
"an extended period of time."
)
elif not self.follow_mode:
logger.info("Could not find existing historic_predictions, starting from scratch")
else:
logger.warning(
f"Follower could not find historic predictions at {self.full_path} "
"sending null values back to strategy"
)
return exists
def save_historic_predictions_to_disk(self):
"""
Save the historic predictions to disk in the present model folder.
"""
with open(self.historic_predictions_path, "wb") as fp:
cloudpickle.dump(self.historic_predictions, fp, protocol=cloudpickle.DEFAULT_PROTOCOL)
def save_drawer_to_disk(self):
"""
Save data drawer full of all pair model metadata in present model folder.
"""
with self.save_lock:
with open(self.pair_dictionary_path, 'w') as fp:
rapidjson.dump(self.pair_dict, fp, default=self.np_encoder,
number_mode=rapidjson.NM_NATIVE)
def save_follower_dict_to_disk(self):
"""
Save follower dictionary to disk (used by strategy for persistent prediction targets)
"""
with open(self.follower_dict_path, "w") as fp:
rapidjson.dump(self.follower_dict, fp, default=self.np_encoder,
number_mode=rapidjson.NM_NATIVE)
def create_follower_dict(self):
"""
Create a dictionary for each follower to maintain unique persistent prediction targets
"""
whitelist_pairs = self.config.get("exchange", {}).get("pair_whitelist")
exists = self.follower_dict_path.is_file()
if exists:
logger.info("Found an existing follower dictionary")
for pair in whitelist_pairs:
self.follower_dict[pair] = {}
self.save_follower_dict_to_disk()
def np_encoder(self, object):
if isinstance(object, np.generic):
return object.item()
def get_pair_dict_info(self, pair: str) -> Tuple[str, int, bool]:
"""
Locate and load existing model metadata from persistent storage. If not located,
create a new one and append the current pair to it and prepare it for its first
training
:param pair: str: pair to lookup
:return:
model_filename: str = unique filename used for loading persistent objects from disk
trained_timestamp: int = the last time the coin was trained
return_null_array: bool = Follower could not find pair metadata
"""
pair_dict = self.pair_dict.get(pair)
data_path_set = self.pair_dict.get(pair, self.empty_pair_dict).get("data_path", "")
return_null_array = False
if pair_dict:
model_filename = pair_dict["model_filename"]
trained_timestamp = pair_dict["trained_timestamp"]
elif not self.follow_mode:
self.pair_dict[pair] = self.empty_pair_dict.copy()
model_filename = ""
trained_timestamp = 0
self.pair_dict[pair]["priority"] = len(self.pair_dict)
if not data_path_set and self.follow_mode:
logger.warning(
f"Follower could not find current pair {pair} in "
f"pair_dictionary at path {self.full_path}, sending null values "
"back to strategy."
)
trained_timestamp = 0
model_filename = ''
return_null_array = True
return model_filename, trained_timestamp, return_null_array
def set_pair_dict_info(self, metadata: dict) -> None:
pair_in_dict = self.pair_dict.get(metadata["pair"])
if pair_in_dict:
return
else:
self.pair_dict[metadata["pair"]] = self.empty_pair_dict.copy()
self.pair_dict[metadata["pair"]]["priority"] = len(self.pair_dict)
return
def pair_to_end_of_training_queue(self, pair: str) -> None:
# march all pairs up in the queue
with self.pair_dict_lock:
for p in self.pair_dict:
self.pair_dict[p]["priority"] -= 1
# send pair to end of queue
self.pair_dict[pair]["priority"] = len(self.pair_dict)
def set_initial_return_values(self, pair: str, pred_df: DataFrame) -> None:
"""
Set the initial return values to the historical predictions dataframe. This avoids needing
to repredict on historical candles, and also stores historical predictions despite
retrainings (so stored predictions are true predictions, not just inferencing on trained
data)
"""
hist_df = self.historic_predictions
len_diff = len(hist_df[pair].index) - len(pred_df.index)
if len_diff < 0:
df_concat = pd.concat([pred_df.iloc[:abs(len_diff)], hist_df[pair]],
ignore_index=True, keys=hist_df[pair].keys())
else:
df_concat = hist_df[pair].tail(len(pred_df.index)).reset_index(drop=True)
df_concat = df_concat.fillna(0)
self.model_return_values[pair] = df_concat
def append_model_predictions(self, pair: str, predictions: DataFrame,
do_preds: NDArray[np.int_],
dk: FreqaiDataKitchen, len_df: int) -> None:
"""
Append model predictions to historic predictions dataframe, then set the
strategy return dataframe to the tail of the historic predictions. The length of
the tail is equivalent to the length of the dataframe that entered FreqAI from
the strategy originally. Doing this allows FreqUI to always display the correct
historic predictions.
"""
index = self.historic_predictions[pair].index[-1:]
columns = self.historic_predictions[pair].columns
nan_df = pd.DataFrame(np.nan, index=index, columns=columns)
self.historic_predictions[pair] = pd.concat(
[self.historic_predictions[pair], nan_df], ignore_index=True, axis=0)
df = self.historic_predictions[pair]
# model outputs and associated statistics
for label in predictions.columns:
df[label].iloc[-1] = predictions[label].iloc[-1]
if df[label].dtype == object:
continue
df[f"{label}_mean"].iloc[-1] = dk.data["labels_mean"][label]
df[f"{label}_std"].iloc[-1] = dk.data["labels_std"][label]
# outlier indicators
df["do_predict"].iloc[-1] = do_preds[-1]
if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0:
df["DI_values"].iloc[-1] = dk.DI_values[-1]
# extra values the user added within custom prediction model
if dk.data['extra_returns_per_train']:
rets = dk.data['extra_returns_per_train']
for return_str in rets:
df[return_str].iloc[-1] = rets[return_str]
self.model_return_values[pair] = df.tail(len_df).reset_index(drop=True)
def attach_return_values_to_return_dataframe(
self, pair: str, dataframe: DataFrame) -> DataFrame:
"""
Attach the return values to the strat dataframe
:param dataframe: DataFrame = strategy dataframe
:return: DataFrame = strat dataframe with return values attached
"""
df = self.model_return_values[pair]
to_keep = [col for col in dataframe.columns if not col.startswith("&")]
dataframe = pd.concat([dataframe[to_keep], df], axis=1)
return dataframe
def return_null_values_to_strategy(self, dataframe: DataFrame, dk: FreqaiDataKitchen) -> None:
"""
Build 0 filled dataframe to return to strategy
"""
dk.find_features(dataframe)
full_labels = dk.label_list + dk.unique_class_list
for label in full_labels:
dataframe[label] = 0
dataframe[f"{label}_mean"] = 0
dataframe[f"{label}_std"] = 0
dataframe["do_predict"] = 0
if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0:
dataframe["DI_values"] = 0
if dk.data['extra_returns_per_train']:
rets = dk.data['extra_returns_per_train']
for return_str in rets:
dataframe[return_str] = 0
dk.return_dataframe = dataframe
def purge_old_models(self) -> None:
model_folders = [x for x in self.full_path.iterdir() if x.is_dir()]
pattern = re.compile(r"sub-train-(\w+)_(\d{10})")
delete_dict: Dict[str, Any] = {}
for dir in model_folders:
result = pattern.match(str(dir.name))
if result is None:
break
coin = result.group(1)
timestamp = result.group(2)
if coin not in delete_dict:
delete_dict[coin] = {}
delete_dict[coin]["num_folders"] = 1
delete_dict[coin]["timestamps"] = {int(timestamp): dir}
else:
delete_dict[coin]["num_folders"] += 1
delete_dict[coin]["timestamps"][int(timestamp)] = dir
for coin in delete_dict:
if delete_dict[coin]["num_folders"] > 2:
sorted_dict = collections.OrderedDict(
sorted(delete_dict[coin]["timestamps"].items())
)
num_delete = len(sorted_dict) - 2
deleted = 0
for k, v in sorted_dict.items():
if deleted >= num_delete:
break
logger.info(f"Freqai purging old model file {v}")
shutil.rmtree(v)
deleted += 1
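# Editor sketch (hypothetical folder names): given sub-train-BTC_1650000000,
# sub-train-BTC_1650600000 and sub-train-BTC_1651200000, num_folders is 3, so
# num_delete = 3 - 2 = 1 and only the oldest folder, sub-train-BTC_1650000000,
# is removed; the two newest models per coin are kept.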
def update_follower_metadata(self):
# follower needs to load from disk to get any changes made by leader to pair_dict
self.load_drawer_from_disk()
if self.config.get("freqai", {}).get("purge_old_models", False):
self.purge_old_models()
# Functions pulled back from FreqaiDataKitchen because they relied on DataDrawer
def save_data(self, model: Any, coin: str, dk: FreqaiDataKitchen) -> None:
"""
Saves all data associated with a model for a single sub-train time range
:params:
:model: User trained model which can be reused for inferencing to generate
predictions
"""
if not dk.data_path.is_dir():
dk.data_path.mkdir(parents=True, exist_ok=True)
save_path = Path(dk.data_path)
# Save the trained model
if not dk.keras:
dump(model, save_path / f"{dk.model_filename}_model.joblib")
else:
model.save(save_path / f"{dk.model_filename}_model.h5")
if dk.svm_model is not None:
dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib")
dk.data["data_path"] = str(dk.data_path)
dk.data["model_filename"] = str(dk.model_filename)
dk.data["training_features_list"] = list(dk.data_dictionary["train_features"].columns)
dk.data["label_list"] = dk.label_list
# store the metadata
with open(save_path / f"{dk.model_filename}_metadata.json", "w") as fp:
rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)
# save the train data to file so we can check preds for area of applicability later
dk.data_dictionary["train_features"].to_pickle(
save_path / f"{dk.model_filename}_trained_df.pkl"
)
dk.data_dictionary["train_dates"].to_pickle(
save_path / f"{dk.model_filename}_trained_dates_df.pkl"
)
if self.freqai_info["feature_parameters"].get("principal_component_analysis"):
cloudpickle.dump(
dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb")
)
# if self.live:
self.model_dictionary[dk.model_filename] = model
self.pair_dict[coin]["model_filename"] = dk.model_filename
self.pair_dict[coin]["data_path"] = str(dk.data_path)
self.save_drawer_to_disk()
return
def load_data(self, coin: str, dk: FreqaiDataKitchen) -> Any:
"""
loads all data required to make a prediction on a sub-train time range
:returns:
:model: User trained model which can be inferenced for new predictions
"""
if not self.pair_dict[coin]["model_filename"]:
return None
if dk.live:
dk.model_filename = self.pair_dict[coin]["model_filename"]
dk.data_path = Path(self.pair_dict[coin]["data_path"])
if self.freqai_info.get("follow_mode", False):
# follower can be on a different system which is rsynced from the leader:
dk.data_path = Path(
self.config["user_data_dir"]
/ "models"
/ dk.data_path.parts[-2]
/ dk.data_path.parts[-1]
)
with open(dk.data_path / f"{dk.model_filename}_metadata.json", "r") as fp:
dk.data = json.load(fp)
dk.training_features_list = dk.data["training_features_list"]
dk.label_list = dk.data["label_list"]
dk.data_dictionary["train_features"] = pd.read_pickle(
dk.data_path / f"{dk.model_filename}_trained_df.pkl"
)
# try to access model in memory instead of loading object from disk to save time
if dk.live and dk.model_filename in self.model_dictionary:
model = self.model_dictionary[dk.model_filename]
elif not dk.keras:
model = load(dk.data_path / f"{dk.model_filename}_model.joblib")
else:
from tensorflow import keras
model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
if not model:
raise OperationalException(
f"Unable to load model, ensure model exists at " f"{dk.data_path} "
)
if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
dk.pca = cloudpickle.load(
open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "rb")
)
return model
def update_historic_data(self, strategy: IStrategy, dk: FreqaiDataKitchen) -> None:
"""
Append new candles to our stored historic data (in memory) so that
we do not need to load candle history from disk and do not need to
ping the exchange multiple times for the same candle.
:params:
dataframe: DataFrame = strategy provided dataframe
"""
feat_params = self.freqai_info["feature_parameters"]
with self.history_lock:
history_data = self.historic_data
for pair in dk.all_pairs:
for tf in feat_params.get("include_timeframes"):
# check if newest candle is already appended
df_dp = strategy.dp.get_pair_dataframe(pair, tf)
if len(df_dp.index) == 0:
continue
if str(history_data[pair][tf].iloc[-1]["date"]) == str(
df_dp.iloc[-1:]["date"].iloc[-1]
):
continue
try:
index = (
df_dp.loc[
df_dp["date"] == history_data[pair][tf].iloc[-1]["date"]
].index[0]
+ 1
)
except IndexError:
logger.warning(
f"Unable to update pair history for {pair}. "
"If this does not resolve itself after 1 additional candle, "
"please report the error to #freqai discord channel"
)
return
history_data[pair][tf] = pd.concat(
[
history_data[pair][tf],
df_dp.iloc[index:],
],
ignore_index=True,
axis=0,
)
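# Editor note: `index` above points one past the newest candle already stored;
# e.g. if stored history ends at 12:00 and df_dp holds candles for
# [11:55, 12:00, 12:05], the date lookup matches position 1, index becomes 2,
# and only the 12:05 row is appended.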
def load_all_pair_histories(self, timerange: TimeRange, dk: FreqaiDataKitchen) -> None:
"""
Load pair histories for all whitelist and corr_pairlist pairs.
Only called once upon startup of bot.
:params:
timerange: TimeRange = full timerange required to populate all indicators
for training according to user defined train_period_days
"""
history_data = self.historic_data
for pair in dk.all_pairs:
if pair not in history_data:
history_data[pair] = {}
for tf in self.freqai_info["feature_parameters"].get("include_timeframes"):
history_data[pair][tf] = load_pair_history(
datadir=self.config["datadir"],
timeframe=tf,
pair=pair,
timerange=timerange,
data_format=self.config.get("dataformat_ohlcv", "json"),
candle_type=self.config.get("trading_mode", "spot"),
)
def get_base_and_corr_dataframes(
self, timerange: TimeRange, pair: str, dk: FreqaiDataKitchen
) -> Tuple[Dict[Any, Any], Dict[Any, Any]]:
"""
Searches through our historic_data in memory and returns the dataframes relevant
to the present pair.
:params:
timerange: TimeRange = full timerange required to populate all indicators
for training according to user defined train_period_days
metadata: dict = strategy furnished pair metadata
"""
with self.history_lock:
corr_dataframes: Dict[Any, Any] = {}
base_dataframes: Dict[Any, Any] = {}
historic_data = self.historic_data
pairs = self.freqai_info["feature_parameters"].get(
"include_corr_pairlist", []
)
for tf in self.freqai_info["feature_parameters"].get("include_timeframes"):
base_dataframes[tf] = dk.slice_dataframe(timerange, historic_data[pair][tf])
if pairs:
for p in pairs:
if pair in p:
continue  # don't repeat anything from the whitelist
if p not in corr_dataframes:
corr_dataframes[p] = {}
corr_dataframes[p][tf] = dk.slice_dataframe(
timerange, historic_data[p][tf]
)
return corr_dataframes, base_dataframes
# to be used if we want to send predictions directly to the follower instead of forcing
# follower to load models and inference
# def save_model_return_values_to_disk(self) -> None:
# with open(self.full_path / str('model_return_values.json'), "w") as fp:
# json.dump(self.model_return_values, fp, default=self.np_encoder)
# def load_model_return_values_from_disk(self, dk: FreqaiDataKitchen) -> FreqaiDataKitchen:
# exists = Path(self.full_path / str('model_return_values.json')).resolve().exists()
# if exists:
# with open(self.full_path / str('model_return_values.json'), "r") as fp:
# self.model_return_values = json.load(fp)
# elif not self.follow_mode:
# logger.info("Could not find existing datadrawer, starting from scratch")
# else:
# logger.warning(f'Follower could not find pair_dictionary at {self.full_path} '
# 'sending null values back to strategy')
# return exists, dk

File diff suppressed because it is too large

View File

@@ -0,0 +1,653 @@
# import contextlib
import datetime
import logging
import shutil
import threading
import time
from abc import ABC, abstractmethod
from pathlib import Path
from threading import Lock
from typing import Any, Dict, Tuple
import numpy as np
import pandas as pd
from numpy.typing import NDArray
from pandas import DataFrame
from freqtrade.configuration import TimeRange
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds
from freqtrade.freqai.data_drawer import FreqaiDataDrawer
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.strategy.interface import IStrategy
pd.options.mode.chained_assignment = None
logger = logging.getLogger(__name__)
def threaded(fn):
def wrapper(*args, **kwargs):
threading.Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
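# Editor sketch: `threaded` launches the wrapped call on a fresh thread and
# returns None immediately, e.g.
#
#     @threaded
#     def scan(pair: str) -> None:
#         ...  # runs concurrently with the caller
#
#     scan("BTC/USDT")  # returns right away; no handle is kept, so no join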
class IFreqaiModel(ABC):
"""
Class containing all tools for training and prediction in the strategy.
Base*PredictionModels inherit from this class.
Record of contribution:
FreqAI was developed by a group of individuals who all contributed specific skillsets to the
project.
Conception and software development:
Robert Caulk @robcaulk
Theoretical brainstorming:
Elin Törnquist @th0rntwig
Code review, software architecture brainstorming:
@xmatthias
Beta testing and bug reporting:
@bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm
Juha Nykänen @suikula, Wagner Costa @wagnercosta, Johan Vlugt @Jooopieeert
"""
def __init__(self, config: Dict[str, Any]) -> None:
self.config = config
self.assert_config(self.config)
self.freqai_info: Dict[str, Any] = config["freqai"]
self.data_split_parameters: Dict[str, Any] = config.get("freqai", {}).get(
"data_split_parameters", {})
self.model_training_parameters: Dict[str, Any] = config.get("freqai", {}).get(
"model_training_parameters", {})
self.feature_parameters = config.get("freqai", {}).get("feature_parameters")
self.retrain = False
self.first = True
self.set_full_path()
self.follow_mode: bool = self.freqai_info.get("follow_mode", False)
self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode)
self.identifier: str = self.freqai_info.get("identifier", "no_id_provided")
self.scanning = False
self.keras: bool = self.freqai_info.get("keras", False)
if self.keras and self.freqai_info.get("feature_parameters", {}).get("DI_threshold", 0):
self.freqai_info["feature_parameters"]["DI_threshold"] = 0
logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
self.CONV_WIDTH = self.freqai_info.get("conv_width", 2)
self.pair_it = 0
self.total_pairs = len(self.config.get("exchange", {}).get("pair_whitelist"))
self.last_trade_database_summary: DataFrame = {}
self.current_trade_database_summary: DataFrame = {}
self.analysis_lock = Lock()
self.inference_time: float = 0
self.begin_time: float = 0
self.base_tf_seconds = timeframe_to_seconds(self.config['timeframe'])
def assert_config(self, config: Dict[str, Any]) -> None:
if not config.get("freqai", {}):
raise OperationalException("No freqai parameters found in configuration file.")
def start(self, dataframe: DataFrame, metadata: dict, strategy: IStrategy) -> DataFrame:
"""
Entry point to the FreqaiModel from a specific pair; it will train a new model if
necessary before making the prediction.
:param dataframe: Full dataframe coming from strategy - it contains entire
backtesting timerange + additional historical data necessary to train
the model.
:param metadata: pair metadata coming from strategy.
:param strategy: Strategy to train on
"""
self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE)
self.dd.set_pair_dict_info(metadata)
if self.live:
self.inference_timer('start')
self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
dk = self.start_live(dataframe, metadata, strategy, self.dk)
# For backtesting, each pair enters and then gets trained for each window along the
# sliding window defined by "train_period_days" (training window) and "backtest_period_days"
# (backtest window, i.e. window immediately following the training window).
# FreqAI slides the window and sequentially builds the backtesting results before returning
# the concatenated results for the full backtesting period back to the strategy.
elif not self.follow_mode:
self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
with self.analysis_lock:
dataframe = self.dk.use_strategy_to_populate_indicators(
strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
)
dk = self.start_backtesting(dataframe, metadata, self.dk)
dataframe = dk.remove_features_from_df(dk.return_dataframe)
del dk
if self.live:
self.inference_timer('stop')
return dataframe
@threaded
def start_scanning(self, strategy: IStrategy) -> None:
"""
Function designed to constantly scan pairs for retraining on a separate thread (intracandle)
to improve model youth. This function is agnostic to data preparation/collection/storage;
it simply trains on whatever data is available in self.dd.
:param strategy: IStrategy = The user defined strategy class
"""
while 1:
time.sleep(1)
for pair in self.config.get("exchange", {}).get("pair_whitelist"):
(_, trained_timestamp, _) = self.dd.get_pair_dict_info(pair)
if self.dd.pair_dict[pair]["priority"] != 1:
continue
dk = FreqaiDataKitchen(self.config, self.live, pair)
dk.set_paths(pair, trained_timestamp)
(
retrain,
new_trained_timerange,
data_load_timerange,
) = dk.check_if_new_training_required(trained_timestamp)
dk.set_paths(pair, new_trained_timerange.stopts)
if retrain:
self.train_model_in_series(
new_trained_timerange, pair, strategy, dk, data_load_timerange
)
self.dd.save_historic_predictions_to_disk()
def start_backtesting(
self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen
) -> FreqaiDataKitchen:
"""
The main broad execution for backtesting. For backtesting, each pair enters and then gets
trained for each window along the sliding window defined by "train_period_days"
(training window) and "backtest_period_days" (backtest window, i.e. window immediately
following the training window). FreqAI slides the window and sequentially builds
the backtesting results before returning the concatenated results for the full
backtesting period back to the strategy.
:param dataframe: DataFrame = strategy passed dataframe
:param metadata: Dict = pair metadata
:param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
:return:
FreqaiDataKitchen = Data management/analysis tool associated to present pair only
"""
self.pair_it += 1
train_it = 0
# Loop enforcing the sliding window training/backtesting paradigm
# tr_train is the training time range e.g. 1 historical month
# tr_backtest is the backtesting time range e.g. the week directly
# following tr_train. Both of these windows slide through the
# entire backtest
for tr_train, tr_backtest in zip(dk.training_timeranges, dk.backtesting_timeranges):
(_, _, _) = self.dd.get_pair_dict_info(metadata["pair"])
train_it += 1
total_trains = len(dk.backtesting_timeranges)
self.training_timerange = tr_train
dataframe_train = dk.slice_dataframe(tr_train, dataframe)
dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe)
trained_timestamp = tr_train
tr_train_startts_str = datetime.datetime.utcfromtimestamp(tr_train.startts).strftime(
"%Y-%m-%d %H:%M:%S"
)
tr_train_stopts_str = datetime.datetime.utcfromtimestamp(tr_train.stopts).strftime(
"%Y-%m-%d %H:%M:%S"
)
logger.info(
f"Training {metadata['pair']}, {self.pair_it}/{self.total_pairs} pairs"
f" from {tr_train_startts_str} to {tr_train_stopts_str}, {train_it}/{total_trains} "
"trains"
)
dk.data_path = Path(
dk.full_path
/
f"sub-train-{metadata['pair'].split('/')[0]}_{int(trained_timestamp.stopts)}"
)
if not self.model_exists(
metadata["pair"], dk, trained_timestamp=int(trained_timestamp.stopts)
):
dk.find_features(dataframe_train)
self.model = self.train(dataframe_train, metadata["pair"], dk)
self.dd.pair_dict[metadata["pair"]]["trained_timestamp"] = int(
trained_timestamp.stopts)
dk.set_new_model_names(metadata["pair"], trained_timestamp)
self.dd.save_data(self.model, metadata["pair"], dk)
else:
self.model = self.dd.load_data(metadata["pair"], dk)
self.check_if_feature_list_matches_strategy(dataframe_train, dk)
pred_df, do_preds = self.predict(dataframe_backtest, dk)
dk.append_predictions(pred_df, do_preds)
dk.fill_predictions(dataframe)
return dk
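# Editor sketch (assumed values): with train_period_days=30 and
# backtest_period_days=7, the zip above pairs each 30-day training window with
# the 7-day window immediately following it, then slides both forward by 7
# days: (Jan 01-30 -> Jan 31-Feb 06), (Jan 08-Feb 06 -> Feb 07-13), and so on
# until the requested backtest range is covered; the per-window predictions
# are concatenated into one result.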
def start_live(
self, dataframe: DataFrame, metadata: dict, strategy: IStrategy, dk: FreqaiDataKitchen
) -> FreqaiDataKitchen:
"""
The main broad execution for dry/live. This function will check if a retraining should be
performed, and if so, retrain and reset the model.
:param dataframe: DataFrame = strategy passed dataframe
:param metadata: Dict = pair metadata
:param strategy: IStrategy = currently employed strategy
dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
:returns:
dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
"""
# update follower
if self.follow_mode:
self.dd.update_follower_metadata()
# get the model metadata associated with the current pair
(_, trained_timestamp, return_null_array) = self.dd.get_pair_dict_info(metadata["pair"])
# if the metadata doesn't exist, the follower returns null arrays to strategy
if self.follow_mode and return_null_array:
logger.info("Returning null array from follower to strategy")
self.dd.return_null_values_to_strategy(dataframe, dk)
return dk
# append the historic data once per round
if self.dd.historic_data:
self.dd.update_historic_data(strategy, dk)
logger.debug(f'Updating historic data on pair {metadata["pair"]}')
if not self.follow_mode:
(_, new_trained_timerange, data_load_timerange) = dk.check_if_new_training_required(
trained_timestamp
)
dk.set_paths(metadata["pair"], new_trained_timerange.stopts)
# download candle history if it is not already in memory
if not self.dd.historic_data:
logger.info(
"Downloading all training data for all pairs in whitelist and "
"corr_pairlist, this may take a while if you do not have the "
"data saved"
)
dk.download_all_data_for_training(data_load_timerange, strategy.dp)
self.dd.load_all_pair_histories(data_load_timerange, dk)
if not self.scanning:
self.scanning = True
self.start_scanning(strategy)
elif self.follow_mode:
dk.set_paths(metadata["pair"], trained_timestamp)
logger.info(
"FreqAI instance set to follow_mode, finding existing pair "
f"using { self.identifier }"
)
# load the model and associated data into the data kitchen
self.model = self.dd.load_data(metadata["pair"], dk)
with self.analysis_lock:
dataframe = self.dk.use_strategy_to_populate_indicators(
strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
)
if not self.model:
logger.warning(
f"No model ready for {metadata['pair']}, returning null values to strategy."
)
self.dd.return_null_values_to_strategy(dataframe, dk)
return dk
# ensure user is feeding the correct indicators to the model
self.check_if_feature_list_matches_strategy(dataframe, dk)
self.build_strategy_return_arrays(dataframe, dk, metadata["pair"], trained_timestamp)
return dk
def build_strategy_return_arrays(
self, dataframe: DataFrame, dk: FreqaiDataKitchen, pair: str, trained_timestamp: int
) -> None:
# hold the historical predictions in memory so we send back the
# correct array to the strategy
if pair not in self.dd.model_return_values:
# first predictions are made on entire historical candle set coming from strategy. This
# allows FreqUI to show full return values.
pred_df, do_preds = self.predict(dataframe, dk)
if pair not in self.dd.historic_predictions:
self.set_initial_historic_predictions(pred_df, dk, pair)
self.dd.set_initial_return_values(pair, pred_df)
dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)
return
elif self.dk.check_if_model_expired(trained_timestamp):
pred_df = DataFrame(np.zeros((2, len(dk.label_list))), columns=dk.label_list)
do_preds = np.ones(2, dtype=np.int_) * 2
dk.DI_values = np.zeros(2)
logger.warning(
f"Model expired for {pair}, returning null values to strategy. Strategy "
"construction should take care to consider this event with "
"prediction == 0 and do_predict == 2"
)
else:
# remaining predictions are made only on the most recent candles for performance and
# historical accuracy reasons.
pred_df, do_preds = self.predict(dataframe.iloc[-self.CONV_WIDTH:], dk, first=False)
if self.freqai_info.get('fit_live_predictions_candles', 0) and self.live:
self.fit_live_predictions(dk, pair)
self.dd.append_model_predictions(pair, pred_df, do_preds, dk, len(dataframe))
dk.return_dataframe = self.dd.attach_return_values_to_return_dataframe(pair, dataframe)
return
def check_if_feature_list_matches_strategy(
self, dataframe: DataFrame, dk: FreqaiDataKitchen
) -> None:
"""
Ensure user is passing the proper feature set if they are reusing an `identifier` pointing
to a folder holding existing models.
:param dataframe: DataFrame = strategy provided dataframe
:param dk: FreqaiDataKitchen = non-persistent data container/analyzer for
current coin/bot loop
"""
dk.find_features(dataframe)
if "training_features_list_raw" in dk.data:
feature_list = dk.data["training_features_list_raw"]
else:
feature_list = dk.training_features_list
if dk.training_features_list != feature_list:
raise OperationalException(
"Trying to access pretrained model with `identifier` "
"but found different features furnished by current strategy."
"Change `identifier` to train from scratch, or ensure the"
"strategy is furnishing the same features as the pretrained"
"model"
)
def data_cleaning_train(self, dk: FreqaiDataKitchen) -> None:
"""
Base data cleaning method for train
Any function inside this method should drop training data points from the filtered_dataframe
based on user decided logic. See FreqaiDataKitchen::use_SVM_to_remove_outliers() for an
example of how outlier data points are dropped from the dataframe used for training.
"""
if self.freqai_info["feature_parameters"].get(
"principal_component_analysis", False
):
dk.principal_component_analysis()
if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False):
dk.use_SVM_to_remove_outliers(predict=False)
if self.freqai_info["feature_parameters"].get("DI_threshold", 0):
dk.data["avg_mean_dist"] = dk.compute_distances()
if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False):
if dk.pair in self.dd.old_DBSCAN_eps:
eps = self.dd.old_DBSCAN_eps[dk.pair]
else:
eps = None
dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps)
self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps']
def data_cleaning_predict(self, dk: FreqaiDataKitchen, dataframe: DataFrame) -> None:
"""
Base data cleaning method for predict.
These functions each modify dk.do_predict, which is a dataframe with equal length
to the number of candles coming from and returning to the strategy. Inside do_predict,
1 allows prediction and < 0 signals to the strategy that the model is not confident in
the prediction.
See FreqaiDataKitchen::remove_outliers() for an example
of how the do_predict vector is modified. do_predict is ultimately passed back to strategy
for buy signals.
"""
if self.freqai_info["feature_parameters"].get(
"principal_component_analysis", False
):
dk.pca_transform(dataframe)
if self.freqai_info["feature_parameters"].get("use_SVM_to_remove_outliers", False):
dk.use_SVM_to_remove_outliers(predict=True)
if self.freqai_info["feature_parameters"].get("DI_threshold", 0):
dk.check_if_pred_in_training_spaces()
if self.freqai_info["feature_parameters"].get("use_DBSCAN_to_remove_outliers", False):
dk.use_DBSCAN_to_remove_outliers(predict=True)
def model_exists(
self,
pair: str,
dk: FreqaiDataKitchen,
trained_timestamp: int = None,
model_filename: str = "",
scanning: bool = False,
) -> bool:
"""
Given a pair and path, check if a model already exists
:param pair: pair e.g. BTC/USD
:param path: path to model
:return:
:boolean: whether the model file exists or not.
"""
coin, _ = pair.split("/")
if not self.live:
dk.model_filename = model_filename = f"cb_{coin.lower()}_{trained_timestamp}"
path_to_modelfile = Path(dk.data_path / f"{model_filename}_model.joblib")
file_exists = path_to_modelfile.is_file()
if file_exists and not scanning:
logger.info("Found model at %s", dk.data_path / dk.model_filename)
elif not scanning:
logger.info("Could not find model at %s", dk.data_path / dk.model_filename)
return file_exists
def set_full_path(self) -> None:
self.full_path = Path(
self.config["user_data_dir"] / "models" / f"{self.freqai_info['identifier']}"
)
self.full_path.mkdir(parents=True, exist_ok=True)
shutil.copy(
self.config["config_files"][0],
Path(self.full_path, Path(self.config["config_files"][0]).name),
)
def train_model_in_series(
self,
new_trained_timerange: TimeRange,
pair: str,
strategy: IStrategy,
dk: FreqaiDataKitchen,
data_load_timerange: TimeRange,
):
"""
Retrieve data and train model in single threaded mode (only used if model directory
is empty upon startup for dry/live)
:param new_trained_timerange: TimeRange = the timerange to train the model on
:param metadata: dict = strategy provided metadata
:param strategy: IStrategy = user defined strategy object
:param dk: FreqaiDataKitchen = non-persistent data container for current coin/loop
:param data_load_timerange: TimeRange = the amount of data to be loaded
for populate_any_indicators
(larger than new_trained_timerange so that
new_trained_timerange does not contain any NaNs)
"""
corr_dataframes, base_dataframes = self.dd.get_base_and_corr_dataframes(
data_load_timerange, pair, dk
)
with self.analysis_lock:
unfiltered_dataframe = dk.use_strategy_to_populate_indicators(
strategy, corr_dataframes, base_dataframes, pair
)
unfiltered_dataframe = dk.slice_dataframe(new_trained_timerange, unfiltered_dataframe)
# find the features indicated by strategy and store in datakitchen
dk.find_features(unfiltered_dataframe)
model = self.train(unfiltered_dataframe, pair, dk)
self.dd.pair_dict[pair]["trained_timestamp"] = new_trained_timerange.stopts
dk.set_new_model_names(pair, new_trained_timerange)
self.dd.pair_dict[pair]["first"] = False
if self.dd.pair_dict[pair]["priority"] == 1 and self.scanning:
self.dd.pair_to_end_of_training_queue(pair)
self.dd.save_data(model, pair, dk)
if self.freqai_info.get("purge_old_models", False):
self.dd.purge_old_models()
def set_initial_historic_predictions(
self, pred_df: DataFrame, dk: FreqaiDataKitchen, pair: str
) -> None:
"""
This function is called only if the datadrawer failed to load an
existing set of historic predictions. In this case, it builds
the structure and sets fake predictions off the first training
data. After that, FreqAI will append new real predictions to the
set of historic predictions.
These values are used to generate live statistics which can be used
in the strategy for adaptive values. E.g. &*_mean/std are quantities
that can be computed based on live predictions from the set of historical
predictions. Those values can be used in the user strategy to better
assess prediction rarity, and thus wait for probabilistically favorable
entries relative to the live historical predictions.
If the user reuses an identifier on a subsequent instance,
this function will not be called. In that case, "real" predictions
will be appended to the loaded set of historic predictions.
:param: df: DataFrame = the dataframe containing the training feature data
:param: model: Any = A model which was `fit` using a common library such as
catboost or lightgbm
:param: dk: FreqaiDataKitchen = object containing methods for data analysis
:param: pair: str = current pair
"""
self.dd.historic_predictions[pair] = pred_df
hist_preds_df = self.dd.historic_predictions[pair]
for label in hist_preds_df.columns:
if hist_preds_df[label].dtype == object:
continue
hist_preds_df[f'{label}_mean'] = 0
hist_preds_df[f'{label}_std'] = 0
hist_preds_df['do_predict'] = 0
if self.freqai_info['feature_parameters'].get('DI_threshold', 0) > 0:
hist_preds_df['DI_values'] = 0
for return_str in dk.data['extra_returns_per_train']:
hist_preds_df[return_str] = 0
# for keras type models, the conv_window needs to be prepended so
# viewing is correct in frequi
if self.freqai_info.get('keras', False):
n_lost_points = self.freqai_info.get('conv_width', 2)
zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))),
columns=hist_preds_df.columns)
self.dd.historic_predictions[pair] = pd.concat(
[zeros_df, hist_preds_df], axis=0, ignore_index=True)
def fit_live_predictions(self, dk: FreqaiDataKitchen, pair: str) -> None:
"""
Fit the labels with a gaussian distribution
"""
import scipy as spy
# add classes from classifier label types if used
full_labels = dk.label_list + dk.unique_class_list
num_candles = self.freqai_info.get("fit_live_predictions_candles", 100)
dk.data["labels_mean"], dk.data["labels_std"] = {}, {}
for label in full_labels:
if self.dd.historic_predictions[dk.pair][label].dtype == object:
continue
f = spy.stats.norm.fit(self.dd.historic_predictions[dk.pair][label].tail(num_candles))
dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1]
return
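# Editor note: scipy's norm.fit returns the maximum-likelihood (mean, std) of
# a normal fit, e.g. spy.stats.norm.fit([1.0, 2.0, 3.0]) -> (2.0, 0.8165...),
# so labels_mean/labels_std end up as the sample mean and std of the last
# `num_candles` historic predictions for each label.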
def inference_timer(self, do='start'):
"""
Timer designed to track the cumulative time spent in FreqAI for one pass through
the whitelist. This will check if the time spent is more than 1/4 the time
of a single candle, and if so, it will warn the user of degraded performance
"""
if do == 'start':
self.pair_it += 1
self.begin_time = time.time()
elif do == 'stop':
end = time.time()
self.inference_time += (end - self.begin_time)
if self.pair_it == self.total_pairs:
logger.info(
f'Total time spent inferencing pairlist {self.inference_time:.2f} seconds')
if self.inference_time > 0.25 * self.base_tf_seconds:
logger.warning('Inference took over 25% of the candle time. Reduce pairlist to'
' avoid blinding open trades and degrading performance.')
self.pair_it = 0
self.inference_time = 0
return
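# Editor note (worked arithmetic): on a 5m base timeframe, base_tf_seconds is
# 300, so the warning above fires once one full whitelist pass spends more
# than 0.25 * 300 = 75 seconds inferencing.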
# Following methods which are overridden by user made prediction models.
# See freqai/prediction_models/CatboostPredictionModel.py for an example.
@abstractmethod
def train(self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datahandler
for storing, saving, loading, and analyzing the data.
:param unfiltered_dataframe: Full dataframe for the current training period
:param metadata: pair metadata from strategy.
:return: Trained model which can be used to inference (self.predict)
"""
@abstractmethod
def fit(self, data_dictionary: Dict[str, Any]) -> Any:
"""
Most regressors use the same function names and arguments e.g. user
can drop in LGBMRegressor in place of CatBoostRegressor and all data
management will be properly handled by Freqai.
:param data_dictionary: Dict = the dictionary constructed by DataHandler to hold
all the training and test data/labels.
"""
return
@abstractmethod
def predict(
self, dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = True
) -> Tuple[DataFrame, NDArray[np.int_]]:
"""
Filter the prediction features data and predict with it.
:param unfiltered_dataframe: Full dataframe for the current backtest period.
:param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
:param first: boolean = whether this is the first prediction or not.
:return:
:predictions: np.array of predictions
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
data (NaNs) or felt uncertain about data (i.e. SVM and/or DI index)
"""

View File

@@ -0,0 +1,99 @@
import logging
from typing import Any, Tuple
import numpy as np
import numpy.typing as npt
import pandas as pd
from pandas import DataFrame
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
logger = logging.getLogger(__name__)
class BaseClassifierModel(IFreqaiModel):
"""
Base class for classification type models (e.g. Catboost, LightGBM, XGboost etc.).
User *must* inherit from this class and set fit() and predict(). See example scripts
such as prediction_models/CatboostPredictionModel.py for guidance.
"""
def train(
self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_dataframe: Full dataframe for the current training period
:param metadata: pair metadata from strategy.
:return:
:model: Trained model which can be used to inference (self.predict)
"""
logger.info("-------------------- Starting training " f"{pair} --------------------")
# filter the features requested by user in the configuration file and elegantly handle NaNs
features_filtered, labels_filtered = dk.filter_features(
unfiltered_dataframe,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
start_date = unfiltered_dataframe["date"].iloc[0].strftime("%Y-%m-%d")
end_date = unfiltered_dataframe["date"].iloc[-1].strftime("%Y-%m-%d")
logger.info(f"-------------------- Training on data from {start_date} to "
f"{end_date}--------------------")
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get('fit_live_predictions', 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
logger.info(
f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features"
)
logger.info(f'Training model on {len(data_dictionary["train_features"])} data points')
model = self.fit(data_dictionary)
logger.info(f"--------------------done training {pair}--------------------")
return model
def predict(
self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
"""
Filter the prediction features data and predict with it.
:param: unfiltered_dataframe: Full dataframe for the current backtest period.
:return:
:pred_df: dataframe containing the predictions
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
data (NaNs) or felt uncertain about data (PCA and DI index)
"""
dk.find_features(unfiltered_dataframe)
filtered_dataframe, _ = dk.filter_features(
unfiltered_dataframe, dk.training_features_list, training_filter=False
)
filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
dk.data_dictionary["prediction_features"] = filtered_dataframe
self.data_cleaning_predict(dk, filtered_dataframe)
predictions = self.model.predict(dk.data_dictionary["prediction_features"])
pred_df = DataFrame(predictions, columns=dk.label_list)
predictions_prob = self.model.predict_proba(dk.data_dictionary["prediction_features"])
pred_df_prob = DataFrame(predictions_prob, columns=self.model.classes_)
pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
return (pred_df, dk.do_predict)
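
A toy illustration of the concat at the end of predict() above (label and class names hypothetical): the returned dataframe carries the predicted class per label plus one probability column per class.

import pandas as pd

pred_df = pd.DataFrame({"&s-up_or_down": ["up", "down"]})            # predicted class
pred_df_prob = pd.DataFrame({"up": [0.7, 0.2], "down": [0.3, 0.8]})  # predict_proba
print(pd.concat([pred_df, pred_df_prob], axis=1))
#   &s-up_or_down   up  down
# 0            up  0.7   0.3
# 1          down  0.2   0.8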

View File

@@ -0,0 +1,96 @@
import logging
from typing import Any, Tuple
import numpy as np
import numpy.typing as npt
from pandas import DataFrame
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
logger = logging.getLogger(__name__)
class BaseRegressionModel(IFreqaiModel):
"""
Base class for regression type models (e.g. Catboost, LightGBM, XGboost etc.).
User *must* inherit from this class and set fit() and predict(). See example scripts
such as prediction_models/CatboostPredictionModel.py for guidance.
"""
def train(
self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_dataframe: Full dataframe for the current training period
:param metadata: pair metadata from strategy.
:return:
:model: Trained model which can be used to inference (self.predict)
"""
logger.info("-------------------- Starting training " f"{pair} --------------------")
# filter the features requested by user in the configuration file and elegantly handle NaNs
features_filtered, labels_filtered = dk.filter_features(
unfiltered_dataframe,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
start_date = unfiltered_dataframe["date"].iloc[0].strftime("%Y-%m-%d")
end_date = unfiltered_dataframe["date"].iloc[-1].strftime("%Y-%m-%d")
logger.info(f"-------------------- Training on data from {start_date} to "
f"{end_date}--------------------")
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get('fit_live_predictions', 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
logger.info(
f'Training model on {len(dk.data_dictionary["train_features"].columns)}' " features"
)
logger.info(f'Training model on {len(data_dictionary["train_features"])} data points')
model = self.fit(data_dictionary)
logger.info(f"--------------------done training {pair}--------------------")
return model
def predict(
self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
"""
Filter the prediction features data and predict with it.
:param: unfiltered_dataframe: Full dataframe for the current backtest period.
:return:
:pred_df: dataframe containing the predictions
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
data (NaNs) or felt uncertain about data (PCA and DI index)
"""
dk.find_features(unfiltered_dataframe)
filtered_dataframe, _ = dk.filter_features(
unfiltered_dataframe, dk.training_features_list, training_filter=False
)
filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
dk.data_dictionary["prediction_features"] = filtered_dataframe
# optional additional data cleaning/analysis
self.data_cleaning_predict(dk, filtered_dataframe)
predictions = self.model.predict(dk.data_dictionary["prediction_features"])
pred_df = DataFrame(predictions, columns=dk.label_list)
pred_df = dk.denormalize_labels_from_metadata(pred_df)
return (pred_df, dk.do_predict)

View File

@@ -0,0 +1,64 @@
import logging
from typing import Any
from pandas import DataFrame
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
logger = logging.getLogger(__name__)
class BaseTensorFlowModel(IFreqaiModel):
"""
Base class for TensorFlow type models.
User *must* inherit from this class and set fit() and predict().
"""
def train(
self, unfiltered_dataframe: DataFrame, pair: str, dk: FreqaiDataKitchen
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_dataframe: Full dataframe for the current training period
:param metadata: pair metadata from strategy.
:return:
:model: Trained model which can be used to inference (self.predict)
"""
logger.info("-------------------- Starting training " f"{pair} --------------------")
# filter the features requested by user in the configuration file and elegantly handle NaNs
features_filtered, labels_filtered = dk.filter_features(
unfiltered_dataframe,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
start_date = unfiltered_dataframe["date"].iloc[0].strftime("%Y-%m-%d")
end_date = unfiltered_dataframe["date"].iloc[-1].strftime("%Y-%m-%d")
logger.info(f"-------------------- Training on data from {start_date} to "
f"{end_date}--------------------")
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get('fit_live_predictions', 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
logger.info(
f'Training model on {len(dk.data_dictionary["train_features"].columns)} features'
)
logger.info(f'Training model on {len(data_dictionary["train_features"])} data points')
model = self.fit(data_dictionary)
logger.info(f"--------------------done training {pair}--------------------")
return model
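A minimal sketch (not part of this diff) of the subclass this base expects: the class name and import path are assumptions inferred from the resolver search path further below, and tensorflow must be installed. fit() trains a small Keras MLP on the kitchen-built data_dictionary; predict() mirrors the filter/normalize flow shown above.

import logging
from typing import Any, Dict, Tuple

import tensorflow as tf
from pandas import DataFrame

from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.prediction_models.BaseTensorFlowModel import BaseTensorFlowModel

logger = logging.getLogger(__name__)


class ExampleTensorFlowRegressor(BaseTensorFlowModel):  # hypothetical name
    """Illustrative sketch only; not a canonical implementation."""

    def fit(self, data_dictionary: Dict) -> Any:
        X = data_dictionary["train_features"].values
        y = data_dictionary["train_labels"].values
        # small MLP with one output unit per label column
        model = tf.keras.Sequential([
            tf.keras.layers.Dense(64, activation="relu", input_shape=(X.shape[1],)),
            tf.keras.layers.Dense(y.shape[1]),
        ])
        model.compile(optimizer="adam", loss="mse")
        model.fit(X, y, epochs=10, batch_size=64, verbose=0)
        return model

    def predict(
        self, unfiltered_dataframe: DataFrame, dk: FreqaiDataKitchen, first: bool = False
    ) -> Tuple[DataFrame, Any]:
        # same filter -> normalize -> predict -> denormalize flow as the
        # regression base class earlier in this diff
        dk.find_features(unfiltered_dataframe)
        filtered_dataframe, _ = dk.filter_features(
            unfiltered_dataframe, dk.training_features_list, training_filter=False
        )
        filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
        dk.data_dictionary["prediction_features"] = filtered_dataframe
        self.data_cleaning_predict(dk, filtered_dataframe)
        predictions = self.model.predict(filtered_dataframe.values)
        pred_df = DataFrame(predictions, columns=dk.label_list)
        pred_df = dk.denormalize_labels_from_metadata(pred_df)
        return (pred_df, dk.do_predict)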

View File

@@ -0,0 +1,41 @@
import logging
from typing import Any, Dict
from catboost import CatBoostClassifier, Pool
from freqtrade.freqai.prediction_models.BaseClassifierModel import BaseClassifierModel
logger = logging.getLogger(__name__)
class CatboostClassifier(BaseClassifierModel):
"""
User created prediction model. The class inherits BaseClassifierModel, which
provides train() and predict(); only fit() needs to be overridden here. All
data management (saving, loading, analysis) is handled by the FreqaiDataKitchen.
"""
def fit(self, data_dictionary: Dict) -> Any:
"""
User sets up the training and test data to fit their desired model here
:param data_dictionary: the dictionary built by the FreqaiDataKitchen to hold
all the training and test data/labels.
"""
train_data = Pool(
data=data_dictionary["train_features"],
label=data_dictionary["train_labels"],
weight=data_dictionary["train_weights"],
)
cbr = CatBoostClassifier(
allow_writing_files=False,
loss_function='MultiClass',
**self.model_training_parameters,
)
cbr.fit(train_data)
return cbr
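The **self.model_training_parameters expansion above comes straight from the user configuration; a hedged sketch of the relevant fragment (key placement per the freqai schema, values illustrative):

config = {
    "freqai": {
        "model_training_parameters": {
            # passed verbatim to CatBoostClassifier(**...)
            "iterations": 800,
            "learning_rate": 0.02,
            "depth": 6,
        },
    },
}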

View File

@@ -0,0 +1,53 @@
import gc
import logging
from typing import Any, Dict
from catboost import CatBoostRegressor, Pool
from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel
logger = logging.getLogger(__name__)
class CatboostRegressor(BaseRegressionModel):
"""
User created prediction model. The class inherits BaseRegressionModel, which
provides train() and predict(); only fit() needs to be overridden here. All
data management (saving, loading, analysis) is handled by the FreqaiDataKitchen.
"""
def fit(self, data_dictionary: Dict) -> Any:
"""
User sets up the training and test data to fit their desired model here
:param data_dictionary: the dictionary built by the FreqaiDataKitchen to hold
all the training and test data/labels.
"""
train_data = Pool(
data=data_dictionary["train_features"],
label=data_dictionary["train_labels"],
weight=data_dictionary["train_weights"],
)
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
test_data = None
else:
test_data = Pool(
data=data_dictionary["test_features"],
label=data_dictionary["test_labels"],
weight=data_dictionary["test_weights"],
)
model = CatBoostRegressor(
allow_writing_files=False,
**self.model_training_parameters,
)
model.fit(X=train_data, eval_set=test_data)
# some evidence that catboost pools have memory leaks:
# https://github.com/catboost/catboost/issues/1835
del train_data, test_data
gc.collect()
return model
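The test_size lookup above reads data_split_parameters from the same config section; an illustrative fragment (values assumed, not prescriptive):

config = {
    "freqai": {
        "data_split_parameters": {
            "test_size": 0,  # 0 disables the eval Pool above (test_data stays None)
        },
    },
}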

View File

@@ -0,0 +1,44 @@
import logging
from typing import Any, Dict
from catboost import CatBoostRegressor # , Pool
from sklearn.multioutput import MultiOutputRegressor
from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel
logger = logging.getLogger(__name__)
class CatboostRegressorMultiTarget(BaseRegressionModel):
"""
User created prediction model. The class inherits BaseRegressionModel, which
provides train() and predict(); only fit() needs to be overridden here. All
data management (saving, loading, analysis) is handled by the FreqaiDataKitchen.
"""
def fit(self, data_dictionary: Dict) -> Any:
"""
User sets up the training and test data to fit their desired model here
:param data_dictionary: the dictionary built by the FreqaiDataKitchen to hold
all the training and test data/labels.
"""
cbr = CatBoostRegressor(
allow_writing_files=False,
**self.model_training_parameters,
)
X = data_dictionary["train_features"]
y = data_dictionary["train_labels"]
eval_set = (data_dictionary["test_features"], data_dictionary["test_labels"])
sample_weight = data_dictionary["train_weights"]
model = MultiOutputRegressor(estimator=cbr)
model.fit(X=X, y=y, sample_weight=sample_weight) # , eval_set=eval_set)
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
train_score = model.score(X, y)
test_score = model.score(*eval_set)
logger.info(f"Train score {train_score}, Test score {test_score}")
return model
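For intuition on the MultiOutputRegressor wrapper, a self-contained sketch (LinearRegression stands in for CatBoost; data invented):

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.multioutput import MultiOutputRegressor

X = np.random.rand(100, 4)
y = np.random.rand(100, 2)  # two label columns, e.g. "&-s_close", "&-s_range"
model = MultiOutputRegressor(estimator=LinearRegression()).fit(X, y)
assert len(model.estimators_) == 2  # one fitted estimator per target column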

View File

@@ -0,0 +1,38 @@
import logging
from typing import Any, Dict
from lightgbm import LGBMClassifier
from freqtrade.freqai.prediction_models.BaseClassifierModel import BaseClassifierModel
logger = logging.getLogger(__name__)
class LightGBMClassifier(BaseClassifierModel):
"""
User created prediction model. The class inherits BaseClassifierModel, which
provides train() and predict(); only fit() needs to be overridden here. All
data management (saving, loading, analysis) is handled by the FreqaiDataKitchen.
"""
def fit(self, data_dictionary: Dict) -> Any:
"""
User sets up the training and test data to fit their desired model here
:param data_dictionary: the dictionary built by the FreqaiDataKitchen to hold
all the training and test data/labels.
"""
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
eval_set = None
else:
eval_set = (data_dictionary["test_features"], data_dictionary["test_labels"])
X = data_dictionary["train_features"]
y = data_dictionary["train_labels"]
model = LGBMClassifier(**self.model_training_parameters)
model.fit(X=X, y=y, eval_set=eval_set)
return model

View File

@@ -0,0 +1,39 @@
import logging
from typing import Any, Dict
from lightgbm import LGBMRegressor
from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel
logger = logging.getLogger(__name__)
class LightGBMRegressor(BaseRegressionModel):
"""
User created prediction model. The class inherits BaseRegressionModel, which
provides train() and predict(); only fit() needs to be overridden here. All
data management (saving, loading, analysis) is handled by the FreqaiDataKitchen.
"""
def fit(self, data_dictionary: Dict) -> Any:
"""
Most regressors share the same fit() signature, e.g. the user can drop in
LGBMRegressor in place of CatBoostRegressor and all data management will be
handled correctly by FreqAI.
:param data_dictionary: the dictionary built by the FreqaiDataKitchen to hold
all the training and test data/labels.
"""
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0:
eval_set = None
else:
eval_set = (data_dictionary["test_features"], data_dictionary["test_labels"])
X = data_dictionary["train_features"]
y = data_dictionary["train_labels"]
model = LGBMRegressor(**self.model_training_parameters)
model.fit(X=X, y=y, eval_set=eval_set)
return model

View File

@@ -0,0 +1,39 @@
import logging
from typing import Any, Dict
from lightgbm import LGBMRegressor
from sklearn.multioutput import MultiOutputRegressor
from freqtrade.freqai.prediction_models.BaseRegressionModel import BaseRegressionModel
logger = logging.getLogger(__name__)
class LightGBMRegressorMultiTarget(BaseRegressionModel):
"""
User created prediction model. The class inherits BaseRegressionModel, which
provides train() and predict(); only fit() needs to be overridden here. All
data management (saving, loading, analysis) is handled by the FreqaiDataKitchen.
"""
def fit(self, data_dictionary: Dict) -> Any:
"""
User sets up the training and test data to fit their desired model here
:param data_dictionary: the dictionary built by the FreqaiDataKitchen to hold
all the training and test data/labels.
"""
lgb = LGBMRegressor(**self.model_training_parameters)
X = data_dictionary["train_features"]
y = data_dictionary["train_labels"]
eval_set = (data_dictionary["test_features"], data_dictionary["test_labels"])
sample_weight = data_dictionary["train_weights"]
model = MultiOutputRegressor(estimator=lgb)
model.fit(X=X, y=y, sample_weight=sample_weight) # , eval_set=eval_set)
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
    train_score = model.score(X, y)
    test_score = model.score(*eval_set)
    logger.info(f"Train score {train_score}, Test score {test_score}")
return model

View File

@@ -1489,12 +1489,6 @@ class FreqtradeBot(LoggingMixin):
ExitType.STOP_LOSS, ExitType.TRAILING_STOP_LOSS, ExitType.LIQUIDATION):
exit_type = 'stoploss'
# if stoploss is on exchange and we are in dry_run mode,
# we consider the sell price to be the stop price
if (self.config['dry_run'] and exit_type == 'stoploss'
and self.strategy.order_types['stoploss_on_exchange']):
limit = trade.stoploss_or_liquidation
# set custom_exit_price if available
proposed_limit_rate = limit
current_profit = trade.calc_profit_ratio(limit)

View File

@@ -89,6 +89,9 @@ class Backtesting:
self.dataprovider = DataProvider(self.config, self.exchange)
if self.config.get('strategy_list'):
if self.config.get('freqai', {}).get('enabled', False):
raise OperationalException(
"You can't use strategy_list and freqai at the same time.")
for strat in list(self.config['strategy_list']):
stratconf = deepcopy(self.config)
stratconf['strategy'] = strat
@@ -207,6 +210,15 @@ class Backtesting:
"""
self.progress.init_step(BacktestState.DATALOAD, 1)
if self.config.get('freqai', {}).get('enabled', False):
startup_candles = int(self.config.get('freqai', {}).get('startup_candles', 0))
if not startup_candles:
raise OperationalException('FreqAI backtesting module requires the user to '
'set startup_candles in the config.')
self.required_startup += int(self.config.get('freqai', {}).get('startup_candles', 0))
logger.info(f'Increasing startup_candle_count for freqai to {self.required_startup}')
self.config['startup_candle_count'] = self.required_startup
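# Illustrative (not part of this diff): with startup_candle_count = 300 in the
# strategy and "freqai": {"enabled": true, "startup_candles": 1000} in the
# config, required_startup becomes 300 + 1000 before history.load_data() runs;
# omitting startup_candles raises the OperationalException above.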
data = history.load_data(
datadir=self.config['datadir'],
pairs=self.pairlists.whitelist,

View File

@@ -1,5 +1,5 @@
import re
from typing import List
from typing import Any, Dict, List
def expand_pairlist(wildcardpl: List[str], available_pairs: List[str],
@@ -40,3 +40,13 @@ def expand_pairlist(wildcardpl: List[str], available_pairs: List[str],
except re.error as err:
raise ValueError(f"Wildcard error in {pair_wc}, {err}")
return result
def dynamic_expand_pairlist(config: Dict[str, Any], markets: List[str]) -> List[str]:
expanded_pairs = expand_pairlist(config['pairs'], markets)
if config.get('freqai', {}).get('enabled', False):
corr_pairlist = config['freqai']['feature_parameters']['include_corr_pairlist']
expanded_pairs += [pair for pair in corr_pairlist
if pair not in config['pairs']]
return expanded_pairs
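A worked example of the new helper (pairs and markets invented; assumes the functions above are in scope):

config = {
    "pairs": ["BTC/USDT"],
    "freqai": {
        "enabled": True,
        "feature_parameters": {"include_corr_pairlist": ["ETH/USDT", "BTC/USDT"]},
    },
}
markets = ["BTC/USDT", "ETH/USDT", "XRP/USDT"]
# ETH/USDT is appended from the correlation list; the duplicate BTC/USDT is skipped
assert dynamic_expand_pairlist(config, markets) == ["BTC/USDT", "ETH/USDT"]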

View File

@@ -0,0 +1,57 @@
# pragma pylint: disable=attribute-defined-outside-init
"""
This module loads a custom prediction model for FreqAI
"""
import logging
from pathlib import Path
from typing import Dict
from freqtrade.constants import USERPATH_FREQAIMODELS
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.freqai_interface import IFreqaiModel
from freqtrade.resolvers import IResolver
logger = logging.getLogger(__name__)
class FreqaiModelResolver(IResolver):
"""
This class contains all the logic to load a custom FreqaiModel class
"""
object_type = IFreqaiModel
object_type_str = "FreqaiModel"
user_subdir = USERPATH_FREQAIMODELS
initial_search_path = (
Path(__file__).parent.parent.joinpath("freqai/prediction_models").resolve()
)
@staticmethod
def load_freqaimodel(config: Dict) -> IFreqaiModel:
"""
Load the custom class from config parameter
:param config: configuration dictionary
"""
disallowed_models = ["BaseRegressionModel", "BaseTensorFlowModel"]
freqaimodel_name = config.get("freqaimodel")
if not freqaimodel_name:
raise OperationalException(
"No freqaimodel set. Please use `--freqaimodel` to "
"specify the FreqaiModel class to use.\n"
)
if freqaimodel_name in disallowed_models:
raise OperationalException(
f"{freqaimodel_name} is a baseclass and cannot be used directly. Please choose "
"an existing child class or inherit from this baseclass.\n"
)
freqaimodel = FreqaiModelResolver.load_object(
freqaimodel_name,
config,
kwargs={"config": config},
extra_dir=config.get("freqaimodel_path"),
)
return freqaimodel
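A hedged usage sketch of the resolver; the config below is a minimal stub (a real run carries a full freqtrade configuration, so treat this as shape, not a working invocation):

from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver

config = {
    "freqaimodel": "LightGBMRegressor",            # set via --freqaimodel
    "freqaimodel_path": "user_data/freqaimodels",  # set via --freqaimodel-path
    # ... the rest of a normal freqtrade / freqai configuration ...
}
model = FreqaiModelResolver.load_freqaimodel(config)  # returns an IFreqaiModel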

View File

@@ -120,7 +120,8 @@ class Telegram(RPCHandler):
r'/daily$', r'/daily \d+$', r'/profit$', r'/profit \d+',
r'/stats$', r'/count$', r'/locks$', r'/balance$',
r'/stopbuy$', r'/reload_config$', r'/show_config$',
r'/logs$', r'/whitelist$', r'/blacklist$', r'/bl_delete$',
r'/logs$', r'/whitelist$', r'/whitelist(\ssorted|\sbaseonly)+$',
r'/blacklist$', r'/bl_delete$',
r'/weekly$', r'/weekly \d+$', r'/monthly$', r'/monthly \d+$',
r'/forcebuy$', r'/forcelong$', r'/forceshort$',
r'/forcesell$', r'/forceexit$',
@@ -1368,6 +1369,12 @@ class Telegram(RPCHandler):
try:
whitelist = self._rpc._rpc_whitelist()
if context.args:
if "sorted" in context.args:
whitelist['whitelist'] = sorted(whitelist['whitelist'])
if "baseonly" in context.args:
whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']]
message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n"
message += f"`{', '.join(whitelist['whitelist'])}`"
@@ -1487,7 +1494,8 @@ class Telegram(RPCHandler):
"*/fx <trade_id>|all:* `Alias to /forceexit`\n"
f"{force_enter_text if self._config.get('force_entry_enable', False) else ''}"
"*/delete <trade_id>:* `Instantly delete the given trade in the database`\n"
"*/whitelist:* `Show current whitelist` \n"
"*/whitelist [sorted] [baseonly]:* `Show current whitelist. Optionally in "
"order and/or only displaying the base currency of each pairing.`\n"
"*/blacklist [pair]:* `Show current blacklist, or adds one or more pairs "
"to the blacklist.` \n"
"*/blacklist_delete [pairs]| /bl_delete [pairs]:* "
@@ -1524,7 +1532,7 @@ class Telegram(RPCHandler):
"*/weekly <n>:* `Shows statistics per week, over the last n weeks`\n"
"*/monthly <n>:* `Shows statistics per month, over the last n months`\n"
"*/stats:* `Shows Wins / losses by Sell reason as well as "
"Avg. holding durationsfor buys and sells.`\n"
"Avg. holding durations for buys and sells.`\n"
"*/help:* `This help message`\n"
"*/version:* `Show version`"
)

View File

@@ -145,11 +145,29 @@ class IStrategy(ABC, HyperStrategyMixin):
informative_data.candle_type = config['candle_type_def']
self._ft_informative.append((informative_data, cls_method))
def load_freqAI_model(self) -> None:
if self.config.get('freqai', {}).get('enabled', False):
# Import here to avoid importing this if freqAI is disabled
from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
self.freqai = FreqaiModelResolver.load_freqaimodel(self.config)
self.freqai_info = self.config["freqai"]
else:
# Graceful failure if freqAI is disabled but "start" is called.
class DummyClass():
def start(self, *args, **kwargs):
raise OperationalException(
'freqAI is not enabled. '
'Please enable it in your config to use this strategy.')
self.freqai = DummyClass() # type: ignore
def ft_bot_start(self, **kwargs) -> None:
"""
Strategy init - runs after dataprovider has been added.
Must call bot_start()
"""
self.load_freqAI_model()
strategy_safe_wrapper(self.bot_start)()
self.ft_load_hyper_params(self.config.get('runmode') == RunMode.HYPEROPT)
@@ -557,6 +575,22 @@ class IStrategy(ABC, HyperStrategyMixin):
"""
return None
def populate_any_indicators(self, pair: str, df: DataFrame, tf: str,
informative: DataFrame = None,
set_generalized_indicators: bool = False) -> DataFrame:
"""
Function designed to automatically generate, name and merge features
from user indicated timeframes in the configuration file. User can add
additional features here, but must follow the naming convention.
This method is *only* used in FreqaiDataKitchen class and therefore
it is only called if FreqAI is active.
:param pair: pair to be used as informative
:param df: strategy dataframe which will receive merges from informatives
:param tf: timeframe of the dataframe which will modify the feature names
:param informative: the dataframe associated with the informative pair
"""
return df
###
# END - Intended to be overridden by strategy
###

View File

@@ -0,0 +1,328 @@
import logging
from functools import reduce
import pandas as pd
import talib.abstract as ta
from pandas import DataFrame
from technical import qtpylib
from freqtrade.exchange import timeframe_to_prev_date
from freqtrade.persistence import Trade
from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair
logger = logging.getLogger(__name__)
class FreqaiExampleStrategy(IStrategy):
"""
Example strategy showing how the user connects their own
IFreqaiModel to the strategy. The user calls:
self.freqai.start(dataframe, metadata, self)
to make predictions on their data. populate_any_indicators() automatically
generates the variety of features indicated by the user in the
canonical freqtrade configuration file under config['freqai'].
"""
minimal_roi = {"0": 0.1, "240": -1}
plot_config = {
"main_plot": {},
"subplots": {
"prediction": {"prediction": {"color": "blue"}},
"target_roi": {
"target_roi": {"color": "brown"},
},
"do_predict": {
"do_predict": {"color": "brown"},
},
},
}
process_only_new_candles = True
stoploss = -0.05
use_exit_signal = True
startup_candle_count: int = 300
can_short = False
linear_roi_offset = DecimalParameter(
0.00, 0.02, default=0.005, space="sell", optimize=False, load=True
)
max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True)
def informative_pairs(self):
whitelist_pairs = self.dp.current_whitelist()
corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"]
informative_pairs = []
for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]:
for pair in whitelist_pairs:
informative_pairs.append((pair, tf))
for pair in corr_pairs:
if pair in whitelist_pairs:
continue # avoid duplication
informative_pairs.append((pair, tf))
return informative_pairs
def populate_any_indicators(
self, pair, df, tf, informative=None, set_generalized_indicators=False
):
"""
Function designed to automatically generate, name and merge features
from user indicated timeframes in the configuration file. User controls the indicators
passed to training/prediction by prepending indicators with `'%-' + coin`
(see convention below), i.e. the user should not prepend any supporting metrics
(e.g. bb_lowerband below) with % unless they explicitly want to pass that metric to the
model.
:param pair: pair to be used as informative
:param df: strategy dataframe which will receive merges from informatives
:param tf: timeframe of the dataframe which will modify the feature names
:param informative: the dataframe associated with the informative pair
"""
coin = pair.split('/')[0]
if informative is None:
informative = self.dp.get_pair_dataframe(pair, tf)
# first loop is automatically duplicating indicators for time periods
for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
t = int(t)
informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
informative[f"%-{coin}sma-period_{t}"] = ta.SMA(informative, timeperiod=t)
informative[f"%-{coin}ema-period_{t}"] = ta.EMA(informative, timeperiod=t)
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
bollinger = qtpylib.bollinger_bands(
qtpylib.typical_price(informative), window=t, stds=2.2
)
informative[f"{coin}bb_lowerband-period_{t}"] = bollinger["lower"]
informative[f"{coin}bb_middleband-period_{t}"] = bollinger["mid"]
informative[f"{coin}bb_upperband-period_{t}"] = bollinger["upper"]
informative[f"%-{coin}bb_width-period_{t}"] = (
informative[f"{coin}bb_upperband-period_{t}"]
- informative[f"{coin}bb_lowerband-period_{t}"]
) / informative[f"{coin}bb_middleband-period_{t}"]
informative[f"%-{coin}close-bb_lower-period_{t}"] = (
informative["close"] / informative[f"{coin}bb_lowerband-period_{t}"]
)
informative[f"%-{coin}roc-period_{t}"] = ta.ROC(informative, timeperiod=t)
informative[f"%-{coin}relative_volume-period_{t}"] = (
informative["volume"] / informative["volume"].rolling(t).mean()
)
informative[f"%-{coin}pct-change"] = informative["close"].pct_change()
informative[f"%-{coin}raw_volume"] = informative["volume"]
informative[f"%-{coin}raw_price"] = informative["close"]
indicators = [col for col in informative if col.startswith("%")]
# This loop duplicates and shifts all indicators to add a sense of recency to data
for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
if n == 0:
continue
informative_shift = informative[indicators].shift(n)
informative_shift = informative_shift.add_suffix("_shift-" + str(n))
informative = pd.concat((informative, informative_shift), axis=1)
df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
skip_columns = [
(s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
]
df = df.drop(columns=skip_columns)
# Add generalized indicators here (because in live, it will call this
# function to populate indicators during training). Notice how we ensure not to
# add them multiple times
if set_generalized_indicators:
df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7
df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25
# user adds targets here by prepending them with &- (see convention below)
df["&-s_close"] = (
df["close"]
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
.rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
.mean()
/ df["close"]
- 1
)
# Classifiers are typically set up with strings as targets:
# df['&s-up_or_down'] = np.where( df["close"].shift(-100) >
# df["close"], 'up', 'down')
# If user wishes to use multiple targets, they can add more by
# appending more columns with '&'. Keep in mind that multiple targets
# require a multioutput prediction model such as
# templates/CatboostPredictionMultiModel.py.
# df["&-s_range"] = (
# df["close"]
# .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
# .rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
# .max()
# -
# df["close"]
# .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
# .rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
# .min()
# )
return df
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
# All indicators must be populated by populate_any_indicators() for live functionality
# to work correctly.
# the model returns all labels created by the user in `populate_any_indicators()`
# (& appended targets), an indication of whether or not the prediction should be
# accepted (do_predict), and the target mean/std values for each label,
# for each training period.
dataframe = self.freqai.start(dataframe, metadata, self)
dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
dataframe["sell_roi"] = dataframe["&-s_close_mean"] - dataframe["&-s_close_std"] * 1.25
return dataframe
def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"]]
if enter_long_conditions:
df.loc[
reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
] = (1, "long")
enter_short_conditions = [df["do_predict"] == 1, df["&-s_close"] < df["sell_roi"]]
if enter_short_conditions:
df.loc[
reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
] = (1, "short")
return df
def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
exit_long_conditions = [df["do_predict"] == 1, df["&-s_close"] < df["sell_roi"] * 0.25]
if exit_long_conditions:
df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1
exit_short_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"] * 0.25]
if exit_short_conditions:
df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1
return df
def get_ticker_indicator(self):
return int(self.config["timeframe"][:-1])
def custom_exit(
self, pair: str, trade: Trade, current_time, current_rate, current_profit, **kwargs
):
dataframe, _ = self.dp.get_analyzed_dataframe(pair=pair, timeframe=self.timeframe)
trade_date = timeframe_to_prev_date(self.config["timeframe"], trade.open_date_utc)
trade_candle = dataframe.loc[(dataframe["date"] == trade_date)]
if trade_candle.empty:
return None
trade_candle = trade_candle.squeeze()
follow_mode = self.config.get("freqai", {}).get("follow_mode", False)
if not follow_mode:
pair_dict = self.freqai.dd.pair_dict
else:
pair_dict = self.freqai.dd.follower_dict
entry_tag = trade.enter_tag
if (
"prediction" + entry_tag not in pair_dict[pair]
or pair_dict[pair]['extras']["prediction" + entry_tag] == 0
):
pair_dict[pair]['extras']["prediction" + entry_tag] = abs(trade_candle["&-s_close"])
if not follow_mode:
self.freqai.dd.save_drawer_to_disk()
else:
self.freqai.dd.save_follower_dict_to_disk()
roi_price = pair_dict[pair]['extras']["prediction" + entry_tag]
roi_time = self.max_roi_time_long.value
roi_decay = roi_price * (
1 - ((current_time - trade.open_date_utc).total_seconds()) / (roi_time * 60)
)
if roi_decay < 0:
roi_decay = self.linear_roi_offset.value
else:
roi_decay += self.linear_roi_offset.value
if current_profit > roi_decay:
return "roi_custom_win"
if current_profit < -roi_decay:
return "roi_custom_loss"
def confirm_trade_exit(
self,
pair: str,
trade: Trade,
order_type: str,
amount: float,
rate: float,
time_in_force: str,
exit_reason: str,
current_time,
**kwargs,
) -> bool:
entry_tag = trade.enter_tag
follow_mode = self.config.get("freqai", {}).get("follow_mode", False)
if not follow_mode:
pair_dict = self.freqai.dd.pair_dict
else:
pair_dict = self.freqai.dd.follower_dict
pair_dict[pair]['extras']["prediction" + entry_tag] = 0
if not follow_mode:
self.freqai.dd.save_drawer_to_disk()
else:
self.freqai.dd.save_follower_dict_to_disk()
return True
def confirm_trade_entry(
self,
pair: str,
order_type: str,
amount: float,
rate: float,
time_in_force: str,
current_time,
entry_tag,
side: str,
**kwargs,
) -> bool:
df, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)
last_candle = df.iloc[-1].squeeze()
if side == "long":
if rate > (last_candle["close"] * (1 + 0.0025)):
return False
else:
if rate < (last_candle["close"] * (1 - 0.0025)):
return False
return True
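# Illustrative: with last_candle["close"] == 100.0, a long entry above 100.25
# or a short entry below 99.75 is rejected (0.25% slippage guard).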