merge in inference timer and historic predictions handling improvements.
This commit is contained in:
parent ad846cdb76
commit 8961b8d560
@@ -85,6 +85,7 @@ class FreqaiDataDrawer:
         self.training_queue: Dict[str, int] = {}
         self.history_lock = threading.Lock()
         self.save_lock = threading.Lock()
+        self.pair_dict_lock = threading.Lock()
         self.old_DBSCAN_eps: Dict[str, float] = {}
         self.empty_pair_dict: pair_info = {
             "model_filename": "", "trained_timestamp": 0,
@@ -228,6 +229,7 @@ class FreqaiDataDrawer:
 
     def pair_to_end_of_training_queue(self, pair: str) -> None:
         # march all pairs up in the queue
-        for p in self.pair_dict:
-            self.pair_dict[p]["priority"] -= 1
+        with self.pair_dict_lock:
+            for p in self.pair_dict:
+                self.pair_dict[p]["priority"] -= 1
         # send pair to end of queue
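The rotation is easier to see in isolation. Below is a minimal, self-contained sketch of the same logic; the pair names and pair_info contents are invented, and since the hunk cuts off before the send-to-end line, the final assignment is one plausible implementation rather than the commit's exact code:

import threading

# Stand-in for FreqaiDataDrawer.pair_dict: lower priority trains sooner,
# priority 1 is the next pair up for training.
pair_dict = {
    "BTC/USDT": {"priority": 1},
    "ETH/USDT": {"priority": 2},
    "XRP/USDT": {"priority": 3},
}
pair_dict_lock = threading.Lock()

def pair_to_end_of_training_queue(pair: str) -> None:
    with pair_dict_lock:
        # march all pairs up in the queue
        for p in pair_dict:
            pair_dict[p]["priority"] -= 1
        # send the just-trained pair to the end of the queue (assumed detail)
        pair_dict[pair]["priority"] = len(pair_dict)

pair_to_end_of_training_queue("BTC/USDT")
# BTC/USDT -> 3, ETH/USDT -> 1, XRP/USDT -> 2

The new pair_dict_lock matters because the retraining thread rotates the queue while other code paths read priorities; holding the lock makes the decrement-and-reassign step atomic with respect to any reader that honors the same lock.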
@@ -261,13 +263,14 @@ class FreqaiDataDrawer:
         the strategy originally. Doing this allows FreqUI to always display the correct
         historic predictions.
         """
-        df = self.historic_predictions[pair]
-
-        # here are some pandas hula hoops to accommodate the possibility of a series
-        # or dataframe depending number of labels requested by user
-        nan_df = pd.DataFrame(np.nan, index=df.index[-2:] + 2, columns=df.columns)
-        df = pd.concat([df, nan_df], ignore_index=True, axis=0)
-        df = self.historic_predictions[pair] = df[:-1]
+        index = self.historic_predictions[pair].index[-1:]
+        columns = self.historic_predictions[pair].columns
+
+        nan_df = pd.DataFrame(np.nan, index=index, columns=columns)
+        self.historic_predictions[pair] = pd.concat(
+            [self.historic_predictions[pair], nan_df], ignore_index=True, axis=0)
+        df = self.historic_predictions[pair]
 
         # model outputs and associated statistics
         for label in predictions.columns:
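Seen on its own, the new code path appends exactly one all-NaN row reusing the existing columns, replacing the old index-arithmetic approach. A minimal sketch with an invented two-column frame:

import numpy as np
import pandas as pd

df = pd.DataFrame({"&-s_close": [0.01, 0.02], "do_predict": [1, 1]})

# One-row all-NaN frame with the same columns; ignore_index renumbers rows,
# so the index passed to the NaN frame only matters for its length.
nan_df = pd.DataFrame(np.nan, index=df.index[-1:], columns=df.columns)
df = pd.concat([df, nan_df], ignore_index=True, axis=0)
# df now has three rows; the last is NaN in every column and gets filled
# label by label further down in append_model_predictions.

Because pd.DataFrame broadcasts the scalar np.nan across the given index and columns, this works whether the model produced one label or many, which is what the removed "pandas hula hoops" comment was wrestling with.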
@@ -523,7 +526,7 @@ class FreqaiDataDrawer:
                     history_data[pair][tf] = pd.concat(
                         [
                             history_data[pair][tf],
-                            strategy.dp.get_pair_dataframe(pair, tf).iloc[index:],
+                            df_dp.iloc[index:],
                         ],
                         ignore_index=True,
                         axis=0,
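The one-line change replaces a repeated strategy.dp.get_pair_dataframe() call with df_dp, a dataframe evidently fetched once earlier in the method. The surrounding append-only-new-candles pattern, sketched with toy data (the variable index marks the first df_dp row missing from the cache):

import pandas as pd

cached = pd.DataFrame({"close": [100.0, 101.0, 102.0]})        # candles already held
df_dp = pd.DataFrame({"close": [101.0, 102.0, 103.0, 104.0]})  # freshly fetched
index = 2                                                      # rows 0-1 overlap the cache

cached = pd.concat(
    [cached, df_dp.iloc[index:]],
    ignore_index=True,
    axis=0,
)
# cached["close"] -> [100.0, 101.0, 102.0, 103.0, 104.0]

Fetching once and reusing df_dp avoids asking the dataprovider for the same pair/timeframe dataframe on every append.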
@@ -7,11 +7,11 @@ import time
 from abc import ABC, abstractmethod
 from pathlib import Path
 from typing import Any, Dict, Tuple
 
 import numpy as np
 import pandas as pd
 from numpy.typing import NDArray
 from pandas import DataFrame
+from freqtrade.exchange import timeframe_to_seconds
 from threading import Lock
 from freqtrade.configuration import TimeRange
 from freqtrade.enums import RunMode
@@ -82,6 +82,9 @@ class IFreqaiModel(ABC):
         self.last_trade_database_summary: DataFrame = {}
         self.current_trade_database_summary: DataFrame = {}
         self.analysis_lock = Lock()
+        self.inference_time: float = 0
+        self.begin_time: float = 0
+        self.base_tf_seconds = timeframe_to_seconds(self.config['timeframe'])
 
     def assert_config(self, config: Dict[str, Any]) -> None:
 
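base_tf_seconds feeds the 25% budget checked in inference_timer further down. timeframe_to_seconds is freqtrade's own helper; the stand-in below only covers the common suffixes and exists so the arithmetic is runnable in isolation:

# Simplified stand-in for freqtrade.exchange.timeframe_to_seconds.
UNIT_SECONDS = {"m": 60, "h": 3600, "d": 86400}

def timeframe_to_seconds(timeframe: str) -> int:
    return int(timeframe[:-1]) * UNIT_SECONDS[timeframe[-1]]

base_tf_seconds = timeframe_to_seconds("5m")  # 300
budget = 0.25 * base_tf_seconds               # 75.0 seconds allowed per whitelist pass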
@@ -104,6 +107,7 @@ class IFreqaiModel(ABC):
         self.dd.set_pair_dict_info(metadata)
 
         if self.live:
+            self.inference_timer('start')
             self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
             dk = self.start_live(dataframe, metadata, strategy, self.dk)
 
@@ -123,6 +127,8 @@ class IFreqaiModel(ABC):
 
         dataframe = dk.remove_features_from_df(dk.return_dataframe)
         del dk
+        if self.live:
+            self.inference_timer('stop')
         return dataframe
 
     @threaded
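The timer is bracketed by hand: 'start' on entering the live path, 'stop' just before the dataframe is returned. Purely as a sketch of the design space (not what the commit does), the same pairing could be guaranteed with a context manager, so the stop fires even if inference raises:

from contextlib import contextmanager

@contextmanager
def timed_inference(model):
    # Hypothetical wrapper around the same inference_timer('start'/'stop') pair.
    model.inference_timer('start')
    try:
        yield
    finally:
        model.inference_timer('stop')

With the hand-bracketed version, an exception between the two calls leaves begin_time set without a matching stop; that is a trade-off the commit's approach accepts.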
@@ -155,6 +161,8 @@ class IFreqaiModel(ABC):
                     new_trained_timerange, pair, strategy, dk, data_load_timerange
                 )
 
+                self.dd.save_historic_predictions_to_disk()
+
     def start_backtesting(
         self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen
     ) -> FreqaiDataKitchen:
@@ -340,7 +348,6 @@ class IFreqaiModel(ABC):
         # historical accuracy reasons.
         pred_df, do_preds = self.predict(dataframe.iloc[-self.CONV_WIDTH:], dk, first=False)
 
-        self.dd.save_historic_predictions_to_disk()
         if self.freqai_info.get('fit_live_predictions_candles', 0) and self.live:
             self.fit_live_predictions(dk, pair)
         self.dd.append_model_predictions(pair, pred_df, do_preds, dk, len(dataframe))
@@ -503,7 +510,6 @@ class IFreqaiModel(ABC):
         dk.set_new_model_names(pair, new_trained_timerange)
         self.dd.pair_dict[pair]["first"] = False
         if self.dd.pair_dict[pair]["priority"] == 1 and self.scanning:
             with self.analysis_lock:
                 self.dd.pair_to_end_of_training_queue(pair)
         self.dd.save_data(model, pair, dk)
 
@@ -582,6 +588,28 @@ class IFreqaiModel(ABC):
 
         return
 
+    def inference_timer(self, do='start'):
+        """
+        Timer designed to track the cumulative time spent in FreqAI for one pass through
+        the whitelist. This will check if the time spent is more than 1/4 the time
+        of a single candle, and if so, it will warn the user of degraded performance.
+        """
+        if do == 'start':
+            self.pair_it += 1
+            self.begin_time = time.time()
+        elif do == 'stop':
+            end = time.time()
+            self.inference_time += (end - self.begin_time)
+            if self.pair_it == self.total_pairs:
+                logger.info(
+                    f'Total time spent inferencing pairlist {self.inference_time:.2f} seconds')
+                if self.inference_time > 0.25 * self.base_tf_seconds:
+                    logger.warning('Inference took over 25% of the candle time. Reduce pairlist to'
+                                   ' avoid blinding open trades and degrading performance.')
+                self.pair_it = 0
+                self.inference_time = 0
+        return
+
     # The following methods are overridden by user-made prediction models.
     # See freqai/prediction_models/CatboostPredictionModel.py for an example.
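To make the accounting concrete, here is a toy harness (class, pair names, and numbers all invented) that drives the new method the way the live path does, one start/stop pair per whitelisted pair:

import logging
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class TimerDemo:
    # Minimal host carrying only the state inference_timer touches.
    def __init__(self, total_pairs: int, base_tf_seconds: int):
        self.total_pairs = total_pairs          # e.g. len(whitelist)
        self.base_tf_seconds = base_tf_seconds  # e.g. timeframe_to_seconds('5m')
        self.pair_it = 0
        self.inference_time: float = 0
        self.begin_time: float = 0

    def inference_timer(self, do='start'):
        # Same logic as the method added in the hunk above.
        if do == 'start':
            self.pair_it += 1
            self.begin_time = time.time()
        elif do == 'stop':
            self.inference_time += (time.time() - self.begin_time)
            if self.pair_it == self.total_pairs:
                logger.info(
                    f'Total time spent inferencing pairlist '
                    f'{self.inference_time:.2f} seconds')
                if self.inference_time > 0.25 * self.base_tf_seconds:
                    logger.warning('Inference took over 25% of the candle time.')
                self.pair_it = 0
                self.inference_time = 0

demo = TimerDemo(total_pairs=3, base_tf_seconds=300)
for _pair in ['BTC/USDT', 'ETH/USDT', 'XRP/USDT']:
    demo.inference_timer('start')
    time.sleep(0.01)  # stand-in for the real predict() call
    demo.inference_timer('stop')
# After the third stop the total is logged; with 5m candles (300s) the
# warning fires only if a full pass took longer than 75 seconds.

Note the counters reset only when pair_it reaches total_pairs, so the 25% check evaluates one whole pass through the pairlist rather than a single pair.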