Merge remote-tracking branch 'origin/develop' into feat/convolutional-neural-net
@@ -60,10 +60,4 @@ def start_analysis_entries_exits(args: Dict[str, Any]) -> None:
     logger.info('Starting freqtrade in analysis mode')
 
-    process_entry_exit_reasons(config['exportfilename'],
-                               config['exchange']['pair_whitelist'],
-                               config['analysis_groups'],
-                               config['enter_reason_list'],
-                               config['exit_reason_list'],
-                               config['indicator_list']
-                               )
+    process_entry_exit_reasons(config)
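The call site collapses from six positional arguments to the config object itself. A minimal sketch of what the new entry point consumes — the key names mirror the CLI options registered in arguments.py below, while the file name, pairs, and indicator values are hypothetical:

    # Hedged sketch: shape of the config consumed by process_entry_exit_reasons(config).
    config = {
        'exportfilename': 'user_data/backtest_results',            # hypothetical path
        'exchange': {'pair_whitelist': ['BTC/USDT', 'ETH/USDT']},  # hypothetical pairs
        'analysis_groups': ['0', '1', '2'],
        'enter_reason_list': ['all'],
        'exit_reason_list': ['all'],
        'indicator_list': ['rsi'],                                 # hypothetical indicator
        'timerange': '20220101-20220301',                          # newly supported filter
    }
    process_entry_exit_reasons(config)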
@@ -106,7 +106,7 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop
                      "disableparamexport", "backtest_breakdown"]
 
 ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
-                              "exit_reason_list", "indicator_list"]
+                              "exit_reason_list", "indicator_list", "timerange"]
 
 NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
                     "list-markets", "list-pairs", "list-strategies", "list-freqaimodels",
@@ -462,6 +462,9 @@ class Configuration:
         self._args_to_config(config, argname='indicator_list',
                              logstring='Analysis indicator list: {}')
 
+        self._args_to_config(config, argname='timerange',
+                             logstring='Filter trades by timerange: {}')
+
     def _process_runmode(self, config: Config) -> None:
 
         self._args_to_config(config, argname='dry_run',
@@ -591,6 +591,7 @@ CONF_SCHEMA = {
                         "model_type": {"type": "string", "default": "PPO"},
                         "policy_type": {"type": "string", "default": "MlpPolicy"},
+                        "net_arch": {"type": "array", "default": [128, 128]},
                         "randomize_starting_position": {"type": "boolean", "default": False},
                         "model_reward_parameters": {
                             "type": "object",
                             "properties": {
@@ -1,11 +1,12 @@
 import logging
 from pathlib import Path
 from typing import List, Optional
 
 import joblib
 import pandas as pd
 from tabulate import tabulate
 
+from freqtrade.configuration import TimeRange
+from freqtrade.constants import Config
 from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data,
                                        load_backtest_stats)
 from freqtrade.exceptions import OperationalException
@@ -152,37 +153,55 @@ def _do_group_table_output(bigdf, glist):
         logger.warning("Invalid group mask specified.")
 
 
-def _print_results(analysed_trades, stratname, analysis_groups,
-                   enter_reason_list, exit_reason_list,
-                   indicator_list, columns=None):
-    if columns is None:
-        columns = ['pair', 'open_date', 'close_date', 'profit_abs', 'enter_reason', 'exit_reason']
+def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'):
+    if timerange:
+        if timerange.starttype == 'date':
+            df = df.loc[(df[df_date_col] >= timerange.startdt)]
+        if timerange.stoptype == 'date':
+            df = df.loc[(df[df_date_col] < timerange.stopdt)]
+    return df
 
-    bigdf = pd.DataFrame()
 
+def _select_rows_by_tags(df, enter_reason_list, exit_reason_list):
+    if enter_reason_list and "all" not in enter_reason_list:
+        df = df.loc[(df['enter_reason'].isin(enter_reason_list))]
 
+    if exit_reason_list and "all" not in exit_reason_list:
+        df = df.loc[(df['exit_reason'].isin(exit_reason_list))]
+    return df
 
+
+def prepare_results(analysed_trades, stratname,
+                    enter_reason_list, exit_reason_list,
+                    timerange=None):
+    res_df = pd.DataFrame()
     for pair, trades in analysed_trades[stratname].items():
-        bigdf = pd.concat([bigdf, trades], ignore_index=True)
+        res_df = pd.concat([res_df, trades], ignore_index=True)
 
-    if bigdf.shape[0] > 0 and ('enter_reason' in bigdf.columns):
+    res_df = _select_rows_within_dates(res_df, timerange)
+
+    if res_df is not None and res_df.shape[0] > 0 and ('enter_reason' in res_df.columns):
+        res_df = _select_rows_by_tags(res_df, enter_reason_list, exit_reason_list)
+
+    return res_df
+
+
+def print_results(res_df, analysis_groups, indicator_list):
+    if res_df.shape[0] > 0:
         if analysis_groups:
-            _do_group_table_output(bigdf, analysis_groups)
-
-        if enter_reason_list and "all" not in enter_reason_list:
-            bigdf = bigdf.loc[(bigdf['enter_reason'].isin(enter_reason_list))]
-
-        if exit_reason_list and "all" not in exit_reason_list:
-            bigdf = bigdf.loc[(bigdf['exit_reason'].isin(exit_reason_list))]
+            _do_group_table_output(res_df, analysis_groups)
 
         if "all" in indicator_list:
-            print(bigdf)
+            print(res_df)
         elif indicator_list is not None:
            available_inds = []
            for ind in indicator_list:
-                if ind in bigdf:
+                if ind in res_df:
                    available_inds.append(ind)
            ilist = ["pair", "enter_reason", "exit_reason"] + available_inds
-            _print_table(bigdf[ilist], sortcols=['exit_reason'], show_index=False)
+            _print_table(res_df[ilist], sortcols=['exit_reason'], show_index=False)
     else:
-        print("\\_ No trades to show")
+        print("\\No trades to show")
 
 
 def _print_table(df, sortcols=None, show_index=False):
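The refactor splits the old print-everything helper into a pure data step (prepare_results) and a presentation step (print_results), so the filtering becomes testable in isolation. A hedged sketch of how the two now compose — the trades dictionary, strategy name, and column values are hypothetical:

    import pandas as pd

    # Hypothetical analysed trades, keyed by strategy and then pair.
    analysed_trades = {'MyStrategy': {'BTC/USDT': pd.DataFrame({
        'date': pd.to_datetime(['2022-01-05']),
        'pair': ['BTC/USDT'], 'profit_abs': [1.2],
        'enter_reason': ['breakout'], 'exit_reason': ['roi']})}}

    res_df = prepare_results(analysed_trades, 'MyStrategy',
                             enter_reason_list=['all'], exit_reason_list=['all'],
                             timerange=None)
    print_results(res_df, analysis_groups=[], indicator_list=['all'])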
@@ -201,27 +220,34 @@ def _print_table(df, sortcols=None, show_index=False):
     )
 
 
-def process_entry_exit_reasons(backtest_dir: Path,
-                               pairlist: List[str],
-                               analysis_groups: Optional[List[str]] = ["0", "1", "2"],
-                               enter_reason_list: Optional[List[str]] = ["all"],
-                               exit_reason_list: Optional[List[str]] = ["all"],
-                               indicator_list: Optional[List[str]] = []):
+def process_entry_exit_reasons(config: Config):
     try:
-        backtest_stats = load_backtest_stats(backtest_dir)
+        analysis_groups = config.get('analysis_groups', [])
+        enter_reason_list = config.get('enter_reason_list', ["all"])
+        exit_reason_list = config.get('exit_reason_list', ["all"])
+        indicator_list = config.get('indicator_list', [])
+
+        timerange = TimeRange.parse_timerange(None if config.get(
+            'timerange') is None else str(config.get('timerange')))
+
+        backtest_stats = load_backtest_stats(config['exportfilename'])
+
         for strategy_name, results in backtest_stats['strategy'].items():
-            trades = load_backtest_data(backtest_dir, strategy_name)
+            trades = load_backtest_data(config['exportfilename'], strategy_name)
 
             if not trades.empty:
-                signal_candles = _load_signal_candles(backtest_dir)
-                analysed_trades_dict = _process_candles_and_indicators(pairlist, strategy_name,
-                                                                       trades, signal_candles)
-                _print_results(analysed_trades_dict,
-                               strategy_name,
-                               analysis_groups,
-                               enter_reason_list,
-                               exit_reason_list,
-                               indicator_list)
+                signal_candles = _load_signal_candles(config['exportfilename'])
+                analysed_trades_dict = _process_candles_and_indicators(
+                    config['exchange']['pair_whitelist'], strategy_name,
+                    trades, signal_candles)
+
+                res_df = prepare_results(analysed_trades_dict, strategy_name,
+                                         enter_reason_list, exit_reason_list,
+                                         timerange=timerange)
+
+                print_results(res_df,
+                              analysis_groups,
+                              indicator_list)
+
     except ValueError as e:
         raise OperationalException(e) from e
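TimeRange.parse_timerange accepts the CLI-style range string and turns it into the start/stop boundaries that _select_rows_within_dates applies above. A small sketch — the range value is hypothetical:

    from freqtrade.configuration import TimeRange

    timerange = TimeRange.parse_timerange('20220101-20220301')  # hypothetical range
    # starttype/stoptype come back as 'date'; startdt/stopdt become the
    # trade-filter bounds used by _select_rows_within_dates above.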
@@ -1,4 +1,5 @@
 import logging
+import random
 from abc import abstractmethod
 from enum import Enum
 from typing import Optional
@@ -121,6 +122,10 @@ class BaseEnvironment(gym.Env):
         self._done = False
 
         if self.starting_point is True:
+            if self.rl_config.get('randomize_starting_position', False):
+                length_of_data = int(self._end_tick / 4)
+                start_tick = random.randint(self.window_size + 1, length_of_data)
+                self._start_tick = start_tick
             self._position_history = (self._start_tick * [None]) + [self._position]
         else:
             self._position_history = (self.window_size * [None]) + [self._position]
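Randomizing the episode's starting tick keeps the agent from overfitting to a single stretch of the price series: the start is drawn from the first quarter of the data, but always past the observation window. The same draw as a standalone sketch, with hypothetical window and data sizes:

    import random

    window_size = 10                       # hypothetical observation window
    end_tick = 4000                        # hypothetical length of the price series
    length_of_data = int(end_tick / 4)
    # Start somewhere in the first quarter, but never inside the warm-up window.
    start_tick = random.randint(window_size + 1, length_of_data)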
@@ -189,12 +194,12 @@ class BaseEnvironment(gym.Env):
         if self._position == Positions.Neutral:
             return 0.
         elif self._position == Positions.Short:
             current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open)
             last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open)
             return (last_trade_price - current_price) / last_trade_price
         elif self._position == Positions.Long:
-            current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open)
-            last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open)
-            return (last_trade_price - current_price) / last_trade_price
+            current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open)
+            last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open)
+            return (current_price - last_trade_price) / last_trade_price
         else:
             return 0.
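The fix makes the long branch the mirror image of the short one: a long pays the entry fee on the opening price and the exit fee on the closing price, and its return is (current − entry) / entry. A quick worked check, assuming entry fees mark the price up and exit fees mark it down (prices and fee are hypothetical):

    fee = 0.001                                 # hypothetical 0.1% per side
    last_trade_price = 100.0 * (1 + fee)        # long entry, fee added
    current_price = 110.0 * (1 - fee)           # long exit, fee deducted
    pnl = (current_price - last_trade_price) / last_trade_price
    # ~0.098: a 10% move nets slightly less than 10% after both fees.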
@@ -64,6 +64,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         self.policy_type = self.freqai_info['rl_config']['policy_type']
         self.unset_outlier_removal()
+        self.net_arch = self.rl_config.get('net_arch', [128, 128])
         self.dd.model_type = import_str
 
     def unset_outlier_removal(self):
         """
@@ -192,6 +193,10 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         now = datetime.now(timezone.utc).timestamp()
         trade_duration = int((now - trade.open_date_utc.timestamp()) / self.base_tf_seconds)
         current_profit = trade.calc_profit_ratio(current_rate)
+        if trade.is_short:
+            market_side = 0
+        else:
+            market_side = 1
 
         return market_side, current_profit, int(trade_duration)
@@ -4,7 +4,7 @@ import logging
 import re
 import shutil
 import threading
-from datetime import datetime, timezone
+from datetime import datetime, timedelta, timezone
 from pathlib import Path
 from typing import Any, Dict, Tuple, TypedDict
@@ -82,6 +82,7 @@ class FreqaiDataDrawer:
         self.historic_predictions_bkp_path = Path(
             self.full_path / "historic_predictions.backup.pkl")
         self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json")
+        self.global_metadata_path = Path(self.full_path / "global_metadata.json")
         self.metric_tracker_path = Path(self.full_path / "metric_tracker.json")
         self.follow_mode = follow_mode
         if follow_mode:
@@ -99,12 +100,7 @@ class FreqaiDataDrawer:
         self.empty_pair_dict: pair_info = {
             "model_filename": "", "trained_timestamp": 0,
             "data_path": "", "extras": {}}
-        if 'Reinforcement' in self.config['freqaimodel']:
-            self.model_type = 'stable_baselines'
-            logger.warning('User passed a ReinforcementLearner model, FreqAI will '
-                           'now use stable_baselines3 to save models.')
-        else:
-            self.model_type = self.freqai_info.get('model_save_type', 'joblib')
+        self.model_type = self.freqai_info.get('model_save_type', 'joblib')
 
     def update_metric_tracker(self, metric: str, value: float, pair: str) -> None:
         """
@@ -132,6 +128,17 @@ class FreqaiDataDrawer:
             self.update_metric_tracker('cpu_load5min', load5 / cpus, pair)
             self.update_metric_tracker('cpu_load15min', load15 / cpus, pair)
 
+    def load_global_metadata_from_disk(self):
+        """
+        Locate and load previously saved global metadata from the present model folder.
+        """
+        exists = self.global_metadata_path.is_file()
+        if exists:
+            with open(self.global_metadata_path, "r") as fp:
+                metadata_dict = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
+                return metadata_dict
+        return {}
+
     def load_drawer_from_disk(self):
         """
         Locate and load a previously saved data drawer full of all pair model metadata in
@@ -232,6 +239,15 @@ class FreqaiDataDrawer:
             rapidjson.dump(self.follower_dict, fp, default=self.np_encoder,
                            number_mode=rapidjson.NM_NATIVE)
 
+    def save_global_metadata_to_disk(self, metadata: Dict[str, Any]):
+        """
+        Save global metadata json to disk
+        """
+        with self.save_lock:
+            with open(self.global_metadata_path, 'w') as fp:
+                rapidjson.dump(metadata, fp, default=self.np_encoder,
+                               number_mode=rapidjson.NM_NATIVE)
+
     def create_follower_dict(self):
         """
         Create a dictionary for each follower to maintain unique persistent prediction targets
@@ -487,7 +503,7 @@ class FreqaiDataDrawer:
             dump(model, save_path / f"{dk.model_filename}_model.joblib")
         elif self.model_type == 'keras':
             model.save(save_path / f"{dk.model_filename}_model.h5")
-        elif 'stable_baselines' in self.model_type:
+        elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
             model.save(save_path / f"{dk.model_filename}_model.zip")
 
         if dk.svm_model is not None:
@@ -573,9 +589,9 @@ class FreqaiDataDrawer:
         elif self.model_type == 'keras':
             from tensorflow import keras
             model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
-        elif self.model_type == 'stable_baselines':
+        elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
             mod = importlib.import_module(
-                'stable_baselines3', self.freqai_info['rl_config']['model_type'])
+                self.model_type, self.freqai_info['rl_config']['model_type'])
             MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type'])
             model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model")
@@ -701,3 +717,31 @@ class FreqaiDataDrawer:
         ).reset_index(drop=True)
 
         return corr_dataframes, base_dataframes
+
+    def get_timerange_from_live_historic_predictions(self) -> TimeRange:
+        """
+        Returns timerange information based on historic predictions file
+        :return: timerange calculated from saved live data
+        """
+        if not self.historic_predictions_path.is_file():
+            raise OperationalException(
+                'Historic predictions not found. Historic predictions data is required '
+                'to run backtest with the freqai-backtest-live-models option '
+            )
+
+        self.load_historic_predictions_from_disk()
+
+        all_pairs_end_dates = []
+        for pair in self.historic_predictions:
+            pair_historic_data = self.historic_predictions[pair]
+            all_pairs_end_dates.append(pair_historic_data.date_pred.max())
+
+        global_metadata = self.load_global_metadata_from_disk()
+        start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"]))
+        end_date = max(all_pairs_end_dates)
+
+        # add 1 day to string timerange to ensure BT module will load all dataframe data
+        end_date = end_date + timedelta(days=1)
+        backtesting_timerange = TimeRange(
+            'date', 'date', int(start_date.timestamp()), int(end_date.timestamp())
+        )
+        return backtesting_timerange
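The backtest range is derived entirely from live artifacts: the recorded dry/live start date becomes the start, and the newest saved prediction — padded by a day so the final candles still load — becomes the stop. The arithmetic as a sketch, with hypothetical timestamps:

    from datetime import datetime, timedelta, timezone

    from freqtrade.configuration import TimeRange

    start_date = datetime.fromtimestamp(1669852800, tz=timezone.utc)  # hypothetical start_dry_live_date
    end_date = datetime(2022, 12, 15, tzinfo=timezone.utc)            # hypothetical newest date_pred
    end_date = end_date + timedelta(days=1)   # pad a day so the BT module loads all candles
    timerange = TimeRange('date', 'date',
                          int(start_date.timestamp()), int(end_date.timestamp()))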
@@ -1,7 +1,7 @@
 import copy
 import logging
 import shutil
-from datetime import datetime, timedelta, timezone
+from datetime import datetime, timezone
 from math import cos, sin
 from pathlib import Path
 from typing import Any, Dict, List, Tuple
@@ -87,12 +87,7 @@ class FreqaiDataKitchen:
         if not self.live:
             self.full_path = self.get_full_models_path(self.config)
 
-            if self.backtest_live_models:
-                if self.pair:
-                    self.set_timerange_from_ready_models()
-                    (self.training_timeranges,
-                     self.backtesting_timeranges) = self.split_timerange_live_models()
-            else:
+            if not self.backtest_live_models:
                 self.full_timerange = self.create_fulltimerange(
                     self.config["timerange"], self.freqai_config.get("train_period_days", 0)
                 )
@@ -460,29 +455,6 @@ class FreqaiDataKitchen:
         # print(tr_training_list, tr_backtesting_list)
         return tr_training_list_timerange, tr_backtesting_list_timerange
 
-    def split_timerange_live_models(
-        self
-    ) -> Tuple[list, list]:
-
-        tr_backtesting_list_timerange = []
-        asset = self.pair.split("/")[0]
-        if asset not in self.backtest_live_models_data["assets_end_dates"]:
-            raise OperationalException(
-                f"Model not available for pair {self.pair}. "
-                "Please, try again after removing this pair from the configuration file."
-            )
-        asset_data = self.backtest_live_models_data["assets_end_dates"][asset]
-        backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"]
-        model_end_dates = [x for x in asset_data]
-        model_end_dates.append(backtesting_timerange.stopts)
-        model_end_dates.sort()
-        for index, item in enumerate(model_end_dates):
-            if len(model_end_dates) > (index + 1):
-                tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1])
-                tr_backtesting_list_timerange.append(tr_to_add)
-
-        return tr_backtesting_list_timerange, tr_backtesting_list_timerange
-
     def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame:
         """
         Given a full dataframe, extract the user desired window
@@ -978,7 +950,8 @@ class FreqaiDataKitchen:
         return weights
 
     def get_predictions_to_append(self, predictions: DataFrame,
-                                  do_predict: npt.ArrayLike) -> DataFrame:
+                                  do_predict: npt.ArrayLike,
+                                  dataframe_backtest: DataFrame) -> DataFrame:
         """
         Get backtest prediction from current backtest period
         """
@@ -1000,7 +973,9 @@ class FreqaiDataKitchen:
         if self.freqai_config["feature_parameters"].get("DI_threshold", 0) > 0:
             append_df["DI_values"] = self.DI_values
 
-        return append_df
+        dataframe_backtest.reset_index(drop=True, inplace=True)
+        merged_df = pd.concat([dataframe_backtest["date"], append_df], axis=1)
+        return merged_df
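Stamping each prediction chunk with the backtest slice's date column is what later allows a key-based merge instead of relying on row position. A minimal sketch of the concat — the column names and values are hypothetical:

    import pandas as pd

    dataframe_backtest = pd.DataFrame({'date': pd.to_datetime(['2022-01-01', '2022-01-02'])})
    append_df = pd.DataFrame({'&-prediction': [0.1, -0.2], 'do_predict': [1, 1]})
    dataframe_backtest.reset_index(drop=True, inplace=True)
    # Column-wise concat: each prediction row carries its candle's date.
    merged_df = pd.concat([dataframe_backtest['date'], append_df], axis=1)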
     def append_predictions(self, append_df: DataFrame) -> None:
         """
@@ -1010,23 +985,18 @@ class FreqaiDataKitchen:
         if self.full_df.empty:
             self.full_df = append_df
         else:
-            self.full_df = pd.concat([self.full_df, append_df], axis=0)
+            self.full_df = pd.concat([self.full_df, append_df], axis=0, ignore_index=True)
 
     def fill_predictions(self, dataframe):
         """
         Back fill values to before the backtesting range so that the dataframe matches size
         when it goes back to the strategy. These rows are not included in the backtest.
         """
 
-        len_filler = len(dataframe) - len(self.full_df.index)  # startup_candle_count
-        filler_df = pd.DataFrame(
-            np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns
-        )
-
-        self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True)
-
         to_keep = [col for col in dataframe.columns if not col.startswith("&")]
-        self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1)
+        self.return_dataframe = pd.merge(dataframe[to_keep],
+                                         self.full_df, how='left', on='date')
+        self.return_dataframe[self.full_df.columns] = (
+            self.return_dataframe[self.full_df.columns].fillna(value=0))
         self.full_df = DataFrame()
 
         return
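Switching from positional concat to a left merge on the date key means candles without a prediction (the startup period and any gaps) simply come back as NaN and are zero-filled, which replaces the hand-built zero-filler block. The pattern as a sketch, with hypothetical columns:

    import pandas as pd

    dataframe = pd.DataFrame({'date': pd.to_datetime(['2021-12-31', '2022-01-01', '2022-01-02']),
                              'close': [99.0, 100.0, 101.0]})
    full_df = pd.DataFrame({'date': pd.to_datetime(['2022-01-01', '2022-01-02']),
                            '&-prediction': [0.1, -0.2]})
    out = pd.merge(dataframe, full_df, how='left', on='date')
    pred_cols = ['&-prediction']
    out[pred_cols] = out[pred_cols].fillna(value=0)
    # The 2021-12-31 row (no prediction) gets 0 instead of misaligned data.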
@@ -1323,22 +1293,22 @@ class FreqaiDataKitchen:
         self, append_df: DataFrame
     ) -> None:
         """
-        Save prediction dataframe from backtesting to h5 file format
+        Save prediction dataframe from backtesting to feather file format
         :param append_df: dataframe for backtesting period
         """
         full_predictions_folder = Path(self.full_path / self.backtest_predictions_folder)
         if not full_predictions_folder.is_dir():
             full_predictions_folder.mkdir(parents=True, exist_ok=True)
 
-        append_df.to_hdf(self.backtesting_results_path, key='append_df', mode='w')
+        append_df.to_feather(self.backtesting_results_path)
 
     def get_backtesting_prediction(
         self
     ) -> DataFrame:
         """
-        Get prediction dataframe from h5 file format
+        Get prediction dataframe from feather file format
         """
-        append_df = pd.read_hdf(self.backtesting_results_path)
+        append_df = pd.read_feather(self.backtesting_results_path)
         return append_df
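Feather (Arrow IPC) is a lighter dependency than HDF5 and preserves dtypes, including the datetime date column that the validity check below now relies on. A round-trip sketch, assuming pyarrow is installed and using a hypothetical path:

    import pandas as pd

    df = pd.DataFrame({'date': pd.to_datetime(['2022-01-01']), '&-prediction': [0.1]})
    df.to_feather('prediction.feather')              # hypothetical path
    restored = pd.read_feather('prediction.feather')
    assert restored['date'].dtype == df['date'].dtype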
     def check_if_backtest_prediction_is_valid(
@@ -1354,19 +1324,20 @@ class FreqaiDataKitchen:
         """
         path_to_predictionfile = Path(self.full_path /
                                       self.backtest_predictions_folder /
-                                      f"{self.model_filename}_prediction.h5")
+                                      f"{self.model_filename}_prediction.feather")
         self.backtesting_results_path = path_to_predictionfile
 
         file_exists = path_to_predictionfile.is_file()
 
         if file_exists:
             append_df = self.get_backtesting_prediction()
-            if len(append_df) == len_backtest_df:
+            if len(append_df) == len_backtest_df and 'date' in append_df:
                 logger.info(f"Found backtesting prediction file at {path_to_predictionfile}")
                 return True
             else:
                 logger.info("A new backtesting prediction file is required. "
-                            "(Number of predictions is different from dataframe length).")
+                            "(Number of predictions is different from dataframe length or "
+                            "old prediction file version).")
                 return False
         else:
             logger.info(
@@ -1374,17 +1345,6 @@ class FreqaiDataKitchen:
             )
             return False
 
-    def set_timerange_from_ready_models(self):
-        backtesting_timerange, \
-            assets_end_dates = (
-                self.get_timerange_and_assets_end_dates_from_ready_models(self.full_path))
-
-        self.backtest_live_models_data = {
-            "backtesting_timerange": backtesting_timerange,
-            "assets_end_dates": assets_end_dates
-        }
-        return
-
     def get_full_models_path(self, config: Config) -> Path:
         """
         Returns default FreqAI model path
@@ -1395,88 +1355,6 @@ class FreqaiDataKitchen:
             config["user_data_dir"] / "models" / str(freqai_config.get("identifier"))
         )
 
-    def get_timerange_and_assets_end_dates_from_ready_models(
-            self, models_path: Path) -> Tuple[TimeRange, Dict[str, Any]]:
-        """
-        Returns timerange information based on a FreqAI model directory
-        :param models_path: FreqAI model path
-
-        :return: a Tuple with (Timerange calculated from directory and
-        a Dict with pair and model end training dates info)
-        """
-        all_models_end_dates = []
-        assets_end_dates: Dict[str, Any] = self.get_assets_timestamps_training_from_ready_models(
-            models_path)
-        for key in assets_end_dates:
-            for model_end_date in assets_end_dates[key]:
-                if model_end_date not in all_models_end_dates:
-                    all_models_end_dates.append(model_end_date)
-
-        if len(all_models_end_dates) == 0:
-            raise OperationalException(
-                'At least 1 saved model is required to '
-                'run backtest with the freqai-backtest-live-models option'
-            )
-
-        if len(all_models_end_dates) == 1:
-            logger.warning(
-                "Only 1 model was found. Backtesting will run with the "
-                "timerange from the end of the training date to the current date"
-            )
-
-        finish_timestamp = int(datetime.now(tz=timezone.utc).timestamp())
-        if len(all_models_end_dates) > 1:
-            # After last model end date, use the same period from previous model
-            # to finish the backtest
-            all_models_end_dates.sort(reverse=True)
-            finish_timestamp = all_models_end_dates[0] + \
-                (all_models_end_dates[0] - all_models_end_dates[1])
-
-        all_models_end_dates.append(finish_timestamp)
-        all_models_end_dates.sort()
-        start_date = (datetime(*datetime.fromtimestamp(min(all_models_end_dates),
-                                                       timezone.utc).timetuple()[:3],
-                               tzinfo=timezone.utc))
-        end_date = (datetime(*datetime.fromtimestamp(max(all_models_end_dates),
-                                                     timezone.utc).timetuple()[:3],
-                             tzinfo=timezone.utc))
-
-        # add 1 day to string timerange to ensure BT module will load all dataframe data
-        end_date = end_date + timedelta(days=1)
-        backtesting_timerange = TimeRange(
-            'date', 'date', int(start_date.timestamp()), int(end_date.timestamp())
-        )
-        return backtesting_timerange, assets_end_dates
-
-    def get_assets_timestamps_training_from_ready_models(
-            self, models_path: Path) -> Dict[str, Any]:
-        """
-        Scan the models path and returns all assets end training dates (timestamp)
-        :param models_path: FreqAI model path
-
-        :return: a Dict with asset and model end training dates info
-        """
-        assets_end_dates: Dict[str, Any] = {}
-        if not models_path.is_dir():
-            raise OperationalException(
-                'Model folders not found. Saved models are required '
-                'to run backtest with the freqai-backtest-live-models option'
-            )
-        for model_dir in models_path.iterdir():
-            if str(model_dir.name).startswith("sub-train"):
-                model_end_date = int(model_dir.name.split("_")[1])
-                asset = model_dir.name.split("_")[0].replace("sub-train-", "")
-                model_file_name = (
-                    f"cb_{str(model_dir.name).replace('sub-train-', '').lower()}"
-                    "_model.joblib"
-                )
-
-                model_path_file = Path(model_dir / model_file_name)
-                if model_path_file.is_file():
-                    if asset not in assets_end_dates:
-                        assets_end_dates[asset] = []
-                    assets_end_dates[asset].append(model_end_date)
-
-        return assets_end_dates
-
     def remove_special_chars_from_feature_names(self, dataframe: pd.DataFrame) -> pd.DataFrame:
         """
         Remove all special characters from feature strings (:)
@@ -69,6 +69,7 @@ class IFreqaiModel(ABC):
         self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True)
         if self.save_backtest_models:
             logger.info('Backtesting module configured to save all models.')
 
         self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode)
         # set current candle to arbitrary historical date
         self.current_candle: datetime = datetime.fromtimestamp(637887600, tz=timezone.utc)
@@ -100,6 +101,7 @@ class IFreqaiModel(ABC):
         self.get_corr_dataframes: bool = True
         self._threads: List[threading.Thread] = []
         self._stop_event = threading.Event()
+        self.metadata: Dict[str, Any] = self.dd.load_global_metadata_from_disk()
         self.data_provider: Optional[DataProvider] = None
         self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1)
@@ -136,6 +138,7 @@ class IFreqaiModel(ABC):
             self.inference_timer('start')
             self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
             dk = self.start_live(dataframe, metadata, strategy, self.dk)
+            dataframe = dk.remove_features_from_df(dk.return_dataframe)
 
         # For backtesting, each pair enters and then gets trained for each window along the
         # sliding window defined by "train_period_days" (training window) and "live_retrain_hours"
@@ -144,20 +147,24 @@ class IFreqaiModel(ABC):
         # the concatenated results for the full backtesting period back to the strategy.
         elif not self.follow_mode:
             self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
-            if self.dk.backtest_live_models:
-                logger.info(
-                    f"Backtesting {len(self.dk.backtesting_timeranges)} timeranges (live models)")
-            else:
-                logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
             dataframe = self.dk.use_strategy_to_populate_indicators(
                 strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
             )
-            dk = self.start_backtesting(dataframe, metadata, self.dk)
+            if not self.config.get("freqai_backtest_live_models", False):
+                logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
+                dk = self.start_backtesting(dataframe, metadata, self.dk)
+                dataframe = dk.remove_features_from_df(dk.return_dataframe)
+            else:
+                logger.info(
+                    "Backtesting using historic predictions (live models)")
+                dk = self.start_backtesting_from_historic_predictions(
+                    dataframe, metadata, self.dk)
+                dataframe = dk.return_dataframe
 
-        dataframe = dk.remove_features_from_df(dk.return_dataframe)
         self.clean_up()
         if self.live:
             self.inference_timer('stop', metadata["pair"])
 
         return dataframe
 
     def clean_up(self):
@@ -316,10 +323,11 @@ class IFreqaiModel(ABC):
                 self.model = self.dd.load_data(pair, dk)
 
             pred_df, do_preds = self.predict(dataframe_backtest, dk)
-            append_df = dk.get_predictions_to_append(pred_df, do_preds)
+            append_df = dk.get_predictions_to_append(pred_df, do_preds, dataframe_backtest)
             dk.append_predictions(append_df)
             dk.save_backtesting_prediction(append_df)
 
+        self.backtesting_fit_live_predictions(dk)
         dk.fill_predictions(dataframe)
 
         return dk
@@ -632,6 +640,8 @@ class IFreqaiModel(ABC):
             self.dd.historic_predictions[pair] = pred_df
             hist_preds_df = self.dd.historic_predictions[pair]
 
+            self.set_start_dry_live_date(strat_df)
+
         for label in hist_preds_df.columns:
             if hist_preds_df[label].dtype == object:
                 continue
@@ -672,7 +682,8 @@ class IFreqaiModel(ABC):
         for label in full_labels:
             if self.dd.historic_predictions[dk.pair][label].dtype == object:
                 continue
-            f = spy.stats.norm.fit(self.dd.historic_predictions[dk.pair][label].tail(num_candles))
+            f = spy.stats.norm.fit(
+                self.dd.historic_predictions[dk.pair][label].tail(num_candles))
             dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1]
 
         return
@@ -826,6 +837,81 @@ class IFreqaiModel(ABC):
                 f"to {tr_train.stop_fmt}, {train_it}/{total_trains} "
                 "trains"
             )
 
+    def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen):
+        """
+        Apply fit_live_predictions function in backtesting with a dummy historic_predictions
+        The loop is required to simulate dry/live operation, as it is not possible to predict
+        the type of logic implemented by the user.
+        :param dk: datakitchen object
+        """
+        fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0)
+        if fit_live_predictions_candles:
+            logger.info("Applying fit_live_predictions in backtesting")
+            label_columns = [col for col in dk.full_df.columns if (
+                col.startswith("&") and
+                not (col.startswith("&") and col.endswith("_mean")) and
+                not (col.startswith("&") and col.endswith("_std")) and
+                col not in self.dk.data["extra_returns_per_train"])
+            ]
+
+            for index in range(len(dk.full_df)):
+                if index >= fit_live_predictions_candles:
+                    self.dd.historic_predictions[self.dk.pair] = (
+                        dk.full_df.iloc[index - fit_live_predictions_candles:index])
+                    self.fit_live_predictions(self.dk, self.dk.pair)
+                    for label in label_columns:
+                        if dk.full_df[label].dtype == object:
+                            continue
+                        if "labels_mean" in self.dk.data:
+                            dk.full_df.at[index, f"{label}_mean"] = (
+                                self.dk.data["labels_mean"][label])
+                        if "labels_std" in self.dk.data:
+                            dk.full_df.at[index, f"{label}_std"] = self.dk.data["labels_std"][label]
+
+                    for extra_col in self.dk.data["extra_returns_per_train"]:
+                        dk.full_df.at[index, f"{extra_col}"] = (
+                            self.dk.data["extra_returns_per_train"][extra_col])
+
+        return
+
+    def update_metadata(self, metadata: Dict[str, Any]):
+        """
+        Update global metadata and save the updated json file
+        :param metadata: new global metadata dict
+        """
+        self.dd.save_global_metadata_to_disk(metadata)
+        self.metadata = metadata
+
+    def set_start_dry_live_date(self, live_dataframe: DataFrame):
+        key_name = "start_dry_live_date"
+        if key_name not in self.metadata:
+            metadata = self.metadata
+            metadata[key_name] = int(
+                pd.to_datetime(live_dataframe.tail(1)["date"].values[0]).timestamp())
+            self.update_metadata(metadata)
+
+    def start_backtesting_from_historic_predictions(
+        self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen
+    ) -> FreqaiDataKitchen:
+        """
+        :param dataframe: DataFrame = strategy passed dataframe
+        :param metadata: Dict = pair metadata
+        :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
+        :return:
+            FreqaiDataKitchen = Data management/analysis tool associated to present pair only
+        """
+        pair = metadata["pair"]
+        dk.return_dataframe = dataframe
+        saved_dataframe = self.dd.historic_predictions[pair]
+        columns_to_drop = list(set(saved_dataframe.columns).intersection(
+            dk.return_dataframe.columns))
+        dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop))
+        dk.return_dataframe = pd.merge(
+            dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred")
+        # dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0)
+        return dk
+
     # Following methods which are overridden by user made prediction models.
     # See freqai/prediction_models/CatboostPredictionModel.py for an example.
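Backtesting from historic predictions is essentially a join problem: drop any overlapping columns from the strategy dataframe, then left-merge the saved live predictions onto it by candle date. A reduced sketch — all column names besides date/date_pred are hypothetical:

    import pandas as pd

    dataframe = pd.DataFrame({'date': pd.to_datetime(['2022-01-01', '2022-01-02']),
                              'close': [100.0, 101.0]})
    saved = pd.DataFrame({'date_pred': pd.to_datetime(['2022-01-02']),
                          '&-prediction': [0.3]})
    overlap = set(saved.columns).intersection(dataframe.columns)  # empty here
    merged = pd.merge(dataframe.drop(columns=list(overlap)), saved,
                      how='left', left_on='date', right_on='date_pred')
    # Candles recorded live carry their prediction; earlier candles get NaN.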
@@ -14,6 +14,7 @@ from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_seconds
 from freqtrade.exchange.exchange import market_is_active
+from freqtrade.freqai.data_drawer import FreqaiDataDrawer
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
 
@@ -229,5 +230,6 @@ def get_timerange_backtest_live_models(config: Config) -> str:
     """
     dk = FreqaiDataKitchen(config)
     models_path = dk.get_full_models_path(config)
-    timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path)
+    dd = FreqaiDataDrawer(models_path, config)
+    timerange = dd.get_timerange_from_live_historic_predictions()
     return timerange.timerange_str
@@ -7,6 +7,8 @@ import logging
 import sys
 from typing import Any, List
 
+from freqtrade.util.gc_setup import gc_set_threshold
+
 
 # check min. python version
 if sys.version_info < (3, 8):  # pragma: no cover
@@ -36,6 +38,7 @@ def main(sysargv: List[str] = None) -> None:
     # Call subcommand.
     if 'func' in args:
         logger.info(f'freqtrade {__version__}')
+        gc_set_threshold()
         return_code = args['func'](args)
     else:
         # No subcommand was issued.
@@ -87,7 +87,7 @@ class PairLocks():
         Get the lock that expires the latest for the pair given.
         """
         locks = PairLocks.get_pair_locks(pair, now, side=side)
-        locks = sorted(locks, key=lambda l: l.lock_end_time, reverse=True)
+        locks = sorted(locks, key=lambda lock: lock.lock_end_time, reverse=True)
         return locks[0] if locks else None
 
     @staticmethod
@@ -740,6 +740,24 @@ class RPC:
         self._freqtrade.wallets.update()
         return {'result': f'Created sell order for trade {trade_id}.'}
 
+    def _force_entry_validations(self, pair: str, order_side: SignalDirection):
+        if not self._freqtrade.config.get('force_entry_enable', False):
+            raise RPCException('Force_entry not enabled.')
+
+        if self._freqtrade.state != State.RUNNING:
+            raise RPCException('trader is not running')
+
+        if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT:
+            raise RPCException("Can't go short on Spot markets.")
+
+        if pair not in self._freqtrade.exchange.get_markets(tradable_only=True):
+            raise RPCException('Symbol does not exist or market is not active.')
+
+        # Check if pair quote currency equals to the stake currency.
+        stake_currency = self._freqtrade.config.get('stake_currency')
+        if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency:
+            raise RPCException(
+                f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.')
+
     def _rpc_force_entry(self, pair: str, price: Optional[float], *,
                          order_type: Optional[str] = None,
                          order_side: SignalDirection = SignalDirection.LONG,
@@ -750,21 +768,8 @@ class RPC:
         Handler for forcebuy <asset> <price>
         Buys a pair trade at the given or current price
         """
+        self._force_entry_validations(pair, order_side)
 
-        if not self._freqtrade.config.get('force_entry_enable', False):
-            raise RPCException('Force_entry not enabled.')
-
-        if self._freqtrade.state != State.RUNNING:
-            raise RPCException('trader is not running')
-
-        if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT:
-            raise RPCException("Can't go short on Spot markets.")
-
-        # Check if pair quote currency equals to the stake currency.
-        stake_currency = self._freqtrade.config.get('stake_currency')
-        if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency:
-            raise RPCException(
-                f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.')
-        # check if valid pair
 
         # check if pair already has an open pair
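The refactor hoists every pre-trade guard into _force_entry_validations, so the handler body becomes a single call and the guards can be unit-tested without an exchange round-trip. The extracted-validation pattern, reduced to a sketch — all names here are hypothetical stand-ins:

    class EntryError(Exception):
        pass

    def _validate(enabled: bool, running: bool) -> None:
        # Each guard raises early with a user-facing message.
        if not enabled:
            raise EntryError('Force_entry not enabled.')
        if not running:
            raise EntryError('trader is not running')

    def force_entry(enabled: bool, running: bool) -> str:
        _validate(enabled, running)   # one call replaces the inlined checks
        return 'order placed'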
@@ -79,6 +79,8 @@ def authorized_only(command_handler: Callable[..., None]) -> Callable[..., Any]:
         )
         try:
             return command_handler(self, *args, **kwargs)
+        except RPCException as e:
+            self._send_msg(str(e))
         except BaseException:
             logger.exception('Exception occurred within Telegram module')
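Catching RPCException once inside the authorized_only decorator is what lets every handler below drop its own try/except RPCException wrapper. The decorator-centralized pattern as a sketch — the exception class and send method are hypothetical stand-ins:

    import functools

    class RPCException(Exception):
        pass

    def authorized_only(handler):
        @functools.wraps(handler)
        def wrapper(self, *args, **kwargs):
            try:
                return handler(self, *args, **kwargs)
            except RPCException as e:
                self._send_msg(str(e))   # one reply path for all handlers
        return wrapper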
@@ -538,72 +540,67 @@ class Telegram(RPCHandler):
         handler for `/status` and `/status <id>`.
 
         """
-        try:
-            # Check if there's at least one numerical ID provided.
-            # If so, try to get only these trades.
-            trade_ids = []
-            if context.args and len(context.args) > 0:
-                trade_ids = [int(i) for i in context.args if i.isnumeric()]
+        # Check if there's at least one numerical ID provided.
+        # If so, try to get only these trades.
+        trade_ids = []
+        if context.args and len(context.args) > 0:
+            trade_ids = [int(i) for i in context.args if i.isnumeric()]
 
-            results = self._rpc._rpc_trade_status(trade_ids=trade_ids)
-            position_adjust = self._config.get('position_adjustment_enable', False)
-            max_entries = self._config.get('max_entry_position_adjustment', -1)
-            for r in results:
-                r['open_date_hum'] = arrow.get(r['open_date']).humanize()
-                r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']])
-                r['exit_reason'] = r.get('exit_reason', "")
-                lines = [
-                    "*Trade ID:* `{trade_id}`" +
-                    (" `(since {open_date_hum})`" if r['is_open'] else ""),
-                    "*Current Pair:* {pair}",
-                    "*Direction:* " + ("`Short`" if r.get('is_short') else "`Long`"),
-                    "*Leverage:* `{leverage}`" if r.get('leverage') else "",
-                    "*Amount:* `{amount} ({stake_amount} {quote_currency})`",
-                    "*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "",
-                    "*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "",
-                ]
+        results = self._rpc._rpc_trade_status(trade_ids=trade_ids)
+        position_adjust = self._config.get('position_adjustment_enable', False)
+        max_entries = self._config.get('max_entry_position_adjustment', -1)
+        for r in results:
+            r['open_date_hum'] = arrow.get(r['open_date']).humanize()
+            r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']])
+            r['exit_reason'] = r.get('exit_reason', "")
+            lines = [
+                "*Trade ID:* `{trade_id}`" +
+                (" `(since {open_date_hum})`" if r['is_open'] else ""),
+                "*Current Pair:* {pair}",
+                "*Direction:* " + ("`Short`" if r.get('is_short') else "`Long`"),
+                "*Leverage:* `{leverage}`" if r.get('leverage') else "",
+                "*Amount:* `{amount} ({stake_amount} {quote_currency})`",
+                "*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "",
+                "*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "",
+            ]
 
-                if position_adjust:
-                    max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "")
-                    lines.append("*Number of Entries:* `{num_entries}`" + max_buy_str)
+            if position_adjust:
+                max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "")
+                lines.append("*Number of Entries:* `{num_entries}`" + max_buy_str)
 
-                lines.extend([
-                    "*Open Rate:* `{open_rate:.8f}`",
-                    "*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "",
-                    "*Open Date:* `{open_date}`",
-                    "*Close Date:* `{close_date}`" if r['close_date'] else "",
-                    "*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "",
-                    ("*Current Profit:* " if r['is_open'] else "*Close Profit: *")
-                    + "`{profit_ratio:.2%}`",
-                ])
+            lines.extend([
+                "*Open Rate:* `{open_rate:.8f}`",
+                "*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "",
+                "*Open Date:* `{open_date}`",
+                "*Close Date:* `{close_date}`" if r['close_date'] else "",
+                "*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "",
+                ("*Current Profit:* " if r['is_open'] else "*Close Profit: *")
+                + "`{profit_ratio:.2%}`",
+            ])
 
-                if r['is_open']:
-                    if r.get('realized_profit'):
-                        lines.append("*Realized Profit:* `{realized_profit:.8f}`")
-                    if (r['stop_loss_abs'] != r['initial_stop_loss_abs']
-                            and r['initial_stop_loss_ratio'] is not None):
-                        # Adding initial stoploss only if it is different from stoploss
-                        lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` "
-                                     "`({initial_stop_loss_ratio:.2%})`")
+            if r['is_open']:
+                if r.get('realized_profit'):
+                    lines.append("*Realized Profit:* `{realized_profit:.8f}`")
+                if (r['stop_loss_abs'] != r['initial_stop_loss_abs']
+                        and r['initial_stop_loss_ratio'] is not None):
+                    # Adding initial stoploss only if it is different from stoploss
+                    lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` "
+                                 "`({initial_stop_loss_ratio:.2%})`")
 
-                    # Adding stoploss and stoploss percentage only if it is not None
-                    lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " +
-                                 ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else ""))
-                    lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` "
-                                 "`({stoploss_current_dist_ratio:.2%})`")
-                    if r['open_order']:
-                        lines.append(
-                            "*Open Order:* `{open_order}`"
-                            + "- `{exit_order_status}`" if r['exit_order_status'] else "")
+                # Adding stoploss and stoploss percentage only if it is not None
+                lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " +
+                             ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else ""))
+                lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` "
+                             "`({stoploss_current_dist_ratio:.2%})`")
+                if r['open_order']:
+                    lines.append(
+                        "*Open Order:* `{open_order}`"
+                        + "- `{exit_order_status}`" if r['exit_order_status'] else "")
 
-                lines_detail = self._prepare_order_details(
-                    r['orders'], r['quote_currency'], r['is_open'])
-                lines.extend(lines_detail if lines_detail else "")
-                self.__send_status_msg(lines, r)
-
-        except RPCException as e:
-            self._send_msg(str(e))
+            lines_detail = self._prepare_order_details(
+                r['orders'], r['quote_currency'], r['is_open'])
+            lines.extend(lines_detail if lines_detail else "")
+            self.__send_status_msg(lines, r)
 
     def __send_status_msg(self, lines: List[str], r: Dict[str, Any]) -> None:
         """
@@ -630,37 +627,34 @@ class Telegram(RPCHandler):
         :param update: message update
         :return: None
         """
-        try:
-            fiat_currency = self._config.get('fiat_display_currency', '')
-            statlist, head, fiat_profit_sum = self._rpc._rpc_status_table(
-                self._config['stake_currency'], fiat_currency)
+        fiat_currency = self._config.get('fiat_display_currency', '')
+        statlist, head, fiat_profit_sum = self._rpc._rpc_status_table(
+            self._config['stake_currency'], fiat_currency)
 
-            show_total = not isnan(fiat_profit_sum) and len(statlist) > 1
-            max_trades_per_msg = 50
-            """
-            Calculate the number of messages of 50 trades per message
-            0.99 is used to make sure that there are no extra (empty) messages
-            As an example with 50 trades, there will be int(50/50 + 0.99) = 1 message
-            """
-            messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1)
-            for i in range(0, messages_count):
-                trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg]
-                if show_total and i == messages_count - 1:
-                    # append total line
-                    trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"])
+        show_total = not isnan(fiat_profit_sum) and len(statlist) > 1
+        max_trades_per_msg = 50
+        """
+        Calculate the number of messages of 50 trades per message
+        0.99 is used to make sure that there are no extra (empty) messages
+        As an example with 50 trades, there will be int(50/50 + 0.99) = 1 message
+        """
+        messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1)
+        for i in range(0, messages_count):
+            trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg]
+            if show_total and i == messages_count - 1:
+                # append total line
+                trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"])
 
-                message = tabulate(trades,
-                                   headers=head,
-                                   tablefmt='simple')
-                if show_total and i == messages_count - 1:
-                    # insert separators line between Total
-                    lines = message.split("\n")
-                    message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]])
-                self._send_msg(f"<pre>{message}</pre>", parse_mode=ParseMode.HTML,
-                               reload_able=True, callback_path="update_status_table",
-                               query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))
+            message = tabulate(trades,
+                               headers=head,
+                               tablefmt='simple')
+            if show_total and i == messages_count - 1:
+                # insert separators line between Total
+                lines = message.split("\n")
+                message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]])
+            self._send_msg(f"<pre>{message}</pre>", parse_mode=ParseMode.HTML,
+                           reload_able=True, callback_path="update_status_table",
+                           query=update.callback_query)
 
     @authorized_only
     def _timeunit_stats(self, update: Update, context: CallbackContext, unit: str) -> None:
@@ -686,35 +680,32 @@ class Telegram(RPCHandler):
             timescale = int(context.args[0]) if context.args else val.default
         except (TypeError, ValueError, IndexError):
             timescale = val.default
-        try:
-            stats = self._rpc._rpc_timeunit_profit(
-                timescale,
-                stake_cur,
-                fiat_disp_cur,
-                unit
-            )
-            stats_tab = tabulate(
-                [[f"{period['date']} ({period['trade_count']})",
-                  f"{round_coin_value(period['abs_profit'], stats['stake_currency'])}",
-                  f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}",
-                  f"{period['rel_profit']:.2%}",
-                  ] for period in stats['data']],
-                headers=[
-                    f"{val.header} (count)",
-                    f'{stake_cur}',
-                    f'{fiat_disp_cur}',
-                    'Profit %',
-                    'Trades',
-                ],
-                tablefmt='simple')
-            message = (
-                f'<b>{val.message} Profit over the last {timescale} {val.message2}</b>:\n'
-                f'<pre>{stats_tab}</pre>'
-            )
-            self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True,
-                           callback_path=val.callback, query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))
+        stats = self._rpc._rpc_timeunit_profit(
+            timescale,
+            stake_cur,
+            fiat_disp_cur,
+            unit
+        )
+        stats_tab = tabulate(
+            [[f"{period['date']} ({period['trade_count']})",
+              f"{round_coin_value(period['abs_profit'], stats['stake_currency'])}",
+              f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}",
+              f"{period['rel_profit']:.2%}",
+              ] for period in stats['data']],
+            headers=[
+                f"{val.header} (count)",
+                f'{stake_cur}',
+                f'{fiat_disp_cur}',
+                'Profit %',
+                'Trades',
+            ],
+            tablefmt='simple')
+        message = (
+            f'<b>{val.message} Profit over the last {timescale} {val.message2}</b>:\n'
+            f'<pre>{stats_tab}</pre>'
+        )
+        self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True,
+                       callback_path=val.callback, query=update.callback_query)
 
     @authorized_only
     def _daily(self, update: Update, context: CallbackContext) -> None:
@@ -878,79 +869,76 @@ class Telegram(RPCHandler):
     @authorized_only
     def _balance(self, update: Update, context: CallbackContext) -> None:
         """ Handler for /balance """
-        try:
-            result = self._rpc._rpc_balance(self._config['stake_currency'],
-                                            self._config.get('fiat_display_currency', ''))
-
-            balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0)
-            if not balance_dust_level:
-                balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0)
-
-            output = ''
-            if self._config['dry_run']:
-                output += "*Warning:* Simulated balances in Dry Mode.\n"
-            starting_cap = round_coin_value(
-                result['starting_capital'], self._config['stake_currency'])
-            output += f"Starting capital: `{starting_cap}`"
-            starting_cap_fiat = round_coin_value(
-                result['starting_capital_fiat'], self._config['fiat_display_currency']
-            ) if result['starting_capital_fiat'] > 0 else ''
-            output += (f" `, {starting_cap_fiat}`.\n"
-                       ) if result['starting_capital_fiat'] > 0 else '.\n'
-
-            total_dust_balance = 0
-            total_dust_currencies = 0
-            for curr in result['currencies']:
-                curr_output = ''
-                if curr['est_stake'] > balance_dust_level:
-                    if curr['is_position']:
-                        curr_output = (
-                            f"*{curr['currency']}:*\n"
-                            f"\t`{curr['side']}: {curr['position']:.8f}`\n"
-                            f"\t`Leverage: {curr['leverage']:.1f}`\n"
-                            f"\t`Est. {curr['stake']}: "
-                            f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n")
-                    else:
-                        curr_output = (
-                            f"*{curr['currency']}:*\n"
-                            f"\t`Available: {curr['free']:.8f}`\n"
-                            f"\t`Balance: {curr['balance']:.8f}`\n"
-                            f"\t`Pending: {curr['used']:.8f}`\n"
-                            f"\t`Est. {curr['stake']}: "
-                            f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n")
-                elif curr['est_stake'] <= balance_dust_level:
-                    total_dust_balance += curr['est_stake']
-                    total_dust_currencies += 1
-
-                # Handle overflowing message length
-                if len(output + curr_output) >= MAX_MESSAGE_LENGTH:
-                    self._send_msg(output)
-                    output = curr_output
-                else:
-                    output += curr_output
-
-            if total_dust_balance > 0:
-                output += (
-                    f"*{total_dust_currencies} Other "
-                    f"{plural(total_dust_currencies, 'Currency', 'Currencies')} "
-                    f"(< {balance_dust_level} {result['stake']}):*\n"
-                    f"\t`Est. {result['stake']}: "
-                    f"{round_coin_value(total_dust_balance, result['stake'], False)}`\n")
-            tc = result['trade_count'] > 0
-            stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else ''
-            fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else ''
-
-            output += ("\n*Estimated Value*:\n"
-                       f"\t`{result['stake']}: "
-                       f"{round_coin_value(result['total'], result['stake'], False)}`"
-                       f"{stake_improve}\n"
-                       f"\t`{result['symbol']}: "
-                       f"{round_coin_value(result['value'], result['symbol'], False)}`"
-                       f"{fiat_val}\n")
-            self._send_msg(output, reload_able=True, callback_path="update_balance",
-                           query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))
+        result = self._rpc._rpc_balance(self._config['stake_currency'],
+                                        self._config.get('fiat_display_currency', ''))
+
+        balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0)
+        if not balance_dust_level:
+            balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0)
+
+        output = ''
+        if self._config['dry_run']:
+            output += "*Warning:* Simulated balances in Dry Mode.\n"
+        starting_cap = round_coin_value(
+            result['starting_capital'], self._config['stake_currency'])
+        output += f"Starting capital: `{starting_cap}`"
+        starting_cap_fiat = round_coin_value(
+            result['starting_capital_fiat'], self._config['fiat_display_currency']
+        ) if result['starting_capital_fiat'] > 0 else ''
+        output += (f" `, {starting_cap_fiat}`.\n"
+                   ) if result['starting_capital_fiat'] > 0 else '.\n'
+
+        total_dust_balance = 0
+        total_dust_currencies = 0
+        for curr in result['currencies']:
+            curr_output = ''
+            if curr['est_stake'] > balance_dust_level:
+                if curr['is_position']:
+                    curr_output = (
+                        f"*{curr['currency']}:*\n"
+                        f"\t`{curr['side']}: {curr['position']:.8f}`\n"
+                        f"\t`Leverage: {curr['leverage']:.1f}`\n"
+                        f"\t`Est. {curr['stake']}: "
+                        f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n")
+                else:
+                    curr_output = (
+                        f"*{curr['currency']}:*\n"
+                        f"\t`Available: {curr['free']:.8f}`\n"
+                        f"\t`Balance: {curr['balance']:.8f}`\n"
+                        f"\t`Pending: {curr['used']:.8f}`\n"
+                        f"\t`Est. {curr['stake']}: "
+                        f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n")
+            elif curr['est_stake'] <= balance_dust_level:
+                total_dust_balance += curr['est_stake']
+                total_dust_currencies += 1
+
+            # Handle overflowing message length
+            if len(output + curr_output) >= MAX_MESSAGE_LENGTH:
+                self._send_msg(output)
+                output = curr_output
+            else:
+                output += curr_output
+
+        if total_dust_balance > 0:
+            output += (
+                f"*{total_dust_currencies} Other "
+                f"{plural(total_dust_currencies, 'Currency', 'Currencies')} "
+                f"(< {balance_dust_level} {result['stake']}):*\n"
+                f"\t`Est. {result['stake']}: "
+                f"{round_coin_value(total_dust_balance, result['stake'], False)}`\n")
+        tc = result['trade_count'] > 0
+        stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else ''
+        fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else ''
+
+        output += ("\n*Estimated Value*:\n"
+                   f"\t`{result['stake']}: "
+                   f"{round_coin_value(result['total'], result['stake'], False)}`"
+                   f"{stake_improve}\n"
+                   f"\t`{result['symbol']}: "
+                   f"{round_coin_value(result['value'], result['symbol'], False)}`"
+                   f"{fiat_val}\n")
+        self._send_msg(output, reload_able=True, callback_path="update_balance",
+                       query=update.callback_query)
 
     @authorized_only
     def _start(self, update: Update, context: CallbackContext) -> None:
@@ -1125,26 +1113,23 @@ class Telegram(RPCHandler):
|
||||
nrecent = int(context.args[0]) if context.args else 10
|
||||
except (TypeError, ValueError, IndexError):
|
||||
nrecent = 10
|
||||
try:
|
||||
trades = self._rpc._rpc_trade_history(
|
||||
nrecent
|
||||
)
|
||||
trades_tab = tabulate(
|
||||
[[arrow.get(trade['close_date']).humanize(),
|
||||
trade['pair'] + " (#" + str(trade['trade_id']) + ")",
|
||||
f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"]
|
||||
for trade in trades['trades']],
|
||||
headers=[
|
||||
'Close Date',
|
||||
'Pair (ID)',
|
||||
f'Profit ({stake_cur})',
|
||||
],
|
||||
tablefmt='simple')
|
||||
message = (f"<b>{min(trades['trades_count'], nrecent)} recent trades</b>:\n"
|
||||
+ (f"<pre>{trades_tab}</pre>" if trades['trades_count'] > 0 else ''))
|
||||
self._send_msg(message, parse_mode=ParseMode.HTML)
|
||||
except RPCException as e:
|
||||
self._send_msg(str(e))
|
||||
trades = self._rpc._rpc_trade_history(
|
||||
nrecent
|
||||
)
|
||||
trades_tab = tabulate(
|
||||
[[arrow.get(trade['close_date']).humanize(),
|
||||
trade['pair'] + " (#" + str(trade['trade_id']) + ")",
|
||||
f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"]
|
||||
for trade in trades['trades']],
|
||||
headers=[
|
||||
'Close Date',
|
||||
'Pair (ID)',
|
||||
f'Profit ({stake_cur})',
|
||||
],
|
||||
tablefmt='simple')
|
||||
message = (f"<b>{min(trades['trades_count'], nrecent)} recent trades</b>:\n"
|
||||
+ (f"<pre>{trades_tab}</pre>" if trades['trades_count'] > 0 else ''))
|
||||
self._send_msg(message, parse_mode=ParseMode.HTML)
|
||||
|
    @authorized_only
    def _delete_trade(self, update: Update, context: CallbackContext) -> None:

@@ -1155,18 +1140,14 @@ class Telegram(RPCHandler):
        :param update: message update
        :return: None
        """
        try:
            if not context.args or len(context.args) == 0:
                raise RPCException("Trade-id not set.")
            trade_id = int(context.args[0])
            msg = self._rpc._rpc_delete(trade_id)
            self._send_msg((
                f"`{msg['result_msg']}`\n"
                'Please make sure to take care of this asset on the exchange manually.'
            ))

        except RPCException as e:
            self._send_msg(str(e))
        if not context.args or len(context.args) == 0:
            raise RPCException("Trade-id not set.")
        trade_id = int(context.args[0])
        msg = self._rpc._rpc_delete(trade_id)
        self._send_msg((
            f"`{msg['result_msg']}`\n"
            'Please make sure to take care of this asset on the exchange manually.'
        ))
    @authorized_only
    def _performance(self, update: Update, context: CallbackContext) -> None:

@@ -1177,27 +1158,24 @@ class Telegram(RPCHandler):
        :param update: message update
        :return: None
        """
        try:
            trades = self._rpc._rpc_performance()
            output = "<b>Performance:</b>\n"
            for i, trade in enumerate(trades):
                stat_line = (
                    f"{i+1}.\t <code>{trade['pair']}\t"
                    f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                    f"({trade['profit_ratio']:.2%}) "
                    f"({trade['count']})</code>\n")
        trades = self._rpc._rpc_performance()
        output = "<b>Performance:</b>\n"
        for i, trade in enumerate(trades):
            stat_line = (
                f"{i+1}.\t <code>{trade['pair']}\t"
                f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                f"({trade['profit_ratio']:.2%}) "
                f"({trade['count']})</code>\n")

                if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                    self._send_msg(output, parse_mode=ParseMode.HTML)
                    output = stat_line
                else:
                    output += stat_line
            if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                self._send_msg(output, parse_mode=ParseMode.HTML)
                output = stat_line
            else:
                output += stat_line

            self._send_msg(output, parse_mode=ParseMode.HTML,
                           reload_able=True, callback_path="update_performance",
                           query=update.callback_query)
        except RPCException as e:
            self._send_msg(str(e))
        self._send_msg(output, parse_mode=ParseMode.HTML,
                       reload_able=True, callback_path="update_performance",
                       query=update.callback_query)
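The same overflow-splitting loop recurs in _performance, _enter_tag_performance, _exit_reason_performance and _mix_tag_performance below. It is not factored out in this diff, but a hypothetical helper shows the shape of the logic in one place:

    # Hypothetical helper, not part of this commit: accumulate lines into
    # Telegram-sized messages and flush whenever the next line would overflow.
    def _send_in_chunks(self, header: str, stat_lines: list) -> None:
        output = header
        for stat_line in stat_lines:
            if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                self._send_msg(output, parse_mode=ParseMode.HTML)
                output = stat_line
            else:
                output += stat_line
        self._send_msg(output, parse_mode=ParseMode.HTML)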
    @authorized_only
    def _enter_tag_performance(self, update: Update, context: CallbackContext) -> None:

@@ -1208,31 +1186,28 @@ class Telegram(RPCHandler):
        :param update: message update
        :return: None
        """
        try:
            pair = None
            if context.args and isinstance(context.args[0], str):
                pair = context.args[0]
        pair = None
        if context.args and isinstance(context.args[0], str):
            pair = context.args[0]

            trades = self._rpc._rpc_enter_tag_performance(pair)
            output = "<b>Entry Tag Performance:</b>\n"
            for i, trade in enumerate(trades):
                stat_line = (
                    f"{i+1}.\t <code>{trade['enter_tag']}\t"
                    f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                    f"({trade['profit_ratio']:.2%}) "
                    f"({trade['count']})</code>\n")
        trades = self._rpc._rpc_enter_tag_performance(pair)
        output = "<b>Entry Tag Performance:</b>\n"
        for i, trade in enumerate(trades):
            stat_line = (
                f"{i+1}.\t <code>{trade['enter_tag']}\t"
                f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                f"({trade['profit_ratio']:.2%}) "
                f"({trade['count']})</code>\n")

                if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                    self._send_msg(output, parse_mode=ParseMode.HTML)
                    output = stat_line
                else:
                    output += stat_line
            if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                self._send_msg(output, parse_mode=ParseMode.HTML)
                output = stat_line
            else:
                output += stat_line

            self._send_msg(output, parse_mode=ParseMode.HTML,
                           reload_able=True, callback_path="update_enter_tag_performance",
                           query=update.callback_query)
        except RPCException as e:
            self._send_msg(str(e))
        self._send_msg(output, parse_mode=ParseMode.HTML,
                       reload_able=True, callback_path="update_enter_tag_performance",
                       query=update.callback_query)
    @authorized_only
    def _exit_reason_performance(self, update: Update, context: CallbackContext) -> None:

@@ -1243,31 +1218,28 @@ class Telegram(RPCHandler):
        :param update: message update
        :return: None
        """
        try:
            pair = None
            if context.args and isinstance(context.args[0], str):
                pair = context.args[0]
        pair = None
        if context.args and isinstance(context.args[0], str):
            pair = context.args[0]

            trades = self._rpc._rpc_exit_reason_performance(pair)
            output = "<b>Exit Reason Performance:</b>\n"
            for i, trade in enumerate(trades):
                stat_line = (
                    f"{i+1}.\t <code>{trade['exit_reason']}\t"
                    f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                    f"({trade['profit_ratio']:.2%}) "
                    f"({trade['count']})</code>\n")
        trades = self._rpc._rpc_exit_reason_performance(pair)
        output = "<b>Exit Reason Performance:</b>\n"
        for i, trade in enumerate(trades):
            stat_line = (
                f"{i+1}.\t <code>{trade['exit_reason']}\t"
                f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                f"({trade['profit_ratio']:.2%}) "
                f"({trade['count']})</code>\n")

                if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                    self._send_msg(output, parse_mode=ParseMode.HTML)
                    output = stat_line
                else:
                    output += stat_line
            if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                self._send_msg(output, parse_mode=ParseMode.HTML)
                output = stat_line
            else:
                output += stat_line

            self._send_msg(output, parse_mode=ParseMode.HTML,
                           reload_able=True, callback_path="update_exit_reason_performance",
                           query=update.callback_query)
        except RPCException as e:
            self._send_msg(str(e))
        self._send_msg(output, parse_mode=ParseMode.HTML,
                       reload_able=True, callback_path="update_exit_reason_performance",
                       query=update.callback_query)
    @authorized_only
    def _mix_tag_performance(self, update: Update, context: CallbackContext) -> None:

@@ -1278,31 +1250,28 @@ class Telegram(RPCHandler):
        :param update: message update
        :return: None
        """
        try:
            pair = None
            if context.args and isinstance(context.args[0], str):
                pair = context.args[0]
        pair = None
        if context.args and isinstance(context.args[0], str):
            pair = context.args[0]

            trades = self._rpc._rpc_mix_tag_performance(pair)
            output = "<b>Mix Tag Performance:</b>\n"
            for i, trade in enumerate(trades):
                stat_line = (
                    f"{i+1}.\t <code>{trade['mix_tag']}\t"
                    f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                    f"({trade['profit']:.2%}) "
                    f"({trade['count']})</code>\n")
        trades = self._rpc._rpc_mix_tag_performance(pair)
        output = "<b>Mix Tag Performance:</b>\n"
        for i, trade in enumerate(trades):
            stat_line = (
                f"{i+1}.\t <code>{trade['mix_tag']}\t"
                f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                f"({trade['profit']:.2%}) "
                f"({trade['count']})</code>\n")

                if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                    self._send_msg(output, parse_mode=ParseMode.HTML)
                    output = stat_line
                else:
                    output += stat_line
            if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                self._send_msg(output, parse_mode=ParseMode.HTML)
                output = stat_line
            else:
                output += stat_line

            self._send_msg(output, parse_mode=ParseMode.HTML,
                           reload_able=True, callback_path="update_mix_tag_performance",
                           query=update.callback_query)
        except RPCException as e:
            self._send_msg(str(e))
        self._send_msg(output, parse_mode=ParseMode.HTML,
                       reload_able=True, callback_path="update_mix_tag_performance",
                       query=update.callback_query)
    @authorized_only
    def _count(self, update: Update, context: CallbackContext) -> None:

@@ -1313,18 +1282,15 @@ class Telegram(RPCHandler):
        :param update: message update
        :return: None
        """
        try:
            counts = self._rpc._rpc_count()
            message = tabulate({k: [v] for k, v in counts.items()},
                               headers=['current', 'max', 'total stake'],
                               tablefmt='simple')
            message = "<pre>{}</pre>".format(message)
            logger.debug(message)
            self._send_msg(message, parse_mode=ParseMode.HTML,
                           reload_able=True, callback_path="update_count",
                           query=update.callback_query)
        except RPCException as e:
            self._send_msg(str(e))
        counts = self._rpc._rpc_count()
        message = tabulate({k: [v] for k, v in counts.items()},
                           headers=['current', 'max', 'total stake'],
                           tablefmt='simple')
        message = "<pre>{}</pre>".format(message)
        logger.debug(message)
        self._send_msg(message, parse_mode=ParseMode.HTML,
                       reload_able=True, callback_path="update_count",
                       query=update.callback_query)
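As an aside, tabulate accepts a mapping of column name to column values, so the dict comprehension above builds a one-row table. With illustrative counts (values invented), the rendered message looks roughly like:

      current    max    total stake
    ---------  -----  -------------
            2      5           1000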
    @authorized_only
    def _locks(self, update: Update, context: CallbackContext) -> None:

@@ -1372,22 +1338,19 @@ class Telegram(RPCHandler):
        Handler for /whitelist
        Shows the currently active whitelist
        """
        try:
            whitelist = self._rpc._rpc_whitelist()
        whitelist = self._rpc._rpc_whitelist()

            if context.args:
                if "sorted" in context.args:
                    whitelist['whitelist'] = sorted(whitelist['whitelist'])
                if "baseonly" in context.args:
                    whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']]
        if context.args:
            if "sorted" in context.args:
                whitelist['whitelist'] = sorted(whitelist['whitelist'])
            if "baseonly" in context.args:
                whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']]

            message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n"
            message += f"`{', '.join(whitelist['whitelist'])}`"
        message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n"
        message += f"`{', '.join(whitelist['whitelist'])}`"

            logger.debug(message)
            self._send_msg(message)
        except RPCException as e:
            self._send_msg(str(e))
        logger.debug(message)
        self._send_msg(message)
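The two optional arguments compose: "sorted" orders the pairs first, then "baseonly" strips the quote currency. An illustrative exchange (pairlist method and pairs invented):

    /whitelist sorted baseonly
    Using whitelist `StaticPairList` with 3 pairs
    `BTC, ETH, XRP`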
    @authorized_only
    def _blacklist(self, update: Update, context: CallbackContext) -> None:

@@ -1425,30 +1388,27 @@ class Telegram(RPCHandler):
        Shows the latest logs
        """
        try:
            try:
                limit = int(context.args[0]) if context.args else 10
            except (TypeError, ValueError, IndexError):
                limit = 10
            logs = RPC._rpc_get_logs(limit)['logs']
            msgs = ''
            msg_template = "*{}* {}: {} \\- `{}`"
            for logrec in logs:
                msg = msg_template.format(escape_markdown(logrec[0], version=2),
                                          escape_markdown(logrec[2], version=2),
                                          escape_markdown(logrec[3], version=2),
                                          escape_markdown(logrec[4], version=2))
                if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH:
                    # Send message immediately if it would become too long
                    self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
                    msgs = msg + '\n'
                else:
                    # Append message to messages to send
                    msgs += msg + '\n'

            if msgs:
                self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
        except RPCException as e:
            self._send_msg(str(e))
        try:
            limit = int(context.args[0]) if context.args else 10
        except (TypeError, ValueError, IndexError):
            limit = 10
        logs = RPC._rpc_get_logs(limit)['logs']
        msgs = ''
        msg_template = "*{}* {}: {} \\- `{}`"
        for logrec in logs:
            msg = msg_template.format(escape_markdown(logrec[0], version=2),
                                      escape_markdown(logrec[2], version=2),
                                      escape_markdown(logrec[3], version=2),
                                      escape_markdown(logrec[4], version=2))
            if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH:
                # Send message immediately if it would become too long
                self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
                msgs = msg + '\n'
            else:
                # Append message to messages to send
                msgs += msg + '\n'

        if msgs:
            self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
    @authorized_only
    def _edge(self, update: Update, context: CallbackContext) -> None:

@@ -1456,21 +1416,17 @@ class Telegram(RPCHandler):
        Handler for /edge
        Shows information related to Edge
        """
        try:
            edge_pairs = self._rpc._rpc_edge()
            if not edge_pairs:
                message = '<b>Edge only validated following pairs:</b>'
                self._send_msg(message, parse_mode=ParseMode.HTML)
        edge_pairs = self._rpc._rpc_edge()
        if not edge_pairs:
            message = '<b>Edge only validated following pairs:</b>'
            self._send_msg(message, parse_mode=ParseMode.HTML)

            for chunk in chunks(edge_pairs, 25):
                edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple')
                message = (f'<b>Edge only validated following pairs:</b>\n'
                           f'<pre>{edge_pairs_tab}</pre>')
        for chunk in chunks(edge_pairs, 25):
            edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple')
            message = (f'<b>Edge only validated following pairs:</b>\n'
                       f'<pre>{edge_pairs_tab}</pre>')

                self._send_msg(message, parse_mode=ParseMode.HTML)

        except RPCException as e:
            self._send_msg(str(e))
            self._send_msg(message, parse_mode=ParseMode.HTML)
    @authorized_only
    def _help(self, update: Update, context: CallbackContext) -> None:

@@ -1551,12 +1507,9 @@ class Telegram(RPCHandler):
        Handler for /health
        Shows the last process timestamp
        """
        try:
            health = self._rpc._health()
            message = f"Last process: `{health['last_process_loc']}`"
            self._send_msg(message)
        except RPCException as e:
            self._send_msg(str(e))
        health = self._rpc._health()
        message = f"Last process: `{health['last_process_loc']}`"
        self._send_msg(message)

    @authorized_only
    def _version(self, update: Update, context: CallbackContext) -> None:
@@ -19,7 +19,7 @@ class FreqaiExampleHybridStrategy(IStrategy):

    Launching this strategy would be:

    freqtrade trade --strategy FreqaiExampleHyridStrategy --strategy-path freqtrade/templates
    freqtrade trade --strategy FreqaiExampleHybridStrategy --strategy-path freqtrade/templates
    --freqaimodel CatboostClassifier --config config_examples/config_freqai.example.json

    or the user simply adds this to their config:
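The config snippet itself falls outside this hunk. As a hedged illustration only (both key names are real freqtrade config options, values assumed to mirror the command line above):

    "strategy": "FreqaiExampleHybridStrategy",
    "freqaimodel": "CatboostClassifier"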
@@ -86,7 +86,7 @@ class FreqaiExampleHybridStrategy(IStrategy):
    process_only_new_candles = True
    stoploss = -0.05
    use_exit_signal = True
    startup_candle_count: int = 300
    startup_candle_count: int = 30
    can_short = True

    # Hyperoptable parameters
@@ -328,7 +328,7 @@
    "# Show graph inline\n",
    "# graph.show()\n",
    "\n",
    "# Render graph in a seperate window\n",
    "# Render graph in a separate window\n",
    "graph.show(renderer=\"browser\")\n"
   ]
  },
freqtrade/util/gc_setup.py (new file, 18 lines)
@@ -0,0 +1,18 @@
import gc
import logging
import platform


logger = logging.getLogger(__name__)


def gc_set_threshold():
    """
    Reduce number of GC runs to improve performance (explanation video)
    https://www.youtube.com/watch?v=p4Sn6UcFTOU
    """
    if platform.python_implementation() == "CPython":
        # allocs, g1, g2 = gc.get_threshold()
        gc.set_threshold(50_000, 500, 1000)
        logger.debug("Adjusting python allocations to reduce GC runs")
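For context: CPython's default thresholds are (700, 10, 10), so raising generation 0 to 50_000 allocations makes collections far less frequent, trading a little peak memory for fewer GC pauses in the hot loop. The call site is not part of this hunk; a plausible way to wire it in would be once at process startup:

    # Hypothetical call site, not shown in this diff.
    from freqtrade.util.gc_setup import gc_set_threshold

    def main(sysargv=None):
        gc_set_threshold()  # raise GC thresholds before the bot starts trading
        ...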